/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 *			TRANSACTION AND FLUSH HANDLING
 *
 * Deceptively simple but actually fairly difficult to implement properly is
 * how I would describe it.
 *
 * Flushing generally occurs bottom-up but requires a top-down scan to
 * locate chains with MODIFIED and/or UPDATE bits set.  The ONFLUSH flag
 * tells the scan how to recurse downward to find these chains.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define FLUSH_DEBUG 0

#define HAMMER2_FLUSH_DEPTH_LIMIT       10      /* stack recursion limit */
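
/*
 * Chains which hit HAMMER2_FLUSH_DEPTH_LIMIT during the downward
 * recursion are deferred and re-flushed from the top-level loop in
 * hammer2_flush() once the stack unwinds.
 */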


/*
 * Recursively flush the specified chain.  The chain is locked and
 * referenced by the caller and will remain so on return.  The chain
 * will remain referenced throughout but can temporarily lose its
 * lock during the recursion to avoid unnecessarily stalling user
 * processes.
 */
struct hammer2_flush_info {
	hammer2_chain_t *parent;
	int		depth;
	int		diddeferral;
	int		cache_index;
	int		flags;
	struct h2_flush_list flushq;
	hammer2_chain_t	*debug;
};

typedef struct hammer2_flush_info hammer2_flush_info_t;

static void hammer2_flush_core(hammer2_flush_info_t *info,
				hammer2_chain_t *chain, int flags);
static int hammer2_flush_recurse(hammer2_chain_t *child, void *data);

/*
 * Any per-pfs transaction initialization goes here.
 */
void
hammer2_trans_manage_init(hammer2_pfs_t *pmp)
{
}

/*
 * Transaction support for any modifying operation.  Transactions are used
 * in the pmp layer by the frontend and in the spmp layer by the backend.
 *
 * 0			- Normal transaction, interlocked against flush
 *			  transaction.
 *
 * TRANS_ISFLUSH	- Flush transaction, interlocked against normal
 *			  transaction.
 *
 * TRANS_BUFCACHE	- Buffer cache transaction, no interlock.
 *
 * Initializing a new transaction allocates a transaction ID.  Typically
 * passed a pmp, indicating a cluster-wide transaction.  A transaction
 * against a single media target is initialized by passing the hmp's
 * spmp; that mode is used by the recovery code.
 *
 * TWO TRANSACTION IDs can run concurrently, where one is a flush and the
 * other is a set of any number of concurrent filesystem operations.  We
 * can either have <running_fs_ops> + <waiting_flush> + <blocked_fs_ops>
 * or we can have <running_flush> + <concurrent_fs_ops>.
 *
 * During a flush, new fs_ops are only blocked until the fs_ops prior to
 * the flush complete.  The new fs_ops can then run concurrent with the flush.
 *
 * Buffer-cache transactions operate as fs_ops but never block.  A
 * buffer-cache flush will run either before or after the current pending
 * flush depending on its state.
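 *
 * pmp->trans.flags packs an active-transaction count into its low bits
 * (HAMMER2_TRANS_MASK) alongside the state flags, which is how the
 * "(oflags | flags) + 1" steps below set the flags and bump the count
 * in a single atomic_cmpset_int().
 *
 * A minimal caller-side sketch (hypothetical, for illustration only --
 * real callers also deal with inode locks and XOP dispatch):
 *
 *	hammer2_trans_init(pmp, 0);		(open normal transaction)
 *	mtid = hammer2_trans_sub(pmp);		(per-XOP modify_tid)
 *	...modify, using mtid for sequencing...
 *	hammer2_trans_done(pmp);		(close transaction)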
 */
void
hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;
	int dowait;

	for (;;) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		dowait = 0;

		if (flags & HAMMER2_TRANS_ISFLUSH) {
			/*
			 * Requesting flush transaction.  Wait for all
			 * currently running transactions to finish.
			 */
			if (oflags & HAMMER2_TRANS_MASK) {
				nflags = oflags | HAMMER2_TRANS_FPENDING |
						  HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
		} else if (flags & HAMMER2_TRANS_BUFCACHE) {
			/*
			 * Requesting strategy transaction.  Generally
			 * allowed in all situations unless a flush
			 * is running without the preflush flag.
			 */
			if ((oflags & (HAMMER2_TRANS_ISFLUSH |
				       HAMMER2_TRANS_PREFLUSH)) ==
			    HAMMER2_TRANS_ISFLUSH) {
				nflags = oflags | HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
		} else {
			/*
			 * Requesting normal transaction.  Wait for any
			 * flush to finish before allowing.
			 */
			if (oflags & HAMMER2_TRANS_ISFLUSH) {
				nflags = oflags | HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
		}
		if (dowait)
			tsleep_interlock(&pmp->trans.sync_wait, 0);
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			if (dowait == 0)
				break;
			tsleep(&pmp->trans.sync_wait, PINTERLOCKED,
			       "h2trans", hz);
		} else {
			cpu_pause();
		}
		/* retry */
	}
}

/*
 * Start a sub-transaction; there is no 'subdone' function.  This will
 * issue a new modify_tid (mtid) for the current transaction, which is a
 * CLC (cluster level change) id and not a per-node id.
 *
 * This function must be called for each XOP when multiple XOPs are run in
 * sequence within a transaction.
 *
 * Callers typically update the inode with the transaction mtid manually
 * to enforce sequencing.
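 *
 * The mtid comes from a single per-pmp 64-bit counter incremented with
 * atomic_fetchadd_64(), so the values handed out are strictly increasing
 * across all concurrent callers.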
 */
hammer2_tid_t
hammer2_trans_sub(hammer2_pfs_t *pmp)
{
	hammer2_tid_t mtid;

	mtid = atomic_fetchadd_64(&pmp->modify_tid, 1);

	return (mtid);
}

/*
 * Clears the PREFLUSH stage, called during a flush transaction after all
 * logical buffer I/O has completed.
 */
void
hammer2_trans_clear_preflush(hammer2_pfs_t *pmp)
{
	atomic_clear_int(&pmp->trans.flags, HAMMER2_TRANS_PREFLUSH);
}

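/*
 * Terminate a transaction started by hammer2_trans_init().  The last
 * transaction out (count drops to zero) clears the transaction state
 * flags and wakes up any waiters flagged HAMMER2_TRANS_WAITING.
 */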
void
hammer2_trans_done(hammer2_pfs_t *pmp)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		KKASSERT(oflags & HAMMER2_TRANS_MASK);
		if ((oflags & HAMMER2_TRANS_MASK) == 1) {
			/*
			 * This was the last transaction
			 */
			nflags = (oflags - 1) & ~(HAMMER2_TRANS_ISFLUSH |
						  HAMMER2_TRANS_BUFCACHE |
						  HAMMER2_TRANS_PREFLUSH |
						  HAMMER2_TRANS_FPENDING |
						  HAMMER2_TRANS_WAITING);
		} else {
			/*
			 * Still transactions pending
			 */
			nflags = oflags - 1;
		}
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			if ((nflags & HAMMER2_TRANS_MASK) == 0 &&
			    (oflags & HAMMER2_TRANS_WAITING)) {
				wakeup(&pmp->trans.sync_wait);
			}
			break;
		} else {
			cpu_pause();
		}
		/* retry */
	}
}

/*
 * Obtain new, unique inode number (not serialized by caller).
 */
hammer2_tid_t
hammer2_trans_newinum(hammer2_pfs_t *pmp)
{
	hammer2_tid_t tid;

	tid = atomic_fetchadd_64(&pmp->inode_tid, 1);

	return tid;
}

/*
 * Assert that a strategy call is ok here.  Strategy calls are legal
 *
 * (1) In a normal transaction.
 * (2) In a flush transaction only if PREFLUSH is also set.
 */
void
hammer2_trans_assert_strategy(hammer2_pfs_t *pmp)
{
	KKASSERT((pmp->trans.flags & HAMMER2_TRANS_ISFLUSH) == 0 ||
		 (pmp->trans.flags & HAMMER2_TRANS_PREFLUSH));
}


/*
 * Chains undergoing destruction are removed from the in-memory topology.
 * To avoid getting lost these chains are placed on the delayed flush
 * queue which will properly dispose of them.
 *
 * We do this instead of issuing an immediate flush in order to give
 * recursive deletions (rm -rf, etc) a chance to remove more of the
 * hierarchy, potentially allowing an enormous amount of write I/O to
 * be avoided.
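 *
 * Chains queued here are marked DEFERRED/DELAYED and are picked up by
 * the hmp->flushq drain at the top of hammer2_flush().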
 */
void
hammer2_delayed_flush(hammer2_chain_t *chain)
{
	if ((chain->flags & HAMMER2_CHAIN_DELAYED) == 0) {
		hammer2_spin_ex(&chain->hmp->list_spin);
		if ((chain->flags & (HAMMER2_CHAIN_DELAYED |
				     HAMMER2_CHAIN_DEFERRED)) == 0) {
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELAYED |
						      HAMMER2_CHAIN_DEFERRED);
			TAILQ_INSERT_TAIL(&chain->hmp->flushq,
					  chain, flush_node);
			hammer2_chain_ref(chain);
		}
		hammer2_spin_unex(&chain->hmp->list_spin);
		hammer2_voldata_modify(chain->hmp);
	}
}

/*
 * Flush the chain and all modified sub-chains through the specified
 * synchronization point, propagating blockref updates back up.  As
 * part of this propagation, mirror_tid and inode/data usage statistics
 * propagate back upward.
 *
 * modify_tid (clc - cluster level change) is not propagated.
 *
 * update_tid (clc) is used for validation and is not propagated by this
 * function.
 *
 * This routine can be called from several places but the most important
 * is from VFS_SYNC (frontend) via hammer2_inode_xop_flush (backend).
 *
 * chain is locked on call and will remain locked on return.  The chain's
 * UPDATE flag indicates that its parent's block table (which is not yet
 * part of the flush) should be updated.  The chain may be replaced by
 * the call if it was modified.
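 *
 * HAMMER2_FLUSH_TOP applies only to the top-level chain of the call and
 * is stripped for the recursion (see the info.flags setup below).
 * HAMMER2_FLUSH_ALL forces the recursion to cross PFS boundaries.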
 */
void
hammer2_flush(hammer2_chain_t *chain, int flags)
{
	hammer2_chain_t *scan;
	hammer2_flush_info_t info;
	hammer2_dev_t *hmp;
	int loops;

	/*
	 * Execute the recursive flush and handle deferrals.
	 *
	 * Chains can be ridiculously long (thousands deep), so to
	 * avoid blowing out the kernel stack the recursive flush has a
	 * depth limit.  Elements at the limit are placed on a list
	 * for re-execution after the stack has been popped.
	 */
	bzero(&info, sizeof(info));
	TAILQ_INIT(&info.flushq);
	info.cache_index = -1;
	info.flags = flags & ~HAMMER2_FLUSH_TOP;

	/*
	 * Calculate parent (can be NULL), if not NULL the flush core
	 * expects the parent to be referenced so it can easily lock/unlock
	 * it without it getting ripped up.
	 */
	if ((info.parent = chain->parent) != NULL)
		hammer2_chain_ref(info.parent);

	/*
	 * Extra ref needed because flush_core expects it when replacing
	 * chain.
	 */
	hammer2_chain_ref(chain);
	hmp = chain->hmp;
	loops = 0;

	for (;;) {
		/*
		 * Move hmp->flushq to info.flushq if non-empty so it can
		 * be processed.
		 */
		if (TAILQ_FIRST(&hmp->flushq) != NULL) {
			hammer2_spin_ex(&chain->hmp->list_spin);
			TAILQ_CONCAT(&info.flushq, &hmp->flushq, flush_node);
			hammer2_spin_unex(&chain->hmp->list_spin);
		}

		/*
		 * Unwind deep recursions which had been deferred.  This
		 * can leave the FLUSH_* bits set for these chains, which
		 * will be handled when we [re]flush chain after the unwind.
		 */
		while ((scan = TAILQ_FIRST(&info.flushq)) != NULL) {
			KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
			TAILQ_REMOVE(&info.flushq, scan, flush_node);
			atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED |
						       HAMMER2_CHAIN_DELAYED);

			/*
			 * Now that we've popped back up we can do a secondary
			 * recursion on the deferred elements.
			 *
			 * NOTE: hammer2_flush() may replace scan.
			 */
			if (hammer2_debug & 0x0040)
				kprintf("deferred flush %p\n", scan);
			hammer2_chain_lock(scan, HAMMER2_RESOLVE_MAYBE);
			hammer2_flush(scan, flags & ~HAMMER2_FLUSH_TOP);
			hammer2_chain_unlock(scan);
			hammer2_chain_drop(scan);	/* ref from deferral */
		}

		/*
		 * [re]flush chain.
		 */
		info.diddeferral = 0;
		hammer2_flush_core(&info, chain, flags);

		/*
		 * Only loop if deep recursions have been deferred.
		 */
		if (TAILQ_EMPTY(&info.flushq))
			break;

		if (++loops % 1000 == 0) {
			kprintf("hammer2_flush: excessive loops on %p\n",
				chain);
			if (hammer2_debug & 0x100000)
				Debugger("hell4");
		}
	}
	hammer2_chain_drop(chain);
	if (info.parent)
		hammer2_chain_drop(info.parent);
}

/*
 * This is the core of the chain flushing code.  The chain is locked by the
 * caller and must also have an extra ref on it by the caller, and remains
 * locked and will have an extra ref on return.  Upon return, the caller can
 * test the UPDATE bit on the child to determine if the parent needs updating.
 *
 * (1) Determine if this node is a candidate for the flush, return if it is
 *     not.  fchain and vchain are always candidates for the flush.
 *
 * (2) If we recurse too deep the chain is entered onto the deferral list and
 *     the current flush stack is aborted until after the deferral list is
 *     run.
 *
 * (3) Recursively flush live children (rbtree).  This can create deferrals.
 *     A successful flush clears the MODIFIED and UPDATE bits on the children
 *     and typically causes the parent to be marked MODIFIED as the children
 *     update the parent's block table.  A parent might already be marked
 *     MODIFIED due to a deletion (whose blocktable update in the parent is
 *     handled by the frontend), or if the parent itself is modified by the
 *     frontend for other reasons.
 *
 * (4) Permanently disconnected sub-trees are cleaned up by the front-end.
 *     Deleted-but-open inodes can still be individually flushed via the
 *     filesystem syncer.
 *
 * (5) Note that an unmodified child may still need the block table in its
 *     parent updated (e.g. rename/move).  The child will have UPDATE set
 *     in this case.
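 *
 * (6) If the chain is re-MODIFIED while it is temporarily unlocked for
 *     the parent blocktable update, the update is retried (see the
 *     'again' label below).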
 *
 *			WARNING ON BREF MODIFY_TID/MIRROR_TID
 *
 * blockref.modify_tid is consistent only within a PFS, and will not be
 * consistent during synchronization.  mirror_tid is consistent across the
 * block device regardless of the PFS.
 */
static void
hammer2_flush_core(hammer2_flush_info_t *info, hammer2_chain_t *chain,
		   int flags)
{
	hammer2_chain_t *parent;
	hammer2_dev_t *hmp;
	int diddeferral;

	/*
	 * (1) Optimize downward recursion to locate nodes needing action.
	 *     Nothing to do if none of these flags are set.
	 */
	if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) == 0) {
		if (hammer2_debug & 0x200) {
			if (info->debug == NULL)
				info->debug = chain;
		} else {
			return;
		}
	}

	hmp = chain->hmp;
	diddeferral = info->diddeferral;
	parent = info->parent;		/* can be NULL */

	/*
	 * Downward search recursion
	 */
	if (chain->flags & (HAMMER2_CHAIN_DEFERRED | HAMMER2_CHAIN_DELAYED)) {
		/*
		 * Already deferred.
		 */
		++info->diddeferral;
	} else if ((chain->flags & HAMMER2_CHAIN_PFSBOUNDARY) &&
		   (flags & HAMMER2_FLUSH_ALL) == 0 &&
		   (flags & HAMMER2_FLUSH_TOP) == 0) {
		/*
		 * We do not recurse through PFSROOTs.  PFSROOT flushes are
		 * handled by the related pmp's (whether mounted or not,
		 * including during recovery).
		 *
		 * But we must still process the PFSROOT chains for block
		 * table updates in their parent (which IS part of our flush).
		 *
		 * Note that the volume root, vchain, does not set this flag.
		 * Note the logic here requires that this test be done before
		 * the depth-limit test, else it might become the top on a
		 * flushq iteration.
		 */
		;
	} else if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT) {
		/*
		 * Recursion depth reached.
		 */
		KKASSERT((chain->flags & HAMMER2_CHAIN_DELAYED) == 0);
		hammer2_chain_ref(chain);
		TAILQ_INSERT_TAIL(&info->flushq, chain, flush_node);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
		++info->diddeferral;
	} else if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				   HAMMER2_CHAIN_DESTROY)) {
		/*
		 * Downward recursion search (actual flush occurs bottom-up).
		 * pre-clear ONFLUSH.  It can get set again due to races,
		 * which we want so the scan finds us again in the next flush.
		 *
		 * We must also recurse if DESTROY is set so we can finally
		 * get rid of the related children, otherwise the node will
		 * just get re-flushed on lastdrop.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
		info->parent = chain;
		hammer2_spin_ex(&chain->core.spin);
		RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
			NULL, hammer2_flush_recurse, info);
		hammer2_spin_unex(&chain->core.spin);
		info->parent = parent;
		if (info->diddeferral)
			hammer2_chain_setflush(chain);
	}

	/*
	 * Now we are in the bottom-up part of the recursion.
	 *
	 * Do not update chain if lower layers were deferred.
	 */
	if (info->diddeferral)
		goto done;

	/*
	 * Propagate the DESTROY flag downwards.  This dummies up the flush
	 * code and tries to invalidate related buffer cache buffers to
	 * avoid the disk write.
	 */
	if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);

	/*
	 * Chain was already modified or has become modified, flush it out.
	 */
again:
	if ((hammer2_debug & 0x200) &&
	    info->debug &&
	    (chain->flags & (HAMMER2_CHAIN_MODIFIED | HAMMER2_CHAIN_UPDATE))) {
		hammer2_chain_t *scan = chain;

		kprintf("DISCONNECTED FLUSH %p->%p\n", info->debug, chain);
		while (scan) {
			kprintf("    chain %p [%08x] bref=%016jx:%02x\n",
				scan, scan->flags,
				scan->bref.key, scan->bref.type);
			if (scan == info->debug)
				break;
			scan = scan->parent;
		}
	}

	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
		/*
		 * Dispose of the modified bit.
		 *
		 * If parent is present, the UPDATE bit should already be set.
		 * bref.mirror_tid should already be set.
		 */
		KKASSERT((chain->flags & HAMMER2_CHAIN_UPDATE) ||
			 chain->parent == NULL);
		if (hammer2_debug & 0x800000) {
			hammer2_chain_t *pp;

			for (pp = chain; pp->parent; pp = pp->parent)
				;
			kprintf("FLUSH CHAIN %p (p=%p pp=%p/%d) TYPE %d "
				"FLAGS %08x (%s)\n",
				chain, chain->parent, pp, pp->bref.type,
				chain->bref.type, chain->flags,
				(chain->bref.type == HAMMER2_BREF_TYPE_INODE ?
				 (const char *)chain->data->ipdata.filename :
				 "?"));
			print_backtrace(10);
		}
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		atomic_add_long(&hammer2_count_modified_chains, -1);

		/*
		 * Manage threads waiting for excessive dirty memory to
		 * be retired.
		 */
		if (chain->pmp)
			hammer2_pfs_memory_wakeup(chain->pmp);

#if 0
		if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
		    chain != &hmp->vchain &&
		    chain != &hmp->fchain) {
			/*
			 * Set UPDATE bit indicating that the parent block
			 * table requires updating.
			 */
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
		}
#endif

		/*
		 * Issue the flush.  This is indirect via the DIO.
		 *
		 * NOTE: A DELETED node that reaches this point must be
		 *	 flushed for synchronization point consistency.
		 *
		 * NOTE: Even though MODIFIED was already set, the related DIO
		 *	 might not be dirty due to a system buffer cache
		 *	 flush and must be set dirty if we are going to make
		 *	 further modifications to the buffer.  Chains with
		 *	 embedded data don't need this.
		 */
		if (hammer2_debug & 0x1000) {
			kprintf("Flush %p.%d %016jx/%d data=%016jx",
				chain, chain->bref.type,
				(uintmax_t)chain->bref.key,
				chain->bref.keybits,
				(uintmax_t)chain->bref.data_off);
		}
		if (hammer2_debug & 0x2000) {
			Debugger("Flush hell");
		}

		/*
		 * Update chain CRCs for flush.
		 *
		 * NOTE: Volume headers are NOT flushed here as they require
		 *	 special processing.
		 */
		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_FREEMAP:
			/*
			 * Update the volume header's freemap_tid to the
			 * freemap's flushing mirror_tid.
			 *
			 * (note: embedded data, do not call setdirty)
			 */
			KKASSERT(hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED);
			KKASSERT(chain == &hmp->fchain);
			hmp->voldata.freemap_tid = chain->bref.mirror_tid;
			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("sync freemap mirror_tid %08jx\n",
					(intmax_t)chain->bref.mirror_tid);
			}

			/*
			 * The freemap can be flushed independently of the
			 * main topology, but for the case where it is
			 * flushed in the same transaction, and flushed
			 * before vchain (a case we want to allow for
			 * performance reasons), make sure modifications
			 * made during the flush under vchain use a new
			 * transaction id.
			 *
			 * Otherwise the mount recovery code will get confused.
			 */
			++hmp->voldata.mirror_tid;
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			/*
			 * The free block table is flushed by
			 * hammer2_vfs_sync() before it flushes vchain.
			 * We must still hold fchain locked while copying
			 * voldata to volsync, however.
			 *
			 * (note: embedded data, do not call setdirty)
			 */
			hammer2_chain_lock(&hmp->fchain,
					   HAMMER2_RESOLVE_ALWAYS);
			hammer2_voldata_lock(hmp);
			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("sync volume  mirror_tid %08jx\n",
					(intmax_t)chain->bref.mirror_tid);
			}

			/*
			 * Update the volume header's mirror_tid to the
			 * main topology's flushing mirror_tid.  It is
			 * possible that voldata.mirror_tid is already
			 * beyond bref.mirror_tid due to the bump we made
			 * above in BREF_TYPE_FREEMAP.
			 */
			if (hmp->voldata.mirror_tid < chain->bref.mirror_tid) {
				hmp->voldata.mirror_tid =
					chain->bref.mirror_tid;
			}

			/*
			 * The volume header is flushed manually by the
			 * syncer, not here.  All we do here is adjust the
			 * crc's.
			 */
			KKASSERT(chain->data != NULL);
			KKASSERT(chain->dio == NULL);

			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1] =
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRC1_OFF,
					HAMMER2_VOLUME_ICRC1_SIZE);
			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0] =
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRC0_OFF,
					HAMMER2_VOLUME_ICRC0_SIZE);
			hmp->voldata.icrc_volheader =
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRCVH_OFF,
					HAMMER2_VOLUME_ICRCVH_SIZE);

			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("syncvolhdr %016jx %016jx\n",
					hmp->voldata.mirror_tid,
					hmp->vchain.bref.mirror_tid);
			}
			hmp->volsync = hmp->voldata;
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_VOLUMESYNC);
			hammer2_voldata_unlock(hmp);
			hammer2_chain_unlock(&hmp->fchain);
			break;
		case HAMMER2_BREF_TYPE_DATA:
			/*
			 * Data elements have already been flushed via the
			 * logical file buffer cache.  Their hash was set in
			 * the bref by the vop_write code.  Do not re-dirty.
			 *
			 * Make sure any device buffer(s) have been flushed
			 * out here (there aren't usually any to flush) XXX.
			 */
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
			/*
			 * Buffer I/O will be cleaned up when the volume is
			 * flushed (but the kernel is free to flush it before
			 * then, as well).
			 */
			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
			hammer2_chain_setcheck(chain, chain->data);
			break;
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * NOTE: We must call io_setdirty() to make any late
			 *	 changes to the inode data, the system might
			 *	 have already flushed the buffer.
			 */
			if (chain->data->ipdata.meta.op_flags &
			    HAMMER2_OPFLAG_PFSROOT) {
				/*
				 * non-NULL pmp if mounted as a PFS.  We must
				 * sync fields cached in the pmp? XXX
				 */
				hammer2_inode_data_t *ipdata;

				hammer2_io_setdirty(chain->dio);
				ipdata = &chain->data->ipdata;
				if (chain->pmp) {
					ipdata->meta.pfs_inum =
						chain->pmp->inode_tid;
				}
			} else {
				/* can't be mounted as a PFS */
			}

			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
			hammer2_chain_setcheck(chain, chain->data);
			break;
		default:
			KKASSERT(chain->flags & HAMMER2_CHAIN_EMBEDDED);
			panic("hammer2_flush_core: unsupported "
			      "embedded bref %d",
			      chain->bref.type);
			/* NOT REACHED */
		}

		/*
		 * If the chain was destroyed try to avoid unnecessary I/O.
		 * (this only really works if the DIO system buffer is the
		 * same size as chain->bytes).
		 */
		if ((chain->flags & HAMMER2_CHAIN_DESTROY) &&
		    (chain->flags & HAMMER2_CHAIN_DEDUP) == 0 &&
		    chain->dio) {
			hammer2_io_setinval(chain->dio,
					    chain->bref.data_off,
					    chain->bytes);
		}
	}

	/*
	 * If UPDATE is set the parent block table may need to be updated.
	 *
	 * NOTE: UPDATE may be set on vchain or fchain in which case
	 *	 parent could be NULL.  It's easiest to allow the case
	 *	 and test for NULL.  parent can also wind up being NULL
	 *	 due to a deletion so we need to handle the case anyway.
	 *
	 * If no parent exists we can just clear the UPDATE bit.  If the
	 * chain gets reattached later on the bit will simply get set
	 * again.
	 */
	if ((chain->flags & HAMMER2_CHAIN_UPDATE) && parent == NULL)
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);

	/*
	 * The chain may need its blockrefs updated in the parent.  This
	 * requires some fancy footwork.
	 */
	if (chain->flags & HAMMER2_CHAIN_UPDATE) {
		hammer2_blockref_t *base;
		int count;

		/*
		 * Both parent and chain must be locked.  This requires
		 * temporarily unlocking the chain.  We have to deal with
		 * the case where the chain might be reparented or modified
		 * while it was unlocked.
		 */
		hammer2_chain_unlock(chain);
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE);
		if (chain->parent != parent) {
			kprintf("PARENT MISMATCH ch=%p p=%p/%p\n",
				chain, chain->parent, parent);
			hammer2_chain_unlock(parent);
			goto done;
		}

		/*
		 * Check race condition.  If someone got in and modified
		 * it again while it was unlocked, we have to loop up.
		 */
		if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
			hammer2_chain_unlock(parent);
			kprintf("hammer2_flush: chain %p flush-mod race\n",
				chain);
			goto again;
		}

		/*
		 * Clear UPDATE flag, mark parent modified, update its
		 * modify_tid if necessary, and adjust the parent blockmap.
		 */
		if (chain->flags & HAMMER2_CHAIN_UPDATE)
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);

		/*
		 * (optional code)
		 *
		 * Avoid actually modifying and updating the parent if it
		 * was flagged for destruction.  This can greatly reduce
		 * disk I/O in large tree removals because the
		 * hammer2_io_setinval() call in the upward recursion
		 * (see MODIFIED code above) can only handle a few cases.
		 */
		if (parent->flags & HAMMER2_CHAIN_DESTROY) {
			if (parent->bref.modify_tid < chain->bref.modify_tid) {
				parent->bref.modify_tid =
					chain->bref.modify_tid;
			}
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_BMAPPED |
							HAMMER2_CHAIN_BMAPUPD);
			hammer2_chain_unlock(parent);
			goto skipupdate;
		}

		/*
		 * We are updating the parent's blockmap, the parent must
		 * be set modified.
		 */
		hammer2_chain_modify(parent, 0, 0, 0);
		if (parent->bref.modify_tid < chain->bref.modify_tid)
			parent->bref.modify_tid = chain->bref.modify_tid;

		/*
		 * Calculate blockmap pointer
		 */
		switch(parent->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * Access the inode's block array.  However, there is
			 * no block array if the inode is flagged DIRECTDATA.
			 */
			if (parent->data &&
			    (parent->data->ipdata.meta.op_flags &
			     HAMMER2_OPFLAG_DIRECTDATA) == 0) {
				base = &parent->data->
					ipdata.u.blockset.blockref[0];
			} else {
				base = NULL;
			}
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
			if (parent->data)
				base = &parent->data->npdata[0];
			else
				base = NULL;
			count = parent->bytes / sizeof(hammer2_blockref_t);
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			base = &chain->hmp->voldata.sroot_blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_FREEMAP:
			base = &parent->data->npdata[0];
			count = HAMMER2_SET_COUNT;
			break;
		default:
			base = NULL;
			count = 0;
			panic("hammer2_flush_core: "
			      "unrecognized blockref type: %d",
			      parent->bref.type);
		}

		/*
		 * Blocktable updates
		 *
		 * We synchronize pending statistics at this time.  Delta
		 * adjustments designated for the current and upper level
		 * are synchronized.
		 */
		if (base && (chain->flags & HAMMER2_CHAIN_BMAPUPD)) {
			if (chain->flags & HAMMER2_CHAIN_BMAPPED) {
				hammer2_spin_ex(&parent->core.spin);
				hammer2_base_delete(parent, base, count,
						    &info->cache_index, chain);
				hammer2_spin_unex(&parent->core.spin);
				/* base_delete clears both bits */
			} else {
				atomic_clear_int(&chain->flags,
						 HAMMER2_CHAIN_BMAPUPD);
			}
		}
		if (base && (chain->flags & HAMMER2_CHAIN_BMAPPED) == 0) {
			hammer2_spin_ex(&parent->core.spin);
			hammer2_base_insert(parent, base, count,
					    &info->cache_index, chain);
			hammer2_spin_unex(&parent->core.spin);
			/* base_insert sets BMAPPED */
		}
		hammer2_chain_unlock(parent);
	}
skipupdate:
	;

	/*
	 * Final cleanup after flush
	 */
done:
	KKASSERT(chain->refs > 0);
	if (hammer2_debug & 0x200) {
		if (info->debug == chain)
			info->debug = NULL;
	}
}

/*
 * Flush recursion helper, called from flush_core, calls flush_core.
 *
 * Flushes the children of the caller's chain (info->parent), restricted
 * by sync_tid.  Set info->domodify if the child's blockref must propagate
 * back up to the parent.
 *
 * Ripouts can move a child from the rbtree to the dbtree or dbq, but the
 * caller's flush scan order prevents any chains from being lost.  A child
 * may be processed more than once.
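 *
 * Note that the child is referenced and the parent is unlocked before
 * the child is locked; the parent's spinlock cannot be held across a
 * blocking chain lock.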
 *
 * WARNING! If we do not call hammer2_flush_core() we must update
 *	    bref.mirror_tid ourselves to indicate that the flush has
 *	    processed the child.
 *
 * WARNING! parent->core spinlock is held on entry and return.
 */
static int
hammer2_flush_recurse(hammer2_chain_t *child, void *data)
{
	hammer2_flush_info_t *info = data;
	hammer2_chain_t *parent = info->parent;

	/*
	 * (child can never be fchain or vchain so a special check isn't
	 *  needed).
	 *
	 * We must ref the child before unlocking the spinlock.
	 *
	 * The caller has added a ref to the parent so we can temporarily
	 * unlock it in order to lock the child.
	 */
	hammer2_chain_ref(child);
	hammer2_spin_unex(&parent->core.spin);

	hammer2_chain_unlock(parent);
	hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);

	/*
	 * Must propagate the DESTROY flag downwards, otherwise the
	 * parent could end up never being removed because it will
	 * be requeued to the flusher if it survives this run due to
	 * the flag.
	 */
	if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
		atomic_set_int(&child->flags, HAMMER2_CHAIN_DESTROY);

	/*
	 * Recurse and collect deferral data.  We're in the media flush,
	 * this can cross PFS boundaries.
	 */
	if (child->flags & HAMMER2_CHAIN_FLUSH_MASK) {
		++info->depth;
		hammer2_flush_core(info, child, info->flags);
		--info->depth;
	} else if (hammer2_debug & 0x200) {
		if (info->debug == NULL)
			info->debug = child;
		++info->depth;
		hammer2_flush_core(info, child, info->flags);
		--info->depth;
		if (info->debug == child)
			info->debug = NULL;
	}

	/*
	 * Relock to continue the loop
	 */
	hammer2_chain_unlock(child);
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
	hammer2_chain_drop(child);
	KKASSERT(info->parent == parent);
	hammer2_spin_ex(&parent->core.spin);

	return (0);
}

/*
 * flush helper (direct)
 *
 * Quickly flushes any dirty chains for a device.  This will update our
 * concept of the volume root but does NOT flush the actual volume root
 * and does not flush dirty device buffers.
 *
 * This function is primarily used by the bulkfree code to allow it to
 * create a snapshot for the pass.  It doesn't care about any pending
 * work (dirty vnodes, dirty inodes, dirty logical buffers) for which blocks
 * have not yet been allocated.
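 *
 * Note that this runs as a TRANS_ISFLUSH transaction on the spmp and
 * therefore interlocks against other spmp transactions as described
 * above hammer2_trans_init().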
 */
void
hammer2_flush_quick(hammer2_dev_t *hmp)
{
	hammer2_chain_t *chain;

	hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);

	hammer2_chain_ref(&hmp->vchain);
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		chain = &hmp->vchain;
		hammer2_flush(chain, HAMMER2_FLUSH_TOP |
				     HAMMER2_FLUSH_ALL);
		KKASSERT(chain == &hmp->vchain);
	}
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->vchain);

	hammer2_trans_done(hmp->spmp);  /* spmp trans */
}

/*
 * flush helper (backend threaded)
 *
 * Flushes core chains, issues disk sync, flushes volume roots.
 *
 * Primarily called from vfs_sync().
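 *
 * Runs once per cluster node (clindex).  Only one backend per underlying
 * hammer2_dev actually flushes the volume roots; duplicates are skipped
 * via the scan of lower cluster indices below.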
 */
void
hammer2_inode_xop_flush(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_flush_t *xop = &arg->xop_flush;
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;
	hammer2_dev_t *hmp;
	int error = 0;
	int total_error = 0;
	int j;

	/*
	 * Flush core chains
	 */
	chain = hammer2_inode_chain(xop->head.ip1, clindex,
				    HAMMER2_RESOLVE_ALWAYS);
	if (chain) {
		hmp = chain->hmp;
		if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) ||
		    TAILQ_FIRST(&hmp->flushq) != NULL) {
			hammer2_flush(chain, HAMMER2_FLUSH_TOP);
			parent = chain->parent;
			KKASSERT(chain->pmp != parent->pmp);
			hammer2_chain_setflush(parent);
		}
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
	} else {
		hmp = NULL;
	}

	/*
	 * Flush volume roots.  Avoid replication, we only want to
	 * flush each hammer2_dev (hmp) once.
	 */
	for (j = clindex - 1; j >= 0; --j) {
		if ((chain = xop->head.ip1->cluster.array[j].chain) != NULL) {
			if (chain->hmp == hmp) {
				chain = NULL;	/* safety */
				goto skip;
			}
		}
	}
	chain = NULL;	/* safety */

	/*
	 * spmp transaction.  The super-root is never directly mounted so
	 * there shouldn't be any vnodes, let alone any dirty vnodes
	 * associated with it, so we shouldn't have to mess around with any
	 * vnode flushes here.
	 */
	hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);

	/*
	 * Media mounts have two 'roots', vchain for the topology
	 * and fchain for the free block table.  Flush both.
	 *
	 * Note that the topology and free block table are handled
	 * independently, so the free block table can wind up being
	 * ahead of the topology.  We depend on the bulk free scan
	 * code to deal with any loose ends.
	 */
	hammer2_chain_ref(&hmp->vchain);
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_ref(&hmp->fchain);
	hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		/*
		 * This will also modify vchain as a side effect,
		 * mark vchain as modified now.
		 */
		hammer2_voldata_modify(hmp);
		chain = &hmp->fchain;
		hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		KKASSERT(chain == &hmp->fchain);
	}
	hammer2_chain_unlock(&hmp->fchain);
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->fchain);
	/* vchain dropped down below */

	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		chain = &hmp->vchain;
		hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		KKASSERT(chain == &hmp->vchain);
	}
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->vchain);

	error = 0;

	/*
	 * We can't safely flush the volume header until we have
	 * flushed any device buffers which have built up.
	 *
	 * XXX this isn't being incremental
	 */
	vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(hmp->devvp, MNT_WAIT, 0);
	vn_unlock(hmp->devvp);

	/*
	 * The flush code sets CHAIN_VOLUMESYNC to indicate that the
	 * volume header needs synchronization via hmp->volsync.
	 *
	 * XXX synchronize the flag & data with only this flush XXX
	 */
	if (error == 0 &&
	    (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) {
		struct buf *bp;

		/*
		 * Synchronize the disk before flushing the volume
		 * header.
		 */
		bp = getpbuf(NULL);
		bp->b_bio1.bio_offset = 0;
		bp->b_bufsize = 0;
		bp->b_bcount = 0;
		bp->b_cmd = BUF_CMD_FLUSH;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;
		vn_strategy(hmp->devvp, &bp->b_bio1);
		biowait(&bp->b_bio1, "h2vol");
		relpbuf(bp, NULL);

		/*
		 * Then we can safely flush the version of the
		 * volume header synchronized by the flush code.
		 */
		j = hmp->volhdrno + 1;
		if (j >= HAMMER2_NUM_VOLHDRS)
			j = 0;
		if (j * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE >
		    hmp->volsync.volu_size) {
			j = 0;
		}
		if (hammer2_debug & 0x8000) {
			/* debug only, avoid syslogd loop */
			kprintf("sync volhdr %d %jd\n",
				j, (intmax_t)hmp->volsync.volu_size);
		}
		bp = getblk(hmp->devvp, j * HAMMER2_ZONE_BYTES64,
			    HAMMER2_PBUFSIZE, 0, 0);
		atomic_clear_int(&hmp->vchain.flags,
				 HAMMER2_CHAIN_VOLUMESYNC);
		bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE);
		bawrite(bp);
		hmp->volhdrno = j;
	}
	if (error)
		total_error = error;

	hammer2_trans_done(hmp->spmp);  /* spmp trans */
skip:
	error = hammer2_xop_feed(&xop->head, NULL, clindex, total_error);
}