xref: /dragonfly/sys/vfs/hammer2/hammer2_flush.c (revision 59b0b316)
1 /*
2  * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 /*
36  *			TRANSACTION AND FLUSH HANDLING
37  *
38  * Deceptively simple but actually fairly difficult to implement properly is
39  * how I would describe it.
40  *
41  * Flushing generally occurs bottom-up but requires a top-down scan to
42  * locate chains with MODIFIED and/or UPDATE bits set.  The ONFLUSH flag
 43  * tells the top-down scan how to recurse downward to find these chains.
44  */
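/*
 * Illustrative sketch (not compiled): the top-down scan only descends
 * into sub-trees whose root carries ONFLUSH; the actual flush work then
 * happens on the way back up.
 */
#if 0
	if ((chain->flags & HAMMER2_CHAIN_ONFLUSH) == 0)
		return;		/* nothing dirty at or below this point */
	/* recurse into children first (top-down locate) ... */
	/* ... then flush this chain itself (bottom-up update) */
#endif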
45 
46 #include <sys/cdefs.h>
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/types.h>
50 #include <sys/lock.h>
51 #include <sys/uuid.h>
52 
53 #include "hammer2.h"
54 
55 #define FLUSH_DEBUG 0
56 
57 #define HAMMER2_FLUSH_DEPTH_LIMIT       10      /* stack recursion limit */
58 
59 
60 /*
61  * Recursively flush the specified chain.  The chain is locked and
62  * referenced by the caller and will remain so on return.  The chain
63  * will remain referenced throughout but can temporarily lose its
64  * lock during the recursion to avoid unnecessarily stalling user
65  * processes.
66  */
67 struct hammer2_flush_info {
68 	hammer2_chain_t *parent;		/* current parent during scan */
69 	int		depth;			/* current recursion depth */
70 	int		diddeferral;		/* # of deferrals this level */
71 	int		cache_index;		/* blockref lookup cache hint */
72 	int		flags;			/* HAMMER2_FLUSH_* control flags */
73 	struct h2_flush_list flushq;		/* deferred (too-deep) chains */
74 	hammer2_chain_t	*debug;			/* debug trace anchor chain */
75 };
76 
77 typedef struct hammer2_flush_info hammer2_flush_info_t;
78 
79 static void hammer2_flush_core(hammer2_flush_info_t *info,
80 				hammer2_chain_t *chain, int flags);
81 static int hammer2_flush_recurse(hammer2_chain_t *child, void *data);
82 
83 /*
84  * Any per-pfs transaction initialization goes here.
85  */
86 void
87 hammer2_trans_manage_init(hammer2_pfs_t *pmp)
88 {
89 }
90 
91 /*
92  * Transaction support for any modifying operation.  Transactions are used
93  * in the pmp layer by the frontend and in the spmp layer by the backend.
94  *
95  * 0			- Normal transaction, interlocked against flush
96  *			  transaction.
97  *
98  * TRANS_ISFLUSH	- Flush transaction, interlocked against normal
99  *			  transaction.
100  *
101  * TRANS_BUFCACHE	- Buffer cache transaction, no interlock.
102  *
103  * Initializing a new transaction allocates a transaction ID.  Typically
104  * passed a pmp (hmp passed as NULL), indicating a cluster transaction.  Can
105  * be passed a NULL pmp and non-NULL hmp to indicate a transaction on a single
106  * media target.  The latter mode is used by the recovery code.
107  *
108  * TWO TRANSACTION IDs can run concurrently, where one is a flush and the
109  * other is a set of any number of concurrent filesystem operations.  We
110  * can either have <running_fs_ops> + <waiting_flush> + <blocked_fs_ops>
111  * or we can have <running_flush> + <concurrent_fs_ops>.
112  *
113  * During a flush, new fs_ops are only blocked until the fs_ops prior to
114  * the flush complete.  The new fs_ops can then run concurrent with the flush.
115  *
116  * Buffer-cache transactions operate as fs_ops but never block.  A
117  * buffer-cache flush will run either before or after the current pending
118  * flush depending on its state.
119  */
120 void
121 hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags)
122 {
123 	uint32_t oflags;
124 	uint32_t nflags;
125 	int dowait;
126 
127 	for (;;) {
128 		oflags = pmp->trans.flags;
129 		cpu_ccfence();
130 		dowait = 0;
131 
132 		if (flags & HAMMER2_TRANS_ISFLUSH) {
133 			/*
134 			 * Requesting flush transaction.  Wait for all
135 			 * currently running transactions to finish.
136 			 * Afterwards, normal transactions will be
137 			 * interlocked.
138 			 */
139 			if (oflags & HAMMER2_TRANS_MASK) {
140 				nflags = oflags | HAMMER2_TRANS_FPENDING |
141 						  HAMMER2_TRANS_WAITING;
142 				dowait = 1;
143 			} else {
144 				nflags = (oflags | flags) + 1;
145 			}
146 		} else if (flags & HAMMER2_TRANS_BUFCACHE) {
147 			/*
148 			 * Requesting strategy transaction from buffer-cache,
149 			 * or a VM getpages/putpages through the buffer cache.
150 			 * We must allow such transactions in all situations
151 			 * to avoid deadlocks.
152 			 */
153 			nflags = (oflags | flags) + 1;
154 #if 0
155 			/*
156 			 * (old) previous code interlocked against the main
157 			 *	 flush pass.
158 			 */
159 			if ((oflags & (HAMMER2_TRANS_ISFLUSH |
160 				       HAMMER2_TRANS_PREFLUSH)) ==
161 			    HAMMER2_TRANS_ISFLUSH) {
162 				nflags = oflags | HAMMER2_TRANS_WAITING;
163 				dowait = 1;
164 			} else {
165 				nflags = (oflags | flags) + 1;
166 			}
167 #endif
168 		} else {
169 			/*
170 			 * Requesting normal modifying transaction (read-only
171 			 * operations do not use transactions).  Waits for
172 			 * any flush to finish before allowing.  Multiple
173 			 * modifying transactions can run concurrently.
174 			 */
175 			if (oflags & HAMMER2_TRANS_ISFLUSH) {
176 				nflags = oflags | HAMMER2_TRANS_WAITING;
177 				dowait = 1;
178 			} else {
179 				nflags = (oflags | flags) + 1;
180 			}
181 		}
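		/*
		 * An interlocked sleep is used below: tsleep_interlock()
		 * registers the sleeper before the CAS publishes the
		 * FPENDING/WAITING bits, so a wakeup racing the CAS cannot
		 * be lost; tsleep() with PINTERLOCKED then consumes the
		 * pre-registration.
		 */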
182 		if (dowait)
183 			tsleep_interlock(&pmp->trans.sync_wait, 0);
184 		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
185 			if (dowait == 0)
186 				break;
187 			tsleep(&pmp->trans.sync_wait, PINTERLOCKED,
188 			       "h2trans", hz);
189 		} else {
190 			cpu_pause();
191 		}
192 		/* retry */
193 	}
194 }
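/*
 * Illustrative sketch (not compiled): how the three transaction classes
 * documented above are typically entered and exited.  The operations in
 * between are hypothetical placeholders.
 */
#if 0
	/* normal modifying op: blocks only while a flush is pending */
	hammer2_trans_init(pmp, 0);
	/* ... modify chains ... */
	hammer2_trans_done(pmp);

	/* flush: waits for prior fs_ops, then interlocks new ones */
	hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
	/* ... hammer2_flush() dirty roots ... */
	hammer2_trans_done(pmp);

	/* buffer-cache strategy I/O: never blocks, avoids deadlocks */
	hammer2_trans_init(pmp, HAMMER2_TRANS_BUFCACHE);
	/* ... issue strategy I/O ... */
	hammer2_trans_done(pmp);
#endif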
195 
196 /*
197  * Start a sub-transaction, there is no 'subdone' function.  This will
198  * issue a new modify_tid (mtid) for the current transaction, which is a
199  * CLC (cluster level change) id and not a per-node id.
200  *
201  * This function must be called for each XOP when multiple XOPs are run in
202  * sequence within a transaction.
203  *
204  * Callers typically update the inode with the transaction mtid manually
205  * to enforce sequencing.
206  */
207 hammer2_tid_t
208 hammer2_trans_sub(hammer2_pfs_t *pmp)
209 {
210 	hammer2_tid_t mtid;
211 
212 	mtid = atomic_fetchadd_64(&pmp->modify_tid, 1);
213 
214 	return (mtid);
215 }
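/*
 * Illustrative sketch (not compiled): two XOPs run in sequence within a
 * single transaction each take a fresh mtid; stamping the inode with the
 * newest mtid (as noted above) enforces ordering.
 */
#if 0
	mtid = hammer2_trans_sub(pmp);
	/* ... dispatch first XOP using mtid ... */
	mtid = hammer2_trans_sub(pmp);
	/* ... dispatch second XOP using the newer mtid ... */
#endif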
216 
217 void
218 hammer2_trans_done(hammer2_pfs_t *pmp)
219 {
220 	uint32_t oflags;
221 	uint32_t nflags;
222 
223 	for (;;) {
224 		oflags = pmp->trans.flags;
225 		cpu_ccfence();
226 		KKASSERT(oflags & HAMMER2_TRANS_MASK);
227 		if ((oflags & HAMMER2_TRANS_MASK) == 1) {
228 			/*
229 			 * This was the last transaction
230 			 */
231 			nflags = (oflags - 1) & ~(HAMMER2_TRANS_ISFLUSH |
232 						  HAMMER2_TRANS_BUFCACHE |
233 						  HAMMER2_TRANS_FPENDING |
234 						  HAMMER2_TRANS_WAITING);
235 		} else {
236 			/*
237 			 * Still transactions pending
238 			 */
239 			nflags = oflags - 1;
240 		}
241 		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
242 			if ((nflags & HAMMER2_TRANS_MASK) == 0 &&
243 			    (oflags & HAMMER2_TRANS_WAITING)) {
244 				wakeup(&pmp->trans.sync_wait);
245 			}
246 			break;
247 		} else {
248 			cpu_pause();
249 		}
250 		/* retry */
251 	}
252 }
253 
254 /*
255  * Obtain new, unique inode number (not serialized by caller).
256  */
257 hammer2_tid_t
258 hammer2_trans_newinum(hammer2_pfs_t *pmp)
259 {
260 	hammer2_tid_t tid;
261 
262 	tid = atomic_fetchadd_64(&pmp->inode_tid, 1);
263 
264 	return tid;
265 }
266 
267 /*
268  * Assert that a strategy call is ok here.  Currently we allow strategy
269  * calls in all situations, including during flushes.  Previously:
270  *	(old) (1) In a normal transaction.
271  *	(old) (2) In a flush transaction only if PREFLUSH is also set.
272  */
273 void
274 hammer2_trans_assert_strategy(hammer2_pfs_t *pmp)
275 {
276 #if 0
277 	KKASSERT((pmp->trans.flags & HAMMER2_TRANS_ISFLUSH) == 0 ||
278 		 (pmp->trans.flags & HAMMER2_TRANS_PREFLUSH));
279 #endif
280 }
281 
282 
283 /*
284  * Chains undergoing destruction are removed from the in-memory topology.
 285  * To avoid getting lost, these chains are placed on the delayed flush
286  * queue which will properly dispose of them.
287  *
288  * We do this instead of issuing an immediate flush in order to give
289  * recursive deletions (rm -rf, etc) a chance to remove more of the
290  * hierarchy, potentially allowing an enormous amount of write I/O to
291  * be avoided.
292  */
293 void
294 hammer2_delayed_flush(hammer2_chain_t *chain)
295 {
296 	if ((chain->flags & HAMMER2_CHAIN_DELAYED) == 0) {
297 		hammer2_spin_ex(&chain->hmp->list_spin);
298 		if ((chain->flags & (HAMMER2_CHAIN_DELAYED |
299 				     HAMMER2_CHAIN_DEFERRED)) == 0) {
300 			atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELAYED |
301 						      HAMMER2_CHAIN_DEFERRED);
302 			TAILQ_INSERT_TAIL(&chain->hmp->flushq,
303 					  chain, flush_node);
304 			hammer2_chain_ref(chain);
305 		}
306 		hammer2_spin_unex(&chain->hmp->list_spin);
307 		hammer2_voldata_modify(chain->hmp);
308 	}
309 }
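/*
 * Illustrative sketch (hypothetical caller, not compiled): a chain being
 * ripped out of the topology is queued here rather than flushed
 * synchronously, letting a recursive deletion batch its work.
 */
#if 0
	if (chain->flags & HAMMER2_CHAIN_DESTROY)
		hammer2_delayed_flush(chain);
#endif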
310 
311 /*
312  * Flush the chain and all modified sub-chains through the specified
313  * synchronization point, propagating blockref updates back up.  As
314  * part of this propagation, mirror_tid and inode/data usage statistics
 315  * propagate back upward.
316  *
317  * modify_tid (clc - cluster level change) is not propagated.
318  *
319  * update_tid (clc) is used for validation and is not propagated by this
320  * function.
321  *
322  * This routine can be called from several places but the most important
323  * is from VFS_SYNC (frontend) via hammer2_inode_xop_flush (backend).
324  *
325  * chain is locked on call and will remain locked on return.  The chain's
326  * UPDATE flag indicates that its parent's block table (which is not yet
327  * part of the flush) should be updated.  The chain may be replaced by
328  * the call if it was modified.
329  */
330 void
331 hammer2_flush(hammer2_chain_t *chain, int flags)
332 {
333 	hammer2_chain_t *scan;
334 	hammer2_flush_info_t info;
335 	hammer2_dev_t *hmp;
336 	int loops;
337 
338 	/*
339 	 * Execute the recursive flush and handle deferrals.
340 	 *
341 	 * Chains can be ridiculously long (thousands deep), so to
342 	 * avoid blowing out the kernel stack the recursive flush has a
343 	 * depth limit.  Elements at the limit are placed on a list
344 	 * for re-execution after the stack has been popped.
345 	 */
346 	bzero(&info, sizeof(info));
347 	TAILQ_INIT(&info.flushq);
348 	info.cache_index = -1;
349 	info.flags = flags & ~HAMMER2_FLUSH_TOP;
350 
351 	/*
352 	 * Calculate parent (can be NULL), if not NULL the flush core
353 	 * expects the parent to be referenced so it can easily lock/unlock
354 	 * it without it getting ripped up.
355 	 */
356 	if ((info.parent = chain->parent) != NULL)
357 		hammer2_chain_ref(info.parent);
358 
359 	/*
360 	 * Extra ref needed because flush_core expects it when replacing
361 	 * chain.
362 	 */
363 	hammer2_chain_ref(chain);
364 	hmp = chain->hmp;
365 	loops = 0;
366 
367 	for (;;) {
368 		/*
369 		 * Move hmp->flushq to info.flushq if non-empty so it can
370 		 * be processed.
371 		 */
372 		if (TAILQ_FIRST(&hmp->flushq) != NULL) {
373 			hammer2_spin_ex(&chain->hmp->list_spin);
374 			TAILQ_CONCAT(&info.flushq, &hmp->flushq, flush_node);
375 			hammer2_spin_unex(&chain->hmp->list_spin);
376 		}
377 
378 		/*
379 		 * Unwind deep recursions which had been deferred.  This
380 		 * can leave the FLUSH_* bits set for these chains, which
381 		 * will be handled when we [re]flush chain after the unwind.
382 		 */
383 		while ((scan = TAILQ_FIRST(&info.flushq)) != NULL) {
384 			KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
385 			TAILQ_REMOVE(&info.flushq, scan, flush_node);
386 			atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED |
387 						       HAMMER2_CHAIN_DELAYED);
388 
389 			/*
390 			 * Now that we've popped back up we can do a secondary
391 			 * recursion on the deferred elements.
392 			 *
393 			 * NOTE: hammer2_flush() may replace scan.
394 			 */
395 			if (hammer2_debug & 0x0040)
396 				kprintf("deferred flush %p\n", scan);
397 			hammer2_chain_lock(scan, HAMMER2_RESOLVE_MAYBE);
398 			hammer2_flush(scan, flags & ~HAMMER2_FLUSH_TOP);
399 			hammer2_chain_unlock(scan);
400 			hammer2_chain_drop(scan);	/* ref from deferral */
401 		}
402 
403 		/*
404 		 * [re]flush chain.
405 		 */
406 		info.diddeferral = 0;
407 		hammer2_flush_core(&info, chain, flags);
408 
409 		/*
410 		 * Only loop if deep recursions have been deferred.
411 		 */
412 		if (TAILQ_EMPTY(&info.flushq))
413 			break;
414 
415 		if (++loops % 1000 == 0) {
416 			kprintf("hammer2_flush: excessive loops on %p\n",
417 				chain);
418 			if (hammer2_debug & 0x100000)
419 				Debugger("hell4");
420 		}
421 	}
422 	hammer2_chain_drop(chain);
423 	if (info.parent)
424 		hammer2_chain_drop(info.parent);
425 }
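/*
 * Illustrative usage sketch (not compiled): callers hold both a lock and
 * a ref across the call, as hammer2_flush_quick() below does for the
 * volume root.
 */
#if 0
	hammer2_chain_ref(chain);
	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
	hammer2_flush(chain, HAMMER2_FLUSH_TOP);
	hammer2_chain_unlock(chain);
	hammer2_chain_drop(chain);
#endif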
426 
427 /*
428  * This is the core of the chain flushing code.  The chain is locked by the
429  * caller and must also have an extra ref on it by the caller, and remains
430  * locked and will have an extra ref on return.  Upon return, the caller can
431  * test the UPDATE bit on the child to determine if the parent needs updating.
432  *
 433  * (1) Determine if this node is a candidate for the flush; return if it is
434  *     not.  fchain and vchain are always candidates for the flush.
435  *
436  * (2) If we recurse too deep the chain is entered onto the deferral list and
437  *     the current flush stack is aborted until after the deferral list is
438  *     run.
439  *
440  * (3) Recursively flush live children (rbtree).  This can create deferrals.
441  *     A successful flush clears the MODIFIED and UPDATE bits on the children
442  *     and typically causes the parent to be marked MODIFIED as the children
443  *     update the parent's block table.  A parent might already be marked
 444  *     MODIFIED due to a deletion (whose block table update in the parent is
445  *     handled by the frontend), or if the parent itself is modified by the
446  *     frontend for other reasons.
447  *
448  * (4) Permanently disconnected sub-trees are cleaned up by the front-end.
449  *     Deleted-but-open inodes can still be individually flushed via the
450  *     filesystem syncer.
451  *
452  * (5) Delete parents on the way back up if they are normal indirect blocks
453  *     and have no children.
454  *
455  * (6) Note that an unmodified child may still need the block table in its
456  *     parent updated (e.g. rename/move).  The child will have UPDATE set
457  *     in this case.
458  *
459  *			WARNING ON BREF MODIFY_TID/MIRROR_TID
460  *
461  * blockref.modify_tid is consistent only within a PFS, and will not be
462  * consistent during synchronization.  mirror_tid is consistent across the
463  * block device regardless of the PFS.
464  */
465 static void
466 hammer2_flush_core(hammer2_flush_info_t *info, hammer2_chain_t *chain,
467 		   int flags)
468 {
469 	hammer2_chain_t *parent;
470 	hammer2_dev_t *hmp;
471 	int diddeferral;
472 
473 	/*
474 	 * (1) Optimize downward recursion to locate nodes needing action.
475 	 *     Nothing to do if none of these flags are set.
476 	 */
477 	if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) == 0) {
478 		if (hammer2_debug & 0x200) {
479 			if (info->debug == NULL)
480 				info->debug = chain;
481 		} else {
482 			return;
483 		}
484 	}
485 
486 	hmp = chain->hmp;
487 	diddeferral = info->diddeferral;
488 	parent = info->parent;		/* can be NULL */
489 
490 	/*
491 	 * Downward search recursion
492 	 */
493 	if (chain->flags & (HAMMER2_CHAIN_DEFERRED | HAMMER2_CHAIN_DELAYED)) {
494 		/*
495 		 * Already deferred.
496 		 */
497 		++info->diddeferral;
498 	} else if ((chain->flags & HAMMER2_CHAIN_PFSBOUNDARY) &&
499 		   (flags & HAMMER2_FLUSH_ALL) == 0 &&
500 		   (flags & HAMMER2_FLUSH_TOP) == 0) {
501 		/*
502 		 * We do not recurse through PFSROOTs.  PFSROOT flushes are
503 		 * handled by the related pmp's (whether mounted or not,
504 		 * including during recovery).
505 		 *
506 		 * But we must still process the PFSROOT chains for block
507 		 * table updates in their parent (which IS part of our flush).
508 		 *
509 		 * Note that the volume root, vchain, does not set this flag.
510 		 * Note the logic here requires that this test be done before
511 		 * the depth-limit test, else it might become the top on a
512 		 * flushq iteration.
513 		 */
514 		;
515 	} else if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT) {
516 		/*
517 		 * Recursion depth reached.
518 		 */
519 		KKASSERT((chain->flags & HAMMER2_CHAIN_DELAYED) == 0);
520 		hammer2_chain_ref(chain);
521 		TAILQ_INSERT_TAIL(&info->flushq, chain, flush_node);
522 		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
523 		++info->diddeferral;
524 	} else if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
525 				   HAMMER2_CHAIN_DESTROY)) {
526 		/*
527 		 * Downward recursion search (actual flush occurs bottom-up).
528 		 * pre-clear ONFLUSH.  It can get set again due to races,
529 		 * which we want so the scan finds us again in the next flush.
530 		 *
531 		 * We must also recurse if DESTROY is set so we can finally
532 		 * get rid of the related children, otherwise the node will
533 		 * just get re-flushed on lastdrop.
534 		 */
535 		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
536 		info->parent = chain;
537 		hammer2_spin_ex(&chain->core.spin);
538 		RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
539 			NULL, hammer2_flush_recurse, info);
540 		hammer2_spin_unex(&chain->core.spin);
541 		info->parent = parent;
542 		if (info->diddeferral)
543 			hammer2_chain_setflush(chain);
544 	}
545 
546 	/*
547 	 * Now we are in the bottom-up part of the recursion.
548 	 *
549 	 * Do not update chain if lower layers were deferred.
550 	 */
551 	if (info->diddeferral)
552 		goto done;
553 
554 	/*
555 	 * Propagate the DESTROY flag downwards.  This dummies up the flush
556 	 * code and tries to invalidate related buffer cache buffers to
557 	 * avoid the disk write.
558 	 */
559 	if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
560 		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);
561 
562 	/*
563 	 * Chain was already modified or has become modified, flush it out.
564 	 */
565 again:
566 	if ((hammer2_debug & 0x200) &&
567 	    info->debug &&
568 	    (chain->flags & (HAMMER2_CHAIN_MODIFIED | HAMMER2_CHAIN_UPDATE))) {
569 		hammer2_chain_t *scan = chain;
570 
571 		kprintf("DISCONNECTED FLUSH %p->%p\n", info->debug, chain);
572 		while (scan) {
573 			kprintf("    chain %p [%08x] bref=%016jx:%02x\n",
574 				scan, scan->flags,
575 				scan->bref.key, scan->bref.type);
576 			if (scan == info->debug)
577 				break;
578 			scan = scan->parent;
579 		}
580 	}
581 
582 	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
583 		/*
584 		 * Dispose of the modified bit.
585 		 *
586 		 * If parent is present, the UPDATE bit should already be set.
588 		 * bref.mirror_tid should already be set.
589 		 */
590 		KKASSERT((chain->flags & HAMMER2_CHAIN_UPDATE) ||
591 			 chain->parent == NULL);
592 		if (hammer2_debug & 0x800000) {
593 			hammer2_chain_t *pp;
594 
595 			for (pp = chain; pp->parent; pp = pp->parent)
596 				;
597 			kprintf("FLUSH CHAIN %p (p=%p pp=%p/%d) TYPE %d FLAGS %08x (%s)\n",
598 				chain, chain->parent, pp, pp->bref.type,
599 				chain->bref.type, chain->flags,
 600 				(chain->bref.type == HAMMER2_BREF_TYPE_INODE ?
 601 				 (const char *)chain->data->ipdata.filename : "?"));
603 			print_backtrace(10);
604 		}
605 		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
606 		atomic_add_long(&hammer2_count_modified_chains, -1);
607 
608 		/*
609 		 * Manage threads waiting for excessive dirty memory to
610 		 * be retired.
611 		 */
612 		if (chain->pmp)
613 			hammer2_pfs_memory_wakeup(chain->pmp);
614 
615 #if 0
616 		if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
617 		    chain != &hmp->vchain &&
618 		    chain != &hmp->fchain) {
619 			/*
620 			 * Set UPDATE bit indicating that the parent block
621 			 * table requires updating.
622 			 */
623 			atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
624 		}
625 #endif
626 
627 		/*
628 		 * Issue the flush.  This is indirect via the DIO.
629 		 *
630 		 * NOTE: A DELETED node that reaches this point must be
631 		 *	 flushed for synchronization point consistency.
632 		 *
633 		 * NOTE: Even though MODIFIED was already set, the related DIO
634 		 *	 might not be dirty due to a system buffer cache
635 		 *	 flush and must be set dirty if we are going to make
636 		 *	 further modifications to the buffer.  Chains with
637 		 *	 embedded data don't need this.
638 		 */
639 		if (hammer2_debug & 0x1000) {
640 			kprintf("Flush %p.%d %016jx/%d data=%016jx\n",
641 				chain, chain->bref.type,
642 				(uintmax_t)chain->bref.key,
643 				chain->bref.keybits,
644 				(uintmax_t)chain->bref.data_off);
645 		}
646 		if (hammer2_debug & 0x2000) {
647 			Debugger("Flush hell");
648 		}
649 
650 		/*
651 		 * Update chain CRCs for flush.
652 		 *
653 		 * NOTE: Volume headers are NOT flushed here as they require
654 		 *	 special processing.
655 		 */
656 		switch(chain->bref.type) {
657 		case HAMMER2_BREF_TYPE_FREEMAP:
658 			/*
659 			 * Update the volume header's freemap_tid to the
660 			 * freemap's flushing mirror_tid.
661 			 *
662 			 * (note: embedded data, do not call setdirty)
663 			 */
664 			KKASSERT(hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED);
665 			KKASSERT(chain == &hmp->fchain);
666 			hmp->voldata.freemap_tid = chain->bref.mirror_tid;
667 			if (hammer2_debug & 0x8000) {
668 				/* debug only, avoid syslogd loop */
669 				kprintf("sync freemap mirror_tid %08jx\n",
670 					(intmax_t)chain->bref.mirror_tid);
671 			}
672 
673 			/*
674 			 * The freemap can be flushed independently of the
675 			 * main topology, but for the case where it is
676 			 * flushed in the same transaction, and flushed
677 			 * before vchain (a case we want to allow for
678 			 * performance reasons), make sure modifications
679 			 * made during the flush under vchain use a new
680 			 * transaction id.
681 			 *
682 			 * Otherwise the mount recovery code will get confused.
683 			 */
684 			++hmp->voldata.mirror_tid;
685 			break;
686 		case HAMMER2_BREF_TYPE_VOLUME:
687 			/*
688 			 * The free block table is flushed by
689 			 * hammer2_vfs_sync() before it flushes vchain.
690 			 * We must still hold fchain locked while copying
691 			 * voldata to volsync, however.
692 			 *
693 			 * (note: embedded data, do not call setdirty)
694 			 */
695 			hammer2_chain_lock(&hmp->fchain,
696 					   HAMMER2_RESOLVE_ALWAYS);
697 			hammer2_voldata_lock(hmp);
698 			if (hammer2_debug & 0x8000) {
699 				/* debug only, avoid syslogd loop */
700 				kprintf("sync volume  mirror_tid %08jx\n",
701 					(intmax_t)chain->bref.mirror_tid);
702 			}
703 
704 			/*
705 			 * Update the volume header's mirror_tid to the
706 			 * main topology's flushing mirror_tid.  It is
707 			 * possible that voldata.mirror_tid is already
708 			 * beyond bref.mirror_tid due to the bump we made
709 			 * above in BREF_TYPE_FREEMAP.
710 			 */
711 			if (hmp->voldata.mirror_tid < chain->bref.mirror_tid) {
712 				hmp->voldata.mirror_tid =
713 					chain->bref.mirror_tid;
714 			}
715 
716 			/*
717 			 * The volume header is flushed manually by the
718 			 * syncer, not here.  All we do here is adjust the
719 			 * crc's.
720 			 */
721 			KKASSERT(chain->data != NULL);
722 			KKASSERT(chain->dio == NULL);
723 
724 			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1]=
725 				hammer2_icrc32(
726 					(char *)&hmp->voldata +
727 					 HAMMER2_VOLUME_ICRC1_OFF,
728 					HAMMER2_VOLUME_ICRC1_SIZE);
729 			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0]=
730 				hammer2_icrc32(
731 					(char *)&hmp->voldata +
732 					 HAMMER2_VOLUME_ICRC0_OFF,
733 					HAMMER2_VOLUME_ICRC0_SIZE);
734 			hmp->voldata.icrc_volheader =
735 				hammer2_icrc32(
736 					(char *)&hmp->voldata +
737 					 HAMMER2_VOLUME_ICRCVH_OFF,
738 					HAMMER2_VOLUME_ICRCVH_SIZE);
739 
740 			if (hammer2_debug & 0x8000) {
741 				/* debug only, avoid syslogd loop */
742 				kprintf("syncvolhdr %016jx %016jx\n",
743 					hmp->voldata.mirror_tid,
744 					hmp->vchain.bref.mirror_tid);
745 			}
746 			hmp->volsync = hmp->voldata;
747 			atomic_set_int(&chain->flags, HAMMER2_CHAIN_VOLUMESYNC);
748 			hammer2_voldata_unlock(hmp);
749 			hammer2_chain_unlock(&hmp->fchain);
750 			break;
751 		case HAMMER2_BREF_TYPE_DATA:
752 			/*
753 			 * Data elements have already been flushed via the
754 			 * logical file buffer cache.  Their hash was set in
755 			 * the bref by the vop_write code.  Do not re-dirty.
756 			 *
757 			 * Make sure any device buffer(s) have been flushed
758 			 * out here (there aren't usually any to flush) XXX.
759 			 */
760 			break;
761 		case HAMMER2_BREF_TYPE_INDIRECT:
762 		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
763 		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
764 			/*
765 			 * Buffer I/O will be cleaned up when the volume is
766 			 * flushed (but the kernel is free to flush it before
767 			 * then, as well).
768 			 */
769 			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
770 			hammer2_chain_setcheck(chain, chain->data);
771 			break;
772 		case HAMMER2_BREF_TYPE_INODE:
773 			/*
774 			 * NOTE: We must call io_setdirty() to make any late
775 			 *	 changes to the inode data, the system might
776 			 *	 have already flushed the buffer.
777 			 */
778 			if (chain->data->ipdata.meta.op_flags &
779 			    HAMMER2_OPFLAG_PFSROOT) {
780 				/*
781 				 * non-NULL pmp if mounted as a PFS.  We must
782 				 * sync fields cached in the pmp? XXX
783 				 */
784 				hammer2_inode_data_t *ipdata;
785 
786 				hammer2_io_setdirty(chain->dio);
787 				ipdata = &chain->data->ipdata;
788 				if (chain->pmp) {
789 					ipdata->meta.pfs_inum =
790 						chain->pmp->inode_tid;
791 				}
792 			} else {
793 				/* can't be mounted as a PFS */
794 			}
795 
796 			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
797 			hammer2_chain_setcheck(chain, chain->data);
798 			break;
799 		default:
800 			KKASSERT(chain->flags & HAMMER2_CHAIN_EMBEDDED);
801 			panic("hammer2_flush_core: unsupported "
802 			      "embedded bref %d",
803 			      chain->bref.type);
804 			/* NOT REACHED */
805 		}
806 
807 		/*
808 		 * If the chain was destroyed try to avoid unnecessary I/O.
809 		 * The DIO system buffer may silently disallow the
810 		 * invalidation.
811 		 */
812 		if (chain->flags & HAMMER2_CHAIN_DESTROY) {
813 			hammer2_io_t *dio;
814 
815 			if (chain->dio) {
816 				hammer2_io_setinval(chain->dio,
817 						    chain->bref.data_off,
818 						    chain->bytes);
819 			} else if ((dio = hammer2_io_getquick(hmp,
820 						  chain->bref.data_off,
821 						  chain->bytes)) != NULL) {
822 				hammer2_io_setinval(dio,
823 						    chain->bref.data_off,
824 						    chain->bytes);
825 				hammer2_io_putblk(&dio);
826 			}
827 		}
828 	}
829 
830 	/*
831 	 * If UPDATE is set the parent block table may need to be updated.
832 	 *
833 	 * NOTE: UPDATE may be set on vchain or fchain in which case
834 	 *	 parent could be NULL.  It's easiest to allow the case
835 	 *	 and test for NULL.  parent can also wind up being NULL
836 	 *	 due to a deletion so we need to handle the case anyway.
837 	 *
838 	 * If no parent exists we can just clear the UPDATE bit.  If the
839 	 * chain gets reattached later on the bit will simply get set
840 	 * again.
841 	 */
842 	if ((chain->flags & HAMMER2_CHAIN_UPDATE) && parent == NULL)
843 		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
844 
845 	/*
846 	 * The chain may need its blockrefs updated in the parent.  This
847 	 * requires some fancy footwork.
848 	 */
849 	if (chain->flags & HAMMER2_CHAIN_UPDATE) {
850 		hammer2_blockref_t *base;
851 		int count;
852 
853 		/*
854 		 * Both parent and chain must be locked.  This requires
855 		 * temporarily unlocking the chain.  We have to deal with
856 		 * the case where the chain might be reparented or modified
857 		 * while it was unlocked.
858 		 */
859 		hammer2_chain_unlock(chain);
860 		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
861 		hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE);
862 		if (chain->parent != parent) {
863 			kprintf("PARENT MISMATCH ch=%p p=%p/%p\n",
864 				chain, chain->parent, parent);
865 			hammer2_chain_unlock(parent);
866 			goto done;
867 		}
868 
869 		/*
870 		 * Check race condition.  If someone got in and modified
871 		 * it again while it was unlocked, we have to loop up.
872 		 */
873 		if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
874 			hammer2_chain_unlock(parent);
875 			kprintf("hammer2_flush: chain %p flush-mod race\n",
876 				chain);
877 			goto again;
878 		}
879 
880 		/*
881 		 * Clear UPDATE flag, mark parent modified, update its
882 		 * modify_tid if necessary, and adjust the parent blockmap.
883 		 */
884 		if (chain->flags & HAMMER2_CHAIN_UPDATE)
885 			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
886 
887 		/*
888 		 * (optional code)
889 		 *
890 		 * Avoid actually modifying and updating the parent if it
891 		 * was flagged for destruction.  This can greatly reduce
892 		 * disk I/O in large tree removals because the
893 		 * hammer2_io_setinval() call in the upward recursion
894 		 * (see MODIFIED code above) can only handle a few cases.
895 		 */
896 		if (parent->flags & HAMMER2_CHAIN_DESTROY) {
897 			if (parent->bref.modify_tid < chain->bref.modify_tid) {
898 				parent->bref.modify_tid =
899 					chain->bref.modify_tid;
900 			}
901 			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_BMAPPED |
902 							HAMMER2_CHAIN_BMAPUPD);
903 			hammer2_chain_unlock(parent);
904 			goto skipupdate;
905 		}
906 
907 		/*
908 		 * (semi-optional code)
909 		 *
910 		 * The flusher is responsible for deleting empty indirect
911 		 * blocks at this point.  If we don't do this, no major harm
912 		 * will be done but the empty indirect blocks will stay in
913 		 * the topology and make it a bit messy.
914 		 */
915 		if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT &&
916 		    chain->core.live_count == 0 &&
917 		    (chain->flags & (HAMMER2_CHAIN_INITIAL |
918 				     HAMMER2_CHAIN_COUNTEDBREFS)) == 0) {
919 			base = &chain->data->npdata[0];
920 			count = chain->bytes / sizeof(hammer2_blockref_t);
921 			hammer2_chain_countbrefs(chain, base, count);
922 		}
923 		if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT &&
924 		    chain->core.live_count == 0) {
925 #if 0
926 			kprintf("DELETE CHAIN %016jx.%02x %016jx/%d refs=%d\n",
927 				chain->bref.data_off, chain->bref.type,
928 				chain->bref.key, chain->bref.keybits,
929 				chain->refs);
930 #endif
931 			hammer2_chain_delete(parent, chain,
932 					     chain->bref.modify_tid,
933 					     HAMMER2_DELETE_PERMANENT);
934 			hammer2_chain_unlock(parent);
935 			goto skipupdate;
936 		}
937 
938 		/*
939 		 * We are updating the parent's blockmap, the parent must
940 		 * be set modified.
941 		 */
942 		hammer2_chain_modify(parent, 0, 0, 0);
943 		if (parent->bref.modify_tid < chain->bref.modify_tid)
944 			parent->bref.modify_tid = chain->bref.modify_tid;
945 
946 		/*
947 		 * Calculate blockmap pointer
948 		 */
949 		switch(parent->bref.type) {
950 		case HAMMER2_BREF_TYPE_INODE:
951 			/*
952 			 * Access the inode's block array.  However, there is
953 			 * no block array if the inode is flagged DIRECTDATA.
954 			 */
955 			if (parent->data &&
956 			    (parent->data->ipdata.meta.op_flags &
957 			     HAMMER2_OPFLAG_DIRECTDATA) == 0) {
958 				base = &parent->data->
959 					ipdata.u.blockset.blockref[0];
960 			} else {
961 				base = NULL;
962 			}
963 			count = HAMMER2_SET_COUNT;
964 			break;
965 		case HAMMER2_BREF_TYPE_INDIRECT:
966 		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
967 			if (parent->data)
968 				base = &parent->data->npdata[0];
969 			else
970 				base = NULL;
971 			count = parent->bytes / sizeof(hammer2_blockref_t);
972 			break;
973 		case HAMMER2_BREF_TYPE_VOLUME:
974 			base = &chain->hmp->voldata.sroot_blockset.blockref[0];
975 			count = HAMMER2_SET_COUNT;
976 			break;
977 		case HAMMER2_BREF_TYPE_FREEMAP:
978 			base = &parent->data->npdata[0];
979 			count = HAMMER2_SET_COUNT;
980 			break;
981 		default:
982 			base = NULL;
983 			count = 0;
984 			panic("hammer2_flush_core: "
985 			      "unrecognized blockref type: %d",
986 			      parent->bref.type);
987 		}
988 
989 		/*
990 		 * Blocktable updates
991 		 *
992 		 * We synchronize pending statistics at this time.  Delta
993 		 * adjustments designated for the current and upper level
994 		 * are synchronized.
995 		 */
996 		if (base && (chain->flags & HAMMER2_CHAIN_BMAPUPD)) {
997 			if (chain->flags & HAMMER2_CHAIN_BMAPPED) {
998 				hammer2_spin_ex(&parent->core.spin);
999 				hammer2_base_delete(parent, base, count,
1000 						    &info->cache_index, chain);
1001 				hammer2_spin_unex(&parent->core.spin);
1002 				/* base_delete clears both bits */
1003 			} else {
1004 				atomic_clear_int(&chain->flags,
1005 						 HAMMER2_CHAIN_BMAPUPD);
1006 			}
1007 		}
1008 		if (base && (chain->flags & HAMMER2_CHAIN_BMAPPED) == 0) {
1009 			hammer2_spin_ex(&parent->core.spin);
1010 			hammer2_base_insert(parent, base, count,
1011 					    &info->cache_index, chain);
1012 			hammer2_spin_unex(&parent->core.spin);
1013 			/* base_insert sets BMAPPED */
1014 		}
1015 		hammer2_chain_unlock(parent);
1016 	}
1017 skipupdate:
1018 	;
1019 
1020 	/*
1021 	 * Final cleanup after flush
1022 	 */
1023 done:
1024 	KKASSERT(chain->refs > 0);
1025 	if (hammer2_debug & 0x200) {
1026 		if (info->debug == chain)
1027 			info->debug = NULL;
1028 	}
1029 }
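/*
 * Illustrative sketch (not compiled): per the comment above
 * hammer2_flush_core(), the caller may test UPDATE on the child after
 * the call to see whether the parent's block table still needs work.
 */
#if 0
	hammer2_flush_core(info, chain, flags);
	if (chain->flags & HAMMER2_CHAIN_UPDATE) {
		/* parent block table update still pending */
	}
#endif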
1030 
1031 /*
1032  * Flush recursion helper, called from flush_core, calls flush_core.
1033  *
1034  * Flushes the children of the caller's chain (info->parent), restricted
1035  * by sync_tid.  Set info->domodify if the child's blockref must propagate
1036  * back up to the parent.
1037  *
 1038  * Ripouts can move a child from the rbtree to the dbtree or dbq but the
 1039  * caller's flush scan order prevents any chains from being lost.  A child
 1040  * may be processed more than once.
1041  *
1042  * WARNING! If we do not call hammer2_flush_core() we must update
1043  *	    bref.mirror_tid ourselves to indicate that the flush has
1044  *	    processed the child.
1045  *
1046  * WARNING! parent->core spinlock is held on entry and return.
1047  */
1048 static int
1049 hammer2_flush_recurse(hammer2_chain_t *child, void *data)
1050 {
1051 	hammer2_flush_info_t *info = data;
1052 	hammer2_chain_t *parent = info->parent;
1053 
1054 	/*
1055 	 * (child can never be fchain or vchain so a special check isn't
1056 	 *  needed).
1057 	 *
1058 	 * We must ref the child before unlocking the spinlock.
1059 	 *
1060 	 * The caller has added a ref to the parent so we can temporarily
1061 	 * unlock it in order to lock the child.
1062 	 */
1063 	hammer2_chain_ref(child);
1064 	hammer2_spin_unex(&parent->core.spin);
1065 
1066 	hammer2_chain_unlock(parent);
1067 	hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);
1068 
1069 	/*
1070 	 * Must propagate the DESTROY flag downwards, otherwise the
1071 	 * parent could end up never being removed because it will
1072 	 * be requeued to the flusher if it survives this run due to
1073 	 * the flag.
1074 	 */
1075 	if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
1076 		atomic_set_int(&child->flags, HAMMER2_CHAIN_DESTROY);
1077 
1078 	/*
1079 	 * Recurse and collect deferral data.  We're in the media flush,
1080 	 * this can cross PFS boundaries.
1081 	 */
1082 	if (child->flags & HAMMER2_CHAIN_FLUSH_MASK) {
1083 		++info->depth;
1084 		hammer2_flush_core(info, child, info->flags);
1085 		--info->depth;
1086 	} else if (hammer2_debug & 0x200) {
1087 		if (info->debug == NULL)
1088 			info->debug = child;
1089 		++info->depth;
1090 		hammer2_flush_core(info, child, info->flags);
1091 		--info->depth;
1092 		if (info->debug == child)
1093 			info->debug = NULL;
1094 	}
1095 
1096 	/*
1097 	 * Relock to continue the loop
1098 	 */
1099 	hammer2_chain_unlock(child);
1100 	hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
1101 	hammer2_chain_drop(child);
1102 	KKASSERT(info->parent == parent);
1103 	hammer2_spin_ex(&parent->core.spin);
1104 
1105 	return (0);
1106 }
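/*
 * Illustrative sketch (not compiled): hammer2_flush_core() drives this
 * callback with the parent's core spinlock held, matching the WARNING
 * above.
 */
#if 0
	hammer2_spin_ex(&parent->core.spin);
	RB_SCAN(hammer2_chain_tree, &parent->core.rbtree,
		NULL, hammer2_flush_recurse, info);
	hammer2_spin_unex(&parent->core.spin);
#endif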
1107 
1108 /*
1109  * flush helper (direct)
1110  *
1111  * Quickly flushes any dirty chains for a device.  This will update our
1112  * concept of the volume root but does NOT flush the actual volume root
1113  * and does not flush dirty device buffers.
1114  *
1115  * This function is primarily used by the bulkfree code to allow it to
1116  * create a snapshot for the pass.  It doesn't care about any pending
1117  * work (dirty vnodes, dirty inodes, dirty logical buffers) for which blocks
1118  * have not yet been allocated.
1119  */
1120 void
1121 hammer2_flush_quick(hammer2_dev_t *hmp)
1122 {
1123 	hammer2_chain_t *chain;
1124 
1125 	hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);
1126 
1127 	hammer2_chain_ref(&hmp->vchain);
1128 	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
1129 	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
1130 		chain = &hmp->vchain;
1131 		hammer2_flush(chain, HAMMER2_FLUSH_TOP |
1132 				     HAMMER2_FLUSH_ALL);
1133 		KKASSERT(chain == &hmp->vchain);
1134 	}
1135 	hammer2_chain_unlock(&hmp->vchain);
1136 	hammer2_chain_drop(&hmp->vchain);
1137 
1138 	hammer2_trans_done(hmp->spmp);  /* spmp trans */
1139 }
1140 
1141 /*
1142  * flush helper (backend threaded)
1143  *
1144  * Flushes core chains, issues disk sync, flushes volume roots.
1145  *
1146  * Primarily called from vfs_sync().
1147  */
1148 void
1149 hammer2_inode_xop_flush(hammer2_thread_t *thr, hammer2_xop_t *arg)
1150 {
1151 	hammer2_xop_flush_t *xop = &arg->xop_flush;
1152 	hammer2_chain_t *chain;
1153 	hammer2_chain_t *parent;
1154 	hammer2_dev_t *hmp;
1155 	int error = 0;
1156 	int total_error = 0;
1157 	int j;
1158 
1159 	/*
1160 	 * Flush core chains
1161 	 */
1162 	chain = hammer2_inode_chain(xop->head.ip1, thr->clindex,
1163 				    HAMMER2_RESOLVE_ALWAYS);
1164 	if (chain) {
1165 		hmp = chain->hmp;
1166 		if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) ||
1167 		    TAILQ_FIRST(&hmp->flushq) != NULL) {
1168 			hammer2_flush(chain, HAMMER2_FLUSH_TOP);
1169 			parent = chain->parent;
1170 			KKASSERT(chain->pmp != parent->pmp);
1171 			hammer2_chain_setflush(parent);
1172 		}
1173 		hammer2_chain_unlock(chain);
1174 		hammer2_chain_drop(chain);
1175 		chain = NULL;
1176 	} else {
1177 		hmp = NULL;
1178 	}
1179 
1180 	/*
1181 	 * Flush volume roots.  Avoid replication, we only want to
1182 	 * flush each hammer2_dev (hmp) once.
1183 	 */
1184 	for (j = thr->clindex - 1; j >= 0; --j) {
1185 		if ((chain = xop->head.ip1->cluster.array[j].chain) != NULL) {
1186 			if (chain->hmp == hmp) {
1187 				chain = NULL;	/* safety */
1188 				goto skip;
1189 			}
1190 		}
1191 	}
1192 	chain = NULL;	/* safety */
1193 
1194 	/*
1195 	 * spmp transaction.  The super-root is never directly mounted so
1196 	 * there shouldn't be any vnodes, let alone any dirty vnodes
1197 	 * associated with it, so we shouldn't have to mess around with any
1198 	 * vnode flushes here.
1199 	 */
1200 	hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);
1201 
1202 	/*
1203 	 * Media mounts have two 'roots', vchain for the topology
1204 	 * and fchain for the free block table.  Flush both.
1205 	 *
1206 	 * Note that the topology and free block table are handled
1207 	 * independently, so the free block table can wind up being
1208 	 * ahead of the topology.  We depend on the bulk free scan
1209 	 * code to deal with any loose ends.
1210 	 */
1211 	hammer2_chain_ref(&hmp->vchain);
1212 	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
1213 	hammer2_chain_ref(&hmp->fchain);
1214 	hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
1215 	if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
1216 		/*
1217 		 * This will also modify vchain as a side effect,
1218 		 * mark vchain as modified now.
1219 		 */
1220 		hammer2_voldata_modify(hmp);
1221 		chain = &hmp->fchain;
1222 		hammer2_flush(chain, HAMMER2_FLUSH_TOP);
1223 		KKASSERT(chain == &hmp->fchain);
1224 	}
1225 	hammer2_chain_unlock(&hmp->fchain);
1226 	hammer2_chain_unlock(&hmp->vchain);
1227 	hammer2_chain_drop(&hmp->fchain);
1228 	/* vchain dropped down below */
1229 
1230 	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
1231 	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
1232 		chain = &hmp->vchain;
1233 		hammer2_flush(chain, HAMMER2_FLUSH_TOP);
1234 		KKASSERT(chain == &hmp->vchain);
1235 	}
1236 	hammer2_chain_unlock(&hmp->vchain);
1237 	hammer2_chain_drop(&hmp->vchain);
1238 
1239 	error = 0;
1240 
1241 	/*
1242 	 * We can't safely flush the volume header until we have
1243 	 * flushed any device buffers which have built up.
1244 	 *
1245 	 * XXX this isn't being incremental
1246 	 */
1247 	vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
1248 	error = VOP_FSYNC(hmp->devvp, MNT_WAIT, 0);
1249 	vn_unlock(hmp->devvp);
1250 
1251 	/*
1252 	 * The flush code sets CHAIN_VOLUMESYNC to indicate that the
1253 	 * volume header needs synchronization via hmp->volsync.
1254 	 *
1255 	 * XXX synchronize the flag & data with only this flush XXX
1256 	 */
1257 	if (error == 0 &&
1258 	    (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) {
1259 		struct buf *bp;
1260 
1261 		/*
1262 		 * Synchronize the disk before flushing the volume
1263 		 * header.
1264 		 */
1265 		bp = getpbuf(NULL);
1266 		bp->b_bio1.bio_offset = 0;
1267 		bp->b_bufsize = 0;
1268 		bp->b_bcount = 0;
1269 		bp->b_cmd = BUF_CMD_FLUSH;
1270 		bp->b_bio1.bio_done = biodone_sync;
1271 		bp->b_bio1.bio_flags |= BIO_SYNC;
1272 		vn_strategy(hmp->devvp, &bp->b_bio1);
1273 		biowait(&bp->b_bio1, "h2vol");
1274 		relpbuf(bp, NULL);
1275 
1276 		/*
1277 		 * Then we can safely flush the version of the
1278 		 * volume header synchronized by the flush code.
1279 		 */
1280 		j = hmp->volhdrno + 1;
1281 		if (j >= HAMMER2_NUM_VOLHDRS)
1282 			j = 0;
1283 		if (j * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE >
1284 		    hmp->volsync.volu_size) {
1285 			j = 0;
1286 		}
1287 		if (hammer2_debug & 0x8000) {
1288 			/* debug only, avoid syslogd loop */
1289 			kprintf("sync volhdr %d %jd\n",
1290 				j, (intmax_t)hmp->volsync.volu_size);
1291 		}
1292 		bp = getblk(hmp->devvp, j * HAMMER2_ZONE_BYTES64,
1293 			    HAMMER2_PBUFSIZE, 0, 0);
1294 		atomic_clear_int(&hmp->vchain.flags,
1295 				 HAMMER2_CHAIN_VOLUMESYNC);
1296 		bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE);
1297 		bawrite(bp);
1298 		hmp->volhdrno = j;
1299 	}
1300 	if (error)
1301 		total_error = error;
1302 
1303 	hammer2_trans_done(hmp->spmp);  /* spmp trans */
1304 skip:
1305 	error = hammer2_xop_feed(&xop->head, NULL, thr->clindex, total_error);
1306 }
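/*
 * Illustrative sketch (assumption, not compiled): the frontend would
 * dispatch this backend function through the usual XOP helpers, roughly
 * as hammer2_vfs_sync() does.  The exact helper names and flags used
 * here are assumed, not taken from this file.
 */
#if 0
	xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING);
	hammer2_xop_start(&xop->head, hammer2_inode_xop_flush);
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
#endif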
1307