/* /dragonfly/sys/vfs/hammer2/hammer2_flush.c (revision 3ea159d2) */
/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 *			TRANSACTION AND FLUSH HANDLING
 *
 * Deceptively simple but actually fairly difficult to implement properly is
 * how I would describe it.
 *
 * Flushing generally occurs bottom-up but requires a top-down scan to
 * locate chains with MODIFIED and/or UPDATE bits set.  The ONFLUSH flag
 * tells how to recurse downward to find these chains.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define FLUSH_DEBUG 0

#define HAMMER2_FLUSH_DEPTH_LIMIT	60	/* stack recursion limit */


/*
 * Recursively flush the specified chain.  The chain is locked and
 * referenced by the caller and will remain so on return.  The chain
 * will remain referenced throughout but can temporarily lose its
 * lock during the recursion to avoid unnecessarily stalling user
 * processes.
 */
struct hammer2_flush_info {
	hammer2_chain_t *parent;
	int		depth;
	int		error;			/* cumulative error */
	int		flags;
#ifdef HAMMER2_SCAN_DEBUG
	long		scan_count;
	long		scan_mod_count;
	long		scan_upd_count;
	long		scan_onf_count;
	long		scan_del_count;
	long		scan_btype[7];
#endif
	hammer2_chain_t	*debug;
};

typedef struct hammer2_flush_info hammer2_flush_info_t;

static int hammer2_flush_core(hammer2_flush_info_t *info,
				hammer2_chain_t *chain, int flags);
static int hammer2_flush_recurse(hammer2_chain_t *child, void *data);

/*
 * Any per-pfs transaction initialization goes here.
 */
void
hammer2_trans_manage_init(hammer2_pfs_t *pmp)
{
}

/*
 * Transaction support for any modifying operation.  Transactions are used
 * in the pmp layer by the frontend and in the spmp layer by the backend.
 *
 * 0			- Normal transaction.  Interlocks against just the
 *			  COPYQ portion of an ISFLUSH transaction.
 *
 * TRANS_ISFLUSH	- Flush transaction.  Interlocks against other flush
 *			  transactions.
 *
 *			  When COPYQ is also specified, waits for the count
 *			  to drop to 1.
 *
 * TRANS_BUFCACHE	- Buffer cache transaction.  No interlock.
 *
 * TRANS_SIDEQ		- Run the sideq (only tested in trans_done())
 *
 * Initializing a new transaction allocates a transaction ID.  Typically
 * passed a pmp (hmp passed as NULL), indicating a cluster transaction.  Can
 * be passed a NULL pmp and non-NULL hmp to indicate a transaction on a single
 * media target.  The latter mode is used by the recovery code.
 */
void
hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;
	int dowait;

	for (;;) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		dowait = 0;

		if (flags & HAMMER2_TRANS_ISFLUSH) {
			/*
			 * Interlock against other flush transactions.
			 */
			if (oflags & HAMMER2_TRANS_ISFLUSH) {
				nflags = oflags | HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
		} else if (flags & HAMMER2_TRANS_BUFCACHE) {
			/*
			 * Requesting strategy transaction from buffer-cache,
			 * or a VM getpages/putpages through the buffer cache.
			 * We must allow such transactions in all situations
			 * to avoid deadlocks.
			 */
			nflags = (oflags | flags) + 1;
		} else {
			/*
			 * Normal transaction.  We do not interlock against
			 * BUFCACHE or ISFLUSH.
			 *
			 * Note that vnode locks may be held going into
			 * this call.
			 *
			 * NOTE: Remember that non-modifying operations
			 *	 such as read, stat, readdir, etc, do
			 *	 not use transactions.
			 */
			nflags = (oflags | flags) + 1;
		}
		if (dowait)
			tsleep_interlock(&pmp->trans.sync_wait, 0);
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			if (dowait == 0)
				break;
			tsleep(&pmp->trans.sync_wait, PINTERLOCKED,
			       "h2trans", hz);
			/* retry */
		} else {
			cpu_pause();
			/* retry */
		}
		/* retry */
	}

#if 0
	/*
	 * When entering a FLUSH transaction with COPYQ set, wait for the
	 * transaction count to drop to 1 (our flush transaction only)
	 * before proceeding.
	 *
	 * This waits for all non-flush transactions to complete and blocks
	 * new non-flush transactions from starting until COPYQ is cleared.
	 * (the flush will then proceed after clearing COPYQ).  This should
	 * be a very short stall on modifying operations.
	 */
	while ((flags & HAMMER2_TRANS_ISFLUSH) &&
	       (flags & HAMMER2_TRANS_COPYQ)) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		if ((oflags & HAMMER2_TRANS_MASK) == 1)
			break;
		nflags = oflags | HAMMER2_TRANS_WAITING;
		tsleep_interlock(&pmp->trans.sync_wait, 0);
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			tsleep(&pmp->trans.sync_wait, PINTERLOCKED,
			       "h2trans2", hz);
		}
	}
#endif
}

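/*
 * Illustrative sketch (excluded from the build): pmp->trans.flags packs an
 * active-transaction count in its low bits (HAMMER2_TRANS_MASK) together
 * with state bits such as ISFLUSH, BUFCACHE, and WAITING in its high bits,
 * which is why hammer2_trans_init() can admit a transaction and set its
 * flags with a single atomic_cmpset_int().  A minimal stand-alone model of
 * that pattern, using hypothetical names, might look like this:
 */
#if 0
#include <stdatomic.h>

#define XTRANS_MASK	0x000fffffu	/* hypothetical count mask */
#define XTRANS_ISFLUSH	0x80000000u	/* hypothetical flag bit */

static _Atomic unsigned int xtrans_word;

static void
xtrans_begin(unsigned int flags)
{
	unsigned int oflags;
	unsigned int nflags;

	for (;;) {
		oflags = atomic_load(&xtrans_word);
		nflags = (oflags | flags) + 1;	/* set flags, bump count */
		if (atomic_compare_exchange_weak(&xtrans_word,
						 &oflags, nflags))
			break;
		/* lost the race, retry */
	}
}
#endif
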
/*
 * Start a sub-transaction; there is no 'subdone' function.  This will
 * issue a new modify_tid (mtid) for the current transaction, which is a
 * CLC (cluster level change) id and not a per-node id.
 *
 * This function must be called for each XOP when multiple XOPs are run in
 * sequence within a transaction.
 *
 * Callers typically update the inode with the transaction mtid manually
 * to enforce sequencing.
 */
hammer2_tid_t
hammer2_trans_sub(hammer2_pfs_t *pmp)
{
	hammer2_tid_t mtid;

	mtid = atomic_fetchadd_64(&pmp->modify_tid, 1);

	return (mtid);
}

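/*
 * Illustrative sketch (hypothetical caller, excluded from the build): when
 * several XOPs run in sequence inside one transaction, each draws a fresh
 * CLC mtid so the backend can order the modifications:
 */
#if 0
	hammer2_tid_t mtid1;
	hammer2_tid_t mtid2;

	mtid1 = hammer2_trans_sub(pmp);	/* stamps the first XOP */
	/* ... issue first XOP using mtid1 ... */
	mtid2 = hammer2_trans_sub(pmp);	/* mtid2 > mtid1, second XOP */
	/* ... issue second XOP using mtid2 ... */
#endif
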
void
hammer2_trans_setflags(hammer2_pfs_t *pmp, uint32_t flags)
{
	atomic_set_int(&pmp->trans.flags, flags);
}

/*
 * Typically used to clear trans flags asynchronously.  If TRANS_WAITING
 * is in the mask, and was previously set, this function will wake up
 * any waiters.
 */
void
hammer2_trans_clearflags(hammer2_pfs_t *pmp, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		nflags = oflags & ~flags;
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			if ((oflags ^ nflags) & HAMMER2_TRANS_WAITING)
				wakeup(&pmp->trans.sync_wait);
			break;
		}
		cpu_pause();
		/* retry */
	}
}

void
hammer2_trans_done(hammer2_pfs_t *pmp, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

#if 0
	/*
	 * Modifying ops on the front-end can cause dirty inodes to
	 * build up in the sideq.  We don't flush these on inactive/reclaim
	 * due to potential deadlocks, so we have to deal with them from
	 * inside other nominal modifying front-end transactions.
	 */
	if ((flags & HAMMER2_TRANS_SIDEQ) &&
	    pmp->sideq_count > hammer2_limit_dirty_inodes / 2 &&
	    pmp->sideq_count > (pmp->inum_count >> 3) &&
	    pmp->mp) {
		speedup_syncer(pmp->mp);
	}
#endif

	/*
	 * Clean-up the transaction.  Wakeup any waiters when finishing
	 * a flush transaction or transitioning the non-flush transaction
	 * count from 2->1 while a flush transaction is pending.
	 */
	for (;;) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		KKASSERT(oflags & HAMMER2_TRANS_MASK);

		nflags = (oflags - 1) & ~flags;
		if (flags & HAMMER2_TRANS_ISFLUSH) {
			nflags &= ~HAMMER2_TRANS_WAITING;
		}
		if ((oflags & (HAMMER2_TRANS_ISFLUSH|HAMMER2_TRANS_MASK)) ==
		    (HAMMER2_TRANS_ISFLUSH|2)) {
			nflags &= ~HAMMER2_TRANS_WAITING;
		}
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			if ((oflags ^ nflags) & HAMMER2_TRANS_WAITING)
				wakeup(&pmp->trans.sync_wait);
			break;
		}
		cpu_pause();
		/* retry */
	}
}

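/*
 * Worked example for the 2->1 transition above (illustrative values):
 * assume a flush transaction is pending and one normal transaction is
 * finishing, so oflags = (ISFLUSH | WAITING | 2).  Then
 * nflags = (oflags - 1) = (ISFLUSH | WAITING | 1); the count/flag test
 * matches (ISFLUSH | 2), so WAITING is also cleared.  Since
 * (oflags ^ nflags) now includes WAITING, anyone sleeping on
 * trans.sync_wait is woken up.
 */
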
/*
 * Obtain a new, unique inode number (not serialized by caller).
 */
hammer2_tid_t
hammer2_trans_newinum(hammer2_pfs_t *pmp)
{
	hammer2_tid_t tid;

	tid = atomic_fetchadd_64(&pmp->inode_tid, 1);

	return tid;
}

/*
 * Assert that a strategy call is ok here.  Currently we allow strategy
 * calls in all situations, including during flushes.  Previously:
 *	(old) (1) In a normal transaction.
 *	(old) (2) In a flush transaction only if PREFLUSH is also set.
 */
void
hammer2_trans_assert_strategy(hammer2_pfs_t *pmp)
{
#if 0
	KKASSERT((pmp->trans.flags & HAMMER2_TRANS_ISFLUSH) == 0 ||
		 (pmp->trans.flags & HAMMER2_TRANS_PREFLUSH));
#endif
}

/*
 * Flush the chain and all modified sub-chains through the specified
 * synchronization point, propagating blockref updates back up.  As
 * part of this propagation, mirror_tid and inode/data usage statistics
 * propagate back upward.
 *
 * Returns a HAMMER2 error code, 0 if no error.  Note that I/O errors from
 * buffers dirtied during the flush operation can occur later.
 *
 * modify_tid (clc - cluster level change) is not propagated.
 *
 * update_tid (clc) is used for validation and is not propagated by this
 * function.
 *
 * This routine can be called from several places but the most important
 * is from VFS_SYNC (frontend) via hammer2_xop_inode_flush (backend).
 *
 * chain is locked on call and will remain locked on return.  The chain's
 * UPDATE flag indicates that its parent's block table (which is not yet
 * part of the flush) should be updated.
 *
 * flags:
 *	HAMMER2_FLUSH_TOP	Indicates that this is the top of the flush.
 *				Is cleared for the recursion.
 *
 *	HAMMER2_FLUSH_ALL	Recurse everything
 *
 *	HAMMER2_FLUSH_INODE_STOP
 *				Stop at PFS inode or normal inode boundary
 */
int
hammer2_flush(hammer2_chain_t *chain, int flags)
{
	hammer2_flush_info_t info;
	hammer2_dev_t *hmp;
	int loops;

	/*
	 * Execute the recursive flush and handle deferrals.
	 *
	 * Chains can be ridiculously long (thousands deep), so to
	 * avoid blowing out the kernel stack the recursive flush has a
	 * depth limit.  Elements at the limit are placed on a list
	 * for re-execution after the stack has been popped.
	 */
	bzero(&info, sizeof(info));
	info.flags = flags & ~HAMMER2_FLUSH_TOP;

	/*
	 * Calculate parent (can be NULL).  If not NULL the flush core
	 * expects the parent to be referenced so it can easily lock/unlock
	 * it without it getting ripped up.
	 */
	if ((info.parent = chain->parent) != NULL)
		hammer2_chain_ref(info.parent);

	/*
	 * Extra ref needed because flush_core expects it when replacing
	 * chain.
	 */
	hammer2_chain_ref(chain);
	hmp = chain->hmp;
	loops = 0;

	for (;;) {
		/*
		 * [re]flush chain as the deep recursion may have generated
		 * additional modifications.
		 */
		if (info.parent != chain->parent) {
			if (hammer2_debug & 0x0040) {
				kprintf("LOST CHILD4 %p->%p "
					"(actual parent %p)\n",
					info.parent, chain, chain->parent);
			}
			hammer2_chain_drop(info.parent);
			info.parent = chain->parent;
			hammer2_chain_ref(info.parent);
		}
		if (hammer2_flush_core(&info, chain, flags) == 0)
			break;

		if (++loops % 1000 == 0) {
			kprintf("hammer2_flush: excessive loops on %p\n",
				chain);
			if (hammer2_debug & 0x100000)
				Debugger("hell4");
		}
	}
#ifdef HAMMER2_SCAN_DEBUG
	if (info.scan_count >= 10)
		kprintf("hammer2_flush: scan_count %ld (%ld,%ld,%ld,%ld) "
			"bt(%ld,%ld,%ld,%ld,%ld,%ld)\n",
			info.scan_count,
			info.scan_mod_count,
			info.scan_upd_count,
			info.scan_onf_count,
			info.scan_del_count,
			info.scan_btype[1],
			info.scan_btype[2],
			info.scan_btype[3],
			info.scan_btype[4],
			info.scan_btype[5],
			info.scan_btype[6]);
#endif
	hammer2_chain_drop(chain);
	if (info.parent)
		hammer2_chain_drop(info.parent);
	return (info.error);
}

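/*
 * Illustrative caller pattern (a sketch mirroring the real usage in
 * hammer2_xop_inode_flush() below): the caller supplies a referenced,
 * locked chain and selects the recursion behavior via the flags.
 */
#if 0
	hammer2_chain_ref(chain);
	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
	error = hammer2_flush(chain, HAMMER2_FLUSH_TOP |
				     HAMMER2_FLUSH_INODE_STOP);
	hammer2_chain_unlock(chain);
	hammer2_chain_drop(chain);
#endif
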
/*
 * This is the core of the chain flushing code.  The chain is locked by the
 * caller and must also have an extra ref on it by the caller, and remains
 * locked and will have an extra ref on return.  info.parent is referenced
 * but not locked.
 *
 * Upon return, the caller can test the UPDATE bit on the chain to determine
 * if the parent needs updating.
 *
 * If non-zero is returned, the chain's parent changed during the flush and
 * the caller must retry the operation.
 *
 * (1) Determine if this node is a candidate for the flush, return if it is
 *     not.  fchain and vchain are always candidates for the flush.
 *
 * (2) If we recurse too deep the chain is entered onto the deferral list and
 *     the current flush stack is aborted until after the deferral list is
 *     run.
 *
 * (3) Recursively flush live children (rbtree).  This can create deferrals.
 *     A successful flush clears the MODIFIED and UPDATE bits on the children
 *     and typically causes the parent to be marked MODIFIED as the children
 *     update the parent's block table.  A parent might already be marked
 *     MODIFIED due to a deletion (whose blocktable update in the parent is
 *     handled by the frontend), or if the parent itself is modified by the
 *     frontend for other reasons.
 *
 * (4) Permanently disconnected sub-trees are cleaned up by the front-end.
 *     Deleted-but-open inodes can still be individually flushed via the
 *     filesystem syncer.
 *
 * (5) Delete parents on the way back up if they are normal indirect blocks
 *     and have no children.
 *
 * (6) Note that an unmodified child may still need the block table in its
 *     parent updated (e.g. rename/move).  The child will have UPDATE set
 *     in this case.
 *
 *			WARNING ON BREF MODIFY_TID/MIRROR_TID
 *
 * blockref.modify_tid is consistent only within a PFS, and will not be
 * consistent during synchronization.  mirror_tid is consistent across the
 * block device regardless of the PFS.
 */
static int
hammer2_flush_core(hammer2_flush_info_t *info, hammer2_chain_t *chain,
		   int flags)
{
	hammer2_chain_t *parent;
	hammer2_dev_t *hmp;
	int save_error;
	int retry;

	retry = 0;

	/*
	 * (1) Optimize downward recursion to locate nodes needing action.
	 *     Nothing to do if none of these flags are set.
	 */
	if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) == 0) {
		if (hammer2_debug & 0x200) {
			if (info->debug == NULL)
				info->debug = chain;
		} else {
			return 0;
		}
	}

	hmp = chain->hmp;

	/*
	 * NOTE: parent can be NULL, usually due to destroy races.
	 */
	parent = info->parent;
	KKASSERT(chain->parent == parent);

	/*
	 * Downward search recursion
	 *
	 * We must be careful on cold stops, which often occur on inode
	 * boundaries due to the way hammer2_vfs_sync() sequences the flush.
	 * Be sure to issue an appropriate chain_setflush().
	 */
	if ((chain->flags & HAMMER2_CHAIN_PFSBOUNDARY) &&
	    (flags & HAMMER2_FLUSH_ALL) == 0 &&
	    (flags & HAMMER2_FLUSH_TOP) == 0 &&
	    chain->pmp && chain->pmp->mp) {
		/*
		 * If FLUSH_ALL is not specified the caller does not want
		 * to recurse through PFS roots that have been mounted.
		 *
		 * (If the PFS has not been mounted there may not be
		 *  anything monitoring its chains and it's up to us
		 *  to flush it).
		 *
		 * The typical sequence is to flush dirty PFS's starting at
		 * their root downward, then flush the device root (vchain).
		 * It is this second flush that typically leaves out the
		 * ALL flag.
		 *
		 * However, we must still process the PFSROOT chains for block
		 * table updates in their parent (which IS part of our flush).
		 *
		 * NOTE: The volume root, vchain, does not set PFSBOUNDARY.
		 *
		 * NOTE: We must re-set ONFLUSH in the parent to retain if
		 *	 this chain (that we are skipping) requires work.
		 */
		if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				    HAMMER2_CHAIN_DESTROY |
				    HAMMER2_CHAIN_MODIFIED)) {
			hammer2_chain_setflush(parent);
		}
		goto done;
	} else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		   (flags & HAMMER2_FLUSH_INODE_STOP) &&
		   (flags & HAMMER2_FLUSH_ALL) == 0 &&
		   (flags & HAMMER2_FLUSH_TOP) == 0 &&
		   chain->pmp && chain->pmp->mp) {
		/*
		 * When FLUSH_INODE_STOP is specified we are being asked not
		 * to include any inode changes for inodes we encounter,
		 * with the exception of the inode that the flush began with.
		 * So: INODE, INODE_STOP, and TOP==0 basically.
		 *
		 * Dirty inodes are flushed based on the hammer2_inode
		 * in-memory structure; issuing a chain_setflush() here
		 * will only cause unnecessary traversals of the topology.
		 */
		goto done;
#if 0
		/*
		 * If FLUSH_INODE_STOP is specified and both ALL and TOP
		 * are clear, we must not flush the chain.  The chain should
		 * have already been flushed and any further ONFLUSH/UPDATE
		 * setting will be related to the next flush.
		 *
		 * This feature allows us to flush inodes independently of
		 * each other and meta-data above the inodes separately.
		 */
		if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				    HAMMER2_CHAIN_DESTROY |
				    HAMMER2_CHAIN_MODIFIED)) {
			if (parent)
				hammer2_chain_setflush(parent);
		}
#endif
	} else if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT) {
		/*
		 * Recursion depth reached.
		 */
		panic("hammer2: flush depth limit");
	} else if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				   HAMMER2_CHAIN_DESTROY)) {
		/*
		 * Downward recursion search (actual flush occurs bottom-up).
		 * Pre-clear ONFLUSH.  It can get set again due to races or
		 * flush errors, which we want so the scan finds us again in
		 * the next flush.
		 *
		 * We must also recurse if DESTROY is set so we can finally
		 * get rid of the related children, otherwise the node will
		 * just get re-flushed on lastdrop.
		 *
		 * WARNING!  The recursion will unlock/relock info->parent
		 *	     (which is 'chain'), potentially allowing it
		 *	     to be ripped up.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
		save_error = info->error;
		info->error = 0;
		info->parent = chain;

		/*
		 * We may have to do this twice to catch any indirect
		 * block maintenance that occurs.
		 */
		hammer2_spin_ex(&chain->core.spin);
		RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
			NULL, hammer2_flush_recurse, info);
		if (chain->flags & HAMMER2_CHAIN_ONFLUSH) {
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
			RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
				NULL, hammer2_flush_recurse, info);
		}
		hammer2_spin_unex(&chain->core.spin);
		info->parent = parent;

		/*
		 * Re-set the flush bits if the flush was incomplete or
		 * an error occurred.  If an error occurs it is typically
		 * an allocation error.  Errors do not cause deferrals.
		 */
		if (info->error)
			hammer2_chain_setflush(chain);
		info->error |= save_error;

		/*
		 * If we lost the parent->chain association we have to
		 * stop processing this chain because it is no longer
		 * in this recursion.  If it moved, it will be handled
		 * by the ONFLUSH flag elsewhere.
		 */
		if (chain->parent != parent) {
			kprintf("LOST CHILD2 %p->%p (actual parent %p)\n",
				parent, chain, chain->parent);
			goto done;
		}
	}

	/*
	 * Now we are in the bottom-up part of the recursion.
	 *
	 * We continue to try to update the chain on lower-level errors, but
	 * the flush code may decide not to flush the volume root.
	 *
	 * XXX should we continue to try to update the chain if an error
	 *     occurred?
	 */

	/*
	 * Both parent and chain must be locked in order to flush chain and
	 * to properly update the parent under certain conditions.
	 *
	 * In addition, we can't safely unlock/relock the chain once we
	 * start flushing the chain itself, which we would have to do later
	 * on in order to lock the parent if we didn't do that now.
	 */
	hammer2_chain_ref_hold(chain);
	hammer2_chain_unlock(chain);
	if (parent)
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE);
	hammer2_chain_drop_unhold(chain);

	/*
	 * Can't process if we can't access the parent's or chain's content.
	 */
	if ((parent && parent->error) || chain->error) {
		kprintf("hammer2: chain error during flush\n");
		info->error |= chain->error;
		if (parent) {
			info->error |= parent->error;
			hammer2_chain_unlock(parent);
		}
		goto done;
	}

	if (chain->parent != parent) {
		if (hammer2_debug & 0x0040) {
			kprintf("LOST CHILD3 %p->%p (actual parent %p)\n",
				parent, chain, chain->parent);
		}
		KKASSERT(parent != NULL);
		hammer2_chain_unlock(parent);
		retry = 1;
		goto done;
	}

	/*
	 * Propagate the DESTROY flag downwards.  This dummies up the flush
	 * code and tries to invalidate related buffer cache buffers to
	 * avoid the disk write.
	 */
	if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);

	/*
	 * Dispose of the modified bit.
	 *
	 * If parent is present, the UPDATE bit should already be set.
	 * bref.mirror_tid should already be set.
	 */
	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
		KKASSERT((chain->flags & HAMMER2_CHAIN_UPDATE) ||
			 chain->parent == NULL);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		atomic_add_long(&hammer2_count_modified_chains, -1);

		/*
		 * Manage threads waiting for excessive dirty memory to
		 * be retired.
		 */
		if (chain->pmp)
			hammer2_pfs_memory_wakeup(chain->pmp, -1);

#if 0
		if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
		    chain != &hmp->vchain &&
		    chain != &hmp->fchain) {
			/*
			 * Set UPDATE bit indicating that the parent block
			 * table requires updating.
			 */
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
		}
#endif

		/*
		 * Issue the flush.  This is indirect via the DIO.
		 *
		 * NOTE: A DELETED node that reaches this point must be
		 *	 flushed for synchronization point consistency.
		 *
		 * NOTE: Even though MODIFIED was already set, the related DIO
		 *	 might not be dirty due to a system buffer cache
		 *	 flush and must be set dirty if we are going to make
		 *	 further modifications to the buffer.  Chains with
		 *	 embedded data don't need this.
		 */
		if (hammer2_debug & 0x1000) {
			kprintf("Flush %p.%d %016jx/%d data=%016jx\n",
				chain, chain->bref.type,
				(uintmax_t)chain->bref.key,
				chain->bref.keybits,
				(uintmax_t)chain->bref.data_off);
		}

		/*
		 * Update chain CRCs for flush.
		 *
		 * NOTE: Volume headers are NOT flushed here as they require
		 *	 special processing.
		 */
		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_FREEMAP:
			/*
			 * Update the volume header's freemap_tid to the
			 * freemap's flushing mirror_tid.
			 *
			 * (note: embedded data, do not call setdirty)
			 */
			KKASSERT(hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED);
			KKASSERT(chain == &hmp->fchain);
			hmp->voldata.freemap_tid = chain->bref.mirror_tid;
			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("sync freemap mirror_tid %08jx\n",
					(intmax_t)chain->bref.mirror_tid);
			}

			/*
			 * The freemap can be flushed independently of the
			 * main topology, but for the case where it is
			 * flushed in the same transaction, and flushed
			 * before vchain (a case we want to allow for
			 * performance reasons), make sure modifications
			 * made during the flush under vchain use a new
			 * transaction id.
			 *
			 * Otherwise the mount recovery code will get confused.
			 */
			++hmp->voldata.mirror_tid;
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			/*
			 * The free block table is flushed by
			 * hammer2_vfs_sync() before it flushes vchain.
			 * We must still hold fchain locked while copying
			 * voldata to volsync, however.
			 *
			 * These do not error per se since their data does
			 * not need to be re-read from media on lock.
			 *
			 * (note: embedded data, do not call setdirty)
			 */
			hammer2_chain_lock(&hmp->fchain,
					   HAMMER2_RESOLVE_ALWAYS);
			hammer2_voldata_lock(hmp);
			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("sync volume  mirror_tid %08jx\n",
					(intmax_t)chain->bref.mirror_tid);
			}

			/*
			 * Update the volume header's mirror_tid to the
			 * main topology's flushing mirror_tid.  It is
			 * possible that voldata.mirror_tid is already
			 * beyond bref.mirror_tid due to the bump we made
			 * above in BREF_TYPE_FREEMAP.
			 */
			if (hmp->voldata.mirror_tid < chain->bref.mirror_tid) {
				hmp->voldata.mirror_tid =
					chain->bref.mirror_tid;
			}

			/*
			 * The volume header is flushed manually by the
			 * syncer, not here.  All we do here is adjust the
			 * crc's.
			 */
			KKASSERT(chain->data != NULL);
			KKASSERT(chain->dio == NULL);

			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1]=
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRC1_OFF,
					HAMMER2_VOLUME_ICRC1_SIZE);
			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0]=
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRC0_OFF,
					HAMMER2_VOLUME_ICRC0_SIZE);
			hmp->voldata.icrc_volheader =
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRCVH_OFF,
					HAMMER2_VOLUME_ICRCVH_SIZE);

			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("syncvolhdr %016jx %016jx\n",
					hmp->voldata.mirror_tid,
					hmp->vchain.bref.mirror_tid);
			}
			hmp->volsync = hmp->voldata;
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_VOLUMESYNC);
			hammer2_voldata_unlock(hmp);
			hammer2_chain_unlock(&hmp->fchain);
			break;
		case HAMMER2_BREF_TYPE_DATA:
			/*
			 * Data elements have already been flushed via the
			 * logical file buffer cache.  Their hash was set in
			 * the bref by the vop_write code.  Do not re-dirty.
			 *
			 * Make sure any device buffer(s) have been flushed
			 * out here (there aren't usually any to flush) XXX.
			 */
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
			/*
			 * Buffer I/O will be cleaned up when the volume is
			 * flushed (but the kernel is free to flush it before
			 * then, as well).
			 */
			hammer2_chain_setcheck(chain, chain->data);
			break;
		case HAMMER2_BREF_TYPE_DIRENT:
			/*
			 * A directory entry can use the check area to store
			 * the filename for filenames <= 64 bytes, don't blow
			 * it up!
			 */
			if (chain->bytes)
				hammer2_chain_setcheck(chain, chain->data);
			break;
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * NOTE: We must call io_setdirty() to make any late
			 *	 changes to the inode data, the system might
			 *	 have already flushed the buffer.
			 */
			if (chain->data->ipdata.meta.op_flags &
			    HAMMER2_OPFLAG_PFSROOT) {
				/*
				 * non-NULL pmp if mounted as a PFS.  We must
				 * sync fields cached in the pmp? XXX
				 */
				hammer2_inode_data_t *ipdata;

				hammer2_io_setdirty(chain->dio);
				ipdata = &chain->data->ipdata;
				if (chain->pmp) {
					ipdata->meta.pfs_inum =
						chain->pmp->inode_tid;
				}
			} else {
				/* can't be mounted as a PFS */
			}

			hammer2_chain_setcheck(chain, chain->data);
			break;
		default:
			panic("hammer2_flush_core: unsupported "
			      "embedded bref %d",
			      chain->bref.type);
			/* NOT REACHED */
		}

		/*
		 * If the chain was destroyed try to avoid unnecessary I/O
		 * that might not have yet occurred.  Remove the data range
		 * from dedup candidacy and attempt to invalidate the
		 * potentially dirty portion of the I/O buffer.
		 */
		if (chain->flags & HAMMER2_CHAIN_DESTROY) {
			hammer2_io_dedup_delete(hmp,
						chain->bref.type,
						chain->bref.data_off,
						chain->bytes);
#if 0
			hammer2_io_t *dio;
			if (chain->dio) {
				hammer2_io_inval(chain->dio,
						 chain->bref.data_off,
						 chain->bytes);
			} else if ((dio = hammer2_io_getquick(hmp,
						  chain->bref.data_off,
						  chain->bytes,
						  1)) != NULL) {
				hammer2_io_inval(dio,
						 chain->bref.data_off,
						 chain->bytes);
				hammer2_io_putblk(&dio);
			}
#endif
		}
	}

	/*
	 * If UPDATE is set the parent block table may need to be updated.
	 * This can fail if hammer2_chain_modify() fails.
	 *
	 * NOTE: UPDATE may be set on vchain or fchain in which case
	 *	 parent could be NULL, or on an inode that has not yet
	 *	 been inserted into the radix tree.  It's easiest to allow
	 *	 the case and test for NULL.  parent can also wind up being
	 *	 NULL due to a deletion so we need to handle the case anyway.
	 *
	 * NOTE: UPDATE can be set when chains are renamed into or out of
	 *	 an indirect block, without the chain itself being flagged
	 *	 MODIFIED.
	 *
	 * If no parent exists we can just clear the UPDATE bit.  If the
	 * chain gets reattached later on the bit will simply get set
	 * again.
	 */
	if ((chain->flags & HAMMER2_CHAIN_UPDATE) && parent == NULL)
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);

	/*
	 * When flushing an inode outside of a FLUSH_FSSYNC we must NOT
	 * update the parent block table to point at the flushed inode.
	 * The block table should only ever be updated by the filesystem
	 * sync code.  If we do, inode<->inode dependencies (such as
	 * directory entries vs inode nlink count) can wind up not being
	 * flushed together and result in a broken topology if a crash/reboot
	 * occurs at the wrong time.
	 */
	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
	    (flags & HAMMER2_FLUSH_INODE_STOP) &&
	    (flags & HAMMER2_FLUSH_FSSYNC) == 0 &&
	    (flags & HAMMER2_FLUSH_ALL) == 0 &&
	    chain->pmp && chain->pmp->mp) {
#ifdef HAMMER2_DEBUG_SYNC
		kprintf("inum %ld do not update parent, non-fssync\n",
			(long)chain->bref.key);
#endif
		goto skipupdate;
	}
#ifdef HAMMER2_DEBUG_SYNC
	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
		kprintf("inum %ld update parent\n", (long)chain->bref.key);
#endif

	/*
	 * The chain may need its blockrefs updated in the parent, normal
	 * path.
	 */
	if (chain->flags & HAMMER2_CHAIN_UPDATE) {
		hammer2_blockref_t *base;
		int count;

		/*
		 * Clear UPDATE flag, mark parent modified, update its
		 * modify_tid if necessary, and adjust the parent blockmap.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);

		/*
		 * (optional code)
		 *
		 * Avoid actually modifying and updating the parent if it
		 * was flagged for destruction.  This can greatly reduce
		 * disk I/O in large tree removals because the
		 * hammer2_io_setinval() call in the upward recursion
		 * (see MODIFIED code above) can only handle a few cases.
		 */
		if (parent->flags & HAMMER2_CHAIN_DESTROY) {
			if (parent->bref.modify_tid < chain->bref.modify_tid) {
				parent->bref.modify_tid =
					chain->bref.modify_tid;
			}
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_BMAPPED |
							HAMMER2_CHAIN_BMAPUPD);
			goto skipupdate;
		}

		/*
		 * The flusher is responsible for deleting empty indirect
		 * blocks at this point.  If we don't do this, no major harm
		 * will be done but the empty indirect blocks will stay in
		 * the topology and make it messy and inefficient.
		 *
		 * The flusher is also responsible for collapsing the
		 * content of an indirect block into its parent whenever
		 * possible (with some hysteresis).  Not doing this will also
		 * not harm the topology, but would make it messy and
		 * inefficient.
		 */
		if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
			if (hammer2_chain_indirect_maintenance(parent, chain))
				goto skipupdate;
		}

		/*
		 * We are updating the parent's blockmap, the parent must
		 * be set modified.  If this fails we re-set the UPDATE flag
		 * in the child.
		 *
		 * NOTE! A modification error can be ENOSPC.  We still want
		 *	 to flush modified chains recursively, not break out,
		 *	 so we just skip the update in this situation and
		 *	 continue.  That is, we still need to try to clean
		 *	 out dirty chains and buffers.
		 *
		 *	 This may not help bulkfree though. XXX
		 */
		save_error = hammer2_chain_modify(parent, 0, 0, 0);
		if (save_error) {
			info->error |= save_error;
			kprintf("hammer2_flush: %016jx.%02x error=%08x\n",
				parent->bref.data_off, parent->bref.type,
				save_error);
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
			goto skipupdate;
		}
		if (parent->bref.modify_tid < chain->bref.modify_tid)
			parent->bref.modify_tid = chain->bref.modify_tid;

		/*
		 * Calculate blockmap pointer
		 */
		switch(parent->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * Access the inode's block array.  However, there is
			 * no block array if the inode is flagged DIRECTDATA.
			 */
			if (parent->data &&
			    (parent->data->ipdata.meta.op_flags &
			     HAMMER2_OPFLAG_DIRECTDATA) == 0) {
				base = &parent->data->
					ipdata.u.blockset.blockref[0];
			} else {
				base = NULL;
			}
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
			if (parent->data)
				base = &parent->data->npdata[0];
			else
				base = NULL;
			count = parent->bytes / sizeof(hammer2_blockref_t);
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			base = &chain->hmp->voldata.sroot_blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_FREEMAP:
			base = &parent->data->npdata[0];
			count = HAMMER2_SET_COUNT;
			break;
		default:
			base = NULL;
			count = 0;
			panic("hammer2_flush_core: "
			      "unrecognized blockref type: %d",
			      parent->bref.type);
			break;
		}

		/*
		 * Blocktable updates
		 *
		 * We synchronize pending statistics at this time.  Delta
		 * adjustments designated for the current and upper level
		 * are synchronized.
		 */
		if (base && (chain->flags & HAMMER2_CHAIN_BMAPUPD)) {
			if (chain->flags & HAMMER2_CHAIN_BMAPPED) {
				hammer2_spin_ex(&parent->core.spin);
				hammer2_base_delete(parent, base, count, chain,
						    NULL);
				hammer2_spin_unex(&parent->core.spin);
				/* base_delete clears both bits */
			} else {
				atomic_clear_int(&chain->flags,
						 HAMMER2_CHAIN_BMAPUPD);
			}
		}
		if (base && (chain->flags & HAMMER2_CHAIN_BMAPPED) == 0) {
			hammer2_spin_ex(&parent->core.spin);
			hammer2_base_insert(parent, base, count,
					    chain, &chain->bref);
			hammer2_spin_unex(&parent->core.spin);
			/* base_insert sets BMAPPED */
		}
	}
skipupdate:
	if (parent)
		hammer2_chain_unlock(parent);

	/*
	 * Final cleanup after flush
	 */
done:
	KKASSERT(chain->refs > 0);
	if (hammer2_debug & 0x200) {
		if (info->debug == chain)
			info->debug = NULL;
	}
	return retry;
}

/*
 * Flush recursion helper, called from flush_core, calls flush_core.
 *
 * Flushes the children of the caller's chain (info->parent), restricted
 * by sync_tid.
 *
 * This function may set info->error as a side effect.
 *
 * WARNING! If we do not call hammer2_flush_core() we must update
 *	    bref.mirror_tid ourselves to indicate that the flush has
 *	    processed the child.
 *
 * WARNING! parent->core spinlock is held on entry and return.
 */
static int
hammer2_flush_recurse(hammer2_chain_t *child, void *data)
{
	hammer2_flush_info_t *info = data;
	hammer2_chain_t *parent = info->parent;

#ifdef HAMMER2_SCAN_DEBUG
	++info->scan_count;
	if (child->flags & HAMMER2_CHAIN_MODIFIED)
		++info->scan_mod_count;
	if (child->flags & HAMMER2_CHAIN_UPDATE)
		++info->scan_upd_count;
	if (child->flags & HAMMER2_CHAIN_ONFLUSH)
		++info->scan_onf_count;
#endif

	/*
	 * (child can never be fchain or vchain so a special check isn't
	 *  needed).
	 *
	 * We must ref the child before unlocking the spinlock.
	 *
	 * The caller has added a ref to the parent so we can temporarily
	 * unlock it in order to lock the child.  However, if it no longer
	 * winds up being the child of the parent we must skip this child.
	 *
	 * NOTE! chain locking errors are fatal.  They are never out-of-space
	 *	 errors.
	 */
	hammer2_chain_ref(child);
	hammer2_spin_unex(&parent->core.spin);

	hammer2_chain_ref_hold(parent);
	hammer2_chain_unlock(parent);
	hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);
	if (child->parent != parent) {
		kprintf("LOST CHILD1 %p->%p (actual parent %p)\n",
			parent, child, child->parent);
		goto done;
	}
	if (child->error) {
		kprintf("CHILD ERROR DURING FLUSH LOCK %p->%p\n",
			parent, child);
		info->error |= child->error;
		goto done;
	}

	/*
	 * Must propagate the DESTROY flag downwards, otherwise the
	 * parent could end up never being removed because it will
	 * be requeued to the flusher if it survives this run due to
	 * the flag.
	 */
	if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
		atomic_set_int(&child->flags, HAMMER2_CHAIN_DESTROY);
#ifdef HAMMER2_SCAN_DEBUG
	if (child->flags & HAMMER2_CHAIN_DESTROY)
		++info->scan_del_count;
#endif
	/*
	 * Special handling of the root inode.  Because the root inode
	 * contains an index of all the inodes in the PFS in addition to
	 * its normal directory entries, any flush that is not part of a
	 * filesystem sync must only flush the directory entries, and not
	 * anything else.
	 *
	 * The child might be an indirect block, but H2 guarantees that
	 * the key-range will fully partition the inode index from the
	 * directory entries so the case just works naturally.
	 */
	if ((parent->bref.flags & HAMMER2_BREF_FLAG_PFSROOT) &&
	    (child->flags & HAMMER2_CHAIN_DESTROY) == 0 &&
	    parent->bref.type == HAMMER2_BREF_TYPE_INODE &&
	    (info->flags & HAMMER2_FLUSH_FSSYNC) == 0) {
		if ((child->bref.key & HAMMER2_DIRHASH_VISIBLE) == 0) {
			if (child->flags & HAMMER2_CHAIN_FLUSH_MASK) {
				hammer2_chain_setflush(parent);
			}
			goto done;
		}
	}

	/*
	 * Recurse and collect deferral data.  We're in the media flush,
	 * this can cross PFS boundaries.
	 */
	if (child->flags & HAMMER2_CHAIN_FLUSH_MASK) {
#ifdef HAMMER2_SCAN_DEBUG
		if (child->bref.type < 7)
			++info->scan_btype[child->bref.type];
#endif
		++info->depth;
		hammer2_flush_core(info, child, info->flags);
		--info->depth;
	} else if (hammer2_debug & 0x200) {
		if (info->debug == NULL)
			info->debug = child;
		++info->depth;
		hammer2_flush_core(info, child, info->flags);
		--info->depth;
		if (info->debug == child)
			info->debug = NULL;
	}

done:
	/*
	 * Relock to continue the loop.
	 */
	hammer2_chain_unlock(child);
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
	hammer2_chain_drop_unhold(parent);
	if (parent->error) {
		kprintf("PARENT ERROR DURING FLUSH LOCK %p->%p\n",
			parent, child);
		info->error |= parent->error;
	}
	hammer2_chain_drop(child);
	KKASSERT(info->parent == parent);
	hammer2_spin_ex(&parent->core.spin);

	return (0);
}

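/*
 * Illustrative skeleton (excluded from the build) of the scan contract
 * used above: RB_SCAN() invokes the callback for each child while the
 * caller holds parent->core.spin, so a callback that needs to block must
 * ref the child, drop the spinlock around the blocking work, and
 * reacquire the spinlock before returning, as hammer2_flush_recurse()
 * does.
 */
#if 0
static int
example_scan_cb(hammer2_chain_t *child, void *data)
{
	hammer2_flush_info_t *info = data;
	hammer2_chain_t *parent = info->parent;

	hammer2_chain_ref(child);		/* pin child first */
	hammer2_spin_unex(&parent->core.spin);	/* may now block */

	/* ... blocking work on the child goes here ... */

	hammer2_chain_drop(child);
	hammer2_spin_ex(&parent->core.spin);	/* restore for RB_SCAN */
	return (0);
}
#endif
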
/*
 * flush helper (backend threaded)
 *
 * Flushes chain topology for the specified inode.
 *
 * HAMMER2_XOP_INODE_STOP	The flush recursion stops at inode boundaries.
 *				Inodes belonging to the same flush are flushed
 *				separately.
 *
 * chain->parent can be NULL, usually due to destroy races or detached inodes.
 *
 * Primarily called from vfs_sync().
 */
void
hammer2_xop_inode_flush(hammer2_xop_t *arg, void *scratch __unused, int clindex)
{
	hammer2_xop_flush_t *xop = &arg->xop_flush;
	hammer2_chain_t *chain;
	hammer2_inode_t *ip;
	hammer2_dev_t *hmp;
	hammer2_pfs_t *pmp;
	hammer2_devvp_t *e;
	struct vnode *devvp;
	int flush_error = 0;
	int fsync_error = 0;
	int total_error = 0;
	int j;
	int xflags;
	int ispfsroot = 0;

	xflags = HAMMER2_FLUSH_TOP;
	if (xop->head.flags & HAMMER2_XOP_INODE_STOP)
		xflags |= HAMMER2_FLUSH_INODE_STOP;
	if (xop->head.flags & HAMMER2_XOP_FSSYNC)
		xflags |= HAMMER2_FLUSH_FSSYNC;

	/*
	 * Flush core chains
	 */
	ip = xop->head.ip1;
	pmp = ip->pmp;
	chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
	if (chain) {
		hmp = chain->hmp;
		if (chain->flags & HAMMER2_CHAIN_FLUSH_MASK) {
			/*
			 * Due to flush partitioning the chain topology
			 * above the inode's chain may no longer be flagged.
			 * When asked to flush an inode, remark the topology
			 * leading to that inode.
			 */
			if (chain->parent)
				hammer2_chain_setflush(chain->parent);
			hammer2_flush(chain, xflags);

			/* XXX cluster */
			if (ip == pmp->iroot && pmp != hmp->spmp) {
				hammer2_spin_ex(&pmp->inum_spin);
				pmp->pfs_iroot_blocksets[clindex] =
					chain->data->ipdata.u.blockset;
				hammer2_spin_unex(&pmp->inum_spin);
			}

#if 0
			/*
			 * Propagate upwards but only cross an inode boundary
			 * for inodes associated with the current filesystem
			 * sync.
			 */
			if ((xop->head.flags & HAMMER2_XOP_PARENTONFLUSH) ||
			    chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
				parent = chain->parent;
				if (parent)
					hammer2_chain_setflush(parent);
			}
#endif
		}
		if (chain->flags & HAMMER2_CHAIN_PFSBOUNDARY)
			ispfsroot = 1;
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
	} else {
		hmp = NULL;
	}

	/*
	 * Only flush the volume header if asked to, and only when the
	 * inode is also the PFS root.
	 */
	if ((xop->head.flags & HAMMER2_XOP_VOLHDR) == 0)
		goto skip;
	if (ispfsroot == 0)
		goto skip;

	/*
	 * Flush volume roots.  Avoid replication; we only want to
	 * flush each hammer2_dev (hmp) once.
	 */
	for (j = clindex - 1; j >= 0; --j) {
		if ((chain = ip->cluster.array[j].chain) != NULL) {
			if (chain->hmp == hmp) {
				chain = NULL;	/* safety */
				goto skip;
			}
		}
	}
	chain = NULL;	/* safety */

	/*
	 * spmp transaction.  The super-root is never directly mounted so
	 * there shouldn't be any vnodes, let alone any dirty vnodes
	 * associated with it, so we shouldn't have to mess around with any
	 * vnode flushes here.
	 */
	hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);

	/*
	 * We must flush the superroot down to the PFS iroot.  Remember
	 * that hammer2_chain_setflush() stops at inode boundaries, so
	 * the pmp->iroot has been flushed and flagged down to the superroot,
	 * but the volume root (vchain) probably has not yet been flagged.
	 */
	if (hmp->spmp->iroot) {
		chain = hmp->spmp->iroot->cluster.array[0].chain;
		if (chain) {
			hammer2_chain_ref(chain);
			hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
			flush_error |=
				hammer2_flush(chain,
					      HAMMER2_FLUSH_TOP |
					      HAMMER2_FLUSH_INODE_STOP |
					      HAMMER2_FLUSH_FSSYNC);
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
	}

	/*
	 * Media mounts have two 'roots', vchain for the topology
	 * and fchain for the free block table.  Flush both.
	 *
	 * Note that the topology and free block table are handled
	 * independently, so the free block table can wind up being
	 * ahead of the topology.  We depend on the bulk free scan
	 * code to deal with any loose ends.
	 *
	 * vchain and fchain do not error on-lock since their data does
	 * not have to be re-read from media.
	 */
	hammer2_chain_ref(&hmp->vchain);
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_ref(&hmp->fchain);
	hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		/*
		 * This will also modify vchain as a side effect,
		 * mark vchain as modified now.
		 */
		hammer2_voldata_modify(hmp);
		chain = &hmp->fchain;
		flush_error |= hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		KKASSERT(chain == &hmp->fchain);
	}
	hammer2_chain_unlock(&hmp->fchain);
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->fchain);
	/* vchain dropped down below */

	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		chain = &hmp->vchain;
		flush_error |= hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		KKASSERT(chain == &hmp->vchain);
	}
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->vchain);

	/*
	 * We can't safely flush the volume header until we have
	 * flushed any device buffers which have built up.
	 *
	 * XXX this isn't being incremental
	 */
	TAILQ_FOREACH(e, &hmp->devvpl, entry) {
		devvp = e->devvp;
		KKASSERT(devvp);
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		fsync_error = VOP_FSYNC(devvp, MNT_WAIT, 0);
		vn_unlock(devvp);
		if (fsync_error || flush_error) {
			kprintf("hammer2: sync error fsync=%d h2flush=0x%04x dev=%s\n",
				fsync_error, flush_error, e->path);
		}
	}

	/*
	 * The flush code sets CHAIN_VOLUMESYNC to indicate that the
	 * volume header needs synchronization via hmp->volsync.
	 *
	 * XXX synchronize the flag & data with only this flush XXX
	 */
	if (fsync_error == 0 && flush_error == 0 &&
	    (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) {
		struct buf *bp;
		int vol_error = 0;

		/*
		 * Synchronize the disk before flushing the volume
		 * header.
		 */
		bp = getpbuf(NULL);
		bp->b_bio1.bio_offset = 0;
		bp->b_bufsize = 0;
		bp->b_bcount = 0;
		bp->b_cmd = BUF_CMD_FLUSH;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;
		vn_strategy(hmp->devvp, &bp->b_bio1);
		fsync_error = biowait(&bp->b_bio1, "h2vol");
		relpbuf(bp, NULL);

		/*
		 * Then we can safely flush the version of the
		 * volume header synchronized by the flush code.
		 */
		j = hmp->volhdrno + 1;
		if (j < 0)
			j = 0;
		if (j >= HAMMER2_NUM_VOLHDRS)
			j = 0;
		if (j * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE >
		    hmp->volsync.volu_size) {
			j = 0;
		}
		if (hammer2_debug & 0x8000) {
			/* debug only, avoid syslogd loop */
			kprintf("sync volhdr %d %jd\n",
				j, (intmax_t)hmp->volsync.volu_size);
		}
		bp = getblk(hmp->devvp, j * HAMMER2_ZONE_BYTES64,
			    HAMMER2_PBUFSIZE, GETBLK_KVABIO, 0);
		atomic_clear_int(&hmp->vchain.flags,
				 HAMMER2_CHAIN_VOLUMESYNC);
		bkvasync(bp);
		bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE);
		vol_error = bwrite(bp);
		hmp->volhdrno = j;
		if (vol_error)
			fsync_error = vol_error;
	}
	if (flush_error)
		total_error = flush_error;
	if (fsync_error)
		total_error = hammer2_errno_to_error(fsync_error);

	/* spmp trans */
	hammer2_trans_done(hmp->spmp, HAMMER2_TRANS_ISFLUSH);
skip:
	hammer2_xop_feed(&xop->head, NULL, clindex, total_error);
}
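
/*
 * Illustrative sketch (condensed, names approximate and unverified): the
 * frontend runs this backend through the XOP framework, e.g. from the
 * filesystem sync path, along these lines:
 */
#if 0
	hammer2_xop_flush_t *xop;
	int error;

	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
#endif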