/* xref: /dragonfly/sys/vfs/hammer2/hammer2_flush.c (revision c87dd536) */
/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 *			TRANSACTION AND FLUSH HANDLING
 *
 * Deceptively simple but actually fairly difficult to implement properly is
 * how I would describe it.
 *
 * Flushing generally occurs bottom-up but requires a top-down scan to
 * locate chains with MODIFIED and/or UPDATE bits set.  The ONFLUSH flag
 * tells how to recurse downward to find these chains.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define FLUSH_DEBUG 0

#define HAMMER2_FLUSH_DEPTH_LIMIT	60	/* stack recursion limit */


/*
 * Recursively flush the specified chain.  The chain is locked and
 * referenced by the caller and will remain so on return.  The chain
 * will remain referenced throughout but can temporarily lose its
 * lock during the recursion to avoid unnecessarily stalling user
 * processes.
 */
struct hammer2_flush_info {
	hammer2_chain_t *parent;
	int		depth;
	long		diddeferral;
	int		error;			/* cumulative error */
	int		flags;
#ifdef HAMMER2_SCAN_DEBUG
	long		scan_count;
	long		scan_mod_count;
	long		scan_upd_count;
	long		scan_onf_count;
	long		scan_del_count;
	long		scan_btype[7];
	long		flushq_count;
#endif
	struct h2_flush_list flushq;
	hammer2_chain_t	*debug;
};

typedef struct hammer2_flush_info hammer2_flush_info_t;

static void hammer2_flush_core(hammer2_flush_info_t *info,
				hammer2_chain_t *chain, int flags);
static int hammer2_flush_recurse(hammer2_chain_t *child, void *data);

/*
 * Any per-pfs transaction initialization goes here.
 */
void
hammer2_trans_manage_init(hammer2_pfs_t *pmp)
{
}

/*
 * Transaction support for any modifying operation.  Transactions are used
 * in the pmp layer by the frontend and in the spmp layer by the backend.
 *
 * 0			- Normal transaction, interlocked against flush
 *			  transaction.
 *
 * TRANS_ISFLUSH	- Flush transaction, interlocked against normal
 *			  transaction.
 *
 * TRANS_BUFCACHE	- Buffer cache transaction, no interlock.
 *
 * Initializing a new transaction allocates a transaction ID.  Typically
 * passed a pmp (hmp passed as NULL), indicating a cluster transaction.  Can
 * be passed a NULL pmp and non-NULL hmp to indicate a transaction on a single
 * media target.  The latter mode is used by the recovery code.
 *
 * TWO TRANSACTION IDs can run concurrently, where one is a flush and the
 * other is a set of any number of concurrent filesystem operations.  We
 * can either have <running_fs_ops> + <waiting_flush> + <blocked_fs_ops>
 * or we can have <running_flush> + <concurrent_fs_ops>.
 *
 * During a flush, new fs_ops are only blocked until the fs_ops prior to
 * the flush complete.  The new fs_ops can then run concurrent with the flush.
 *
 * Buffer-cache transactions operate as fs_ops but never block.  A
 * buffer-cache flush will run either before or after the current pending
 * flush depending on its state.
 */
void
hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;
	int dowait;

	for (;;) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		dowait = 0;

		if (flags & HAMMER2_TRANS_ISFLUSH) {
			/*
			 * Requesting flush transaction.  Wait for all
			 * currently running transactions to finish.
			 * Afterwards, normal transactions will be
			 * interlocked.
			 */
			if (oflags & HAMMER2_TRANS_MASK) {
				nflags = oflags | HAMMER2_TRANS_FPENDING |
						  HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
		} else if (flags & HAMMER2_TRANS_BUFCACHE) {
			/*
			 * Requesting strategy transaction from buffer-cache,
			 * or a VM getpages/putpages through the buffer cache.
			 * We must allow such transactions in all situations
			 * to avoid deadlocks.
			 */
			nflags = (oflags | flags) + 1;
#if 0
			/*
			 * (old) previous code interlocked against the main
			 *	 flush pass.
			 */
			if ((oflags & (HAMMER2_TRANS_ISFLUSH |
				       HAMMER2_TRANS_PREFLUSH)) ==
			    HAMMER2_TRANS_ISFLUSH) {
				nflags = oflags | HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
#endif
		} else {
			/*
			 * Requesting a normal modifying transaction.
			 * Waits for any flush to finish before allowing.
			 * Multiple modifying transactions can run
			 * concurrently.
			 *
			 * If a flush is pending for more than one second
			 * but can't run because many modifying transactions
			 * are active, we wait for the flush to be granted.
			 *
			 * NOTE: Remember that non-modifying operations
			 *	 such as read, stat, readdir, etc, do
			 *	 not use transactions.
			 */
			if ((oflags & HAMMER2_TRANS_FPENDING) &&
			    (u_int)(ticks - pmp->trans.fticks) >= (u_int)hz) {
				nflags = oflags | HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else if (oflags & HAMMER2_TRANS_ISFLUSH) {
				nflags = oflags | HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
		}
		if (dowait)
			tsleep_interlock(&pmp->trans.sync_wait, 0);
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			if ((oflags & HAMMER2_TRANS_FPENDING) == 0 &&
			    (nflags & HAMMER2_TRANS_FPENDING)) {
				pmp->trans.fticks = ticks;
			}
			if (dowait == 0)
				break;
			tsleep(&pmp->trans.sync_wait, PINTERLOCKED,
			       "h2trans", hz);
		} else {
			cpu_pause();
		}
		/* retry */
	}
}

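/*
 * Usage sketch (illustrative only, not part of this file's control flow):
 * a modifying front-end operation brackets its work in a normal
 * transaction, while the syncer uses a flush transaction.  The calls
 * shown are defined in this file; the elided work is hypothetical.
 *
 *	hammer2_trans_init(pmp, 0);		(normal, interlocks flush)
 *	mtid = hammer2_trans_sub(pmp);		(CLC id for this op)
 *	...modify topology, stamp the inode with mtid...
 *	hammer2_trans_done(pmp, 1);		(quicksideq may run sideq)
 *
 *	hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
 *	...hammer2_flush() calls...
 *	hammer2_trans_done(pmp, 0);
 */
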
/*
 * Start a sub-transaction, there is no 'subdone' function.  This will
 * issue a new modify_tid (mtid) for the current transaction, which is a
 * CLC (cluster level change) id and not a per-node id.
 *
 * This function must be called for each XOP when multiple XOPs are run in
 * sequence within a transaction.
 *
 * Callers typically update the inode with the transaction mtid manually
 * to enforce sequencing.
 */
hammer2_tid_t
hammer2_trans_sub(hammer2_pfs_t *pmp)
{
	hammer2_tid_t mtid;

	mtid = atomic_fetchadd_64(&pmp->modify_tid, 1);

	return (mtid);
}

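/*
 * Example (hypothetical sketch): sequencing two XOPs within one
 * transaction.  Each XOP gets its own mtid and the caller stamps the
 * in-memory inode so the second operation cannot appear to precede the
 * first.  Everything other than hammer2_trans_sub() is assumed.
 *
 *	mtid = hammer2_trans_sub(pmp);
 *	...queue first XOP using mtid, stamp inode meta...
 *	mtid = hammer2_trans_sub(pmp);
 *	...queue second XOP using the newer mtid...
 */
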
void
hammer2_trans_done(hammer2_pfs_t *pmp, int quicksideq)
{
	uint32_t oflags;
	uint32_t nflags;

	/*
	 * Modifying ops on the front-end can cause dirty inodes to
	 * build up in the sideq.  We don't flush these on inactive/reclaim
	 * due to potential deadlocks, so we have to deal with them from
	 * inside other nominal modifying front-end transactions.
	 */
	if (quicksideq && pmp->sideq_count > (pmp->inum_count >> 3))
		hammer2_inode_run_sideq(pmp, 0);

	/*
	 * Clean-up the transaction
	 */
	for (;;) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		KKASSERT(oflags & HAMMER2_TRANS_MASK);
		if ((oflags & HAMMER2_TRANS_MASK) == 1) {
			/*
			 * This was the last transaction
			 */
			nflags = (oflags - 1) & ~(HAMMER2_TRANS_ISFLUSH |
						  HAMMER2_TRANS_BUFCACHE |
						  HAMMER2_TRANS_FPENDING |
						  HAMMER2_TRANS_WAITING);
		} else {
			/*
			 * Still transactions pending
			 */
			nflags = oflags - 1;
		}
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			if ((nflags & HAMMER2_TRANS_MASK) == 0 &&
			    (oflags & HAMMER2_TRANS_WAITING)) {
				wakeup(&pmp->trans.sync_wait);
			}
			break;
		} else {
			cpu_pause();
		}
		/* retry */
	}
}

/*
 * Obtain new, unique inode number (not serialized by caller).
 */
hammer2_tid_t
hammer2_trans_newinum(hammer2_pfs_t *pmp)
{
	hammer2_tid_t tid;

	tid = atomic_fetchadd_64(&pmp->inode_tid, 1);

	return tid;
}

/*
 * Assert that a strategy call is ok here.  Currently we allow strategy
 * calls in all situations, including during flushes.  Previously:
 *	(old) (1) In a normal transaction.
 *	(old) (2) In a flush transaction only if PREFLUSH is also set.
 */
void
hammer2_trans_assert_strategy(hammer2_pfs_t *pmp)
{
#if 0
	KKASSERT((pmp->trans.flags & HAMMER2_TRANS_ISFLUSH) == 0 ||
		 (pmp->trans.flags & HAMMER2_TRANS_PREFLUSH));
#endif
}


/*
 * Chains undergoing destruction are removed from the in-memory topology.
 * To avoid getting lost, these chains are placed on the delayed flush
 * queue which will properly dispose of them.
 *
 * We do this instead of issuing an immediate flush in order to give
 * recursive deletions (rm -rf, etc) a chance to remove more of the
 * hierarchy, potentially allowing an enormous amount of write I/O to
 * be avoided.
 *
 * NOTE: The flush code tests HAMMER2_CHAIN_DESTROY to differentiate
 *	 between these chains and the deep-recursion requeue.
 */
void
hammer2_delayed_flush(hammer2_chain_t *chain)
{
	if ((chain->flags & HAMMER2_CHAIN_DELAYED) == 0) {
		hammer2_spin_ex(&chain->hmp->list_spin);
		if ((chain->flags & (HAMMER2_CHAIN_DELAYED |
				     HAMMER2_CHAIN_DEFERRED)) == 0) {
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELAYED |
						      HAMMER2_CHAIN_DEFERRED);
			TAILQ_INSERT_TAIL(&chain->hmp->flushq,
					  chain, flush_node);
			hammer2_chain_ref(chain);
		}
		hammer2_spin_unex(&chain->hmp->list_spin);
		hammer2_voldata_modify(chain->hmp);
	}
}

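/*
 * Illustrative call site (assumed, not from this file): a destroy path
 * queues the doomed chain instead of flushing it immediately, letting a
 * large recursive delete batch up and cancel most of the write I/O:
 *
 *	atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);
 *	hammer2_delayed_flush(chain);
 */
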
/*
 * Flush the chain and all modified sub-chains through the specified
 * synchronization point, propagating blockref updates back up.  As
 * part of this propagation, mirror_tid and inode/data usage statistics
 * propagate back upward.
 *
 * Returns a HAMMER2 error code, 0 if no error.  Note that I/O errors from
 * buffers dirtied during the flush operation can occur later.
 *
 * modify_tid (clc - cluster level change) is not propagated.
 *
 * update_tid (clc) is used for validation and is not propagated by this
 * function.
 *
 * This routine can be called from several places but the most important
 * is from VFS_SYNC (frontend) via hammer2_inode_xop_flush (backend).
 *
 * chain is locked on call and will remain locked on return.  The chain's
 * UPDATE flag indicates that its parent's block table (which is not yet
 * part of the flush) should be updated.
 *
 * flags:
 *	HAMMER2_FLUSH_TOP	Indicates that this is the top of the flush.
 *				Is cleared for the recursion.
 *
 *	HAMMER2_FLUSH_ALL	Recurse everything
 *
 *	HAMMER2_FLUSH_INODE_STOP
 *				Stop at PFS inode or normal inode boundary
 */
int
hammer2_flush(hammer2_chain_t *chain, int flags)
{
	hammer2_chain_t *scan;
	hammer2_flush_info_t info;
	hammer2_dev_t *hmp;
	int loops;

	/*
	 * Execute the recursive flush and handle deferrals.
	 *
	 * Chains can be ridiculously long (thousands deep), so to
	 * avoid blowing out the kernel stack the recursive flush has a
	 * depth limit.  Elements at the limit are placed on a list
	 * for re-execution after the stack has been popped.
	 */
	bzero(&info, sizeof(info));
	TAILQ_INIT(&info.flushq);
	info.flags = flags & ~HAMMER2_FLUSH_TOP;

	/*
	 * Calculate parent (can be NULL), if not NULL the flush core
	 * expects the parent to be referenced so it can easily lock/unlock
	 * it without it getting ripped up.
	 */
	if ((info.parent = chain->parent) != NULL)
		hammer2_chain_ref(info.parent);

	/*
	 * Extra ref needed because flush_core expects it when replacing
	 * chain.
	 */
	hammer2_chain_ref(chain);
	hmp = chain->hmp;
	loops = 0;

	for (;;) {
		/*
		 * Move hmp->flushq to info.flushq if non-empty so it can
		 * be processed.
		 */
		if (TAILQ_FIRST(&hmp->flushq) != NULL) {
			hammer2_spin_ex(&chain->hmp->list_spin);
			TAILQ_CONCAT(&info.flushq, &hmp->flushq, flush_node);
			hammer2_spin_unex(&chain->hmp->list_spin);
		}

		/*
		 * Unwind deep recursions which had been deferred.  This
		 * can leave the FLUSH_* bits set for these chains, which
		 * will be handled when we [re]flush chain after the unwind.
		 */
		while ((scan = TAILQ_FIRST(&info.flushq)) != NULL) {
			KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
			TAILQ_REMOVE(&info.flushq, scan, flush_node);
#ifdef HAMMER2_SCAN_DEBUG
			++info.flushq_count;
#endif
			atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED |
						       HAMMER2_CHAIN_DELAYED);

			/*
			 * Now that we've popped back up we can do a secondary
			 * recursion on the deferred elements.
			 *
			 * NOTE: hmp->flushq chains (marked DESTROY) must be
			 *	 handled unconditionally so they can be cleaned
			 *	 out.
			 *
			 * NOTE: hammer2_flush() may replace scan.
			 */
			if (hammer2_debug & 0x0040)
				kprintf("deferred flush %p\n", scan);
			hammer2_chain_lock(scan, HAMMER2_RESOLVE_MAYBE);
			if (scan->error == 0) {
				if (scan->flags & HAMMER2_CHAIN_DESTROY) {
					hammer2_flush(scan,
						    flags |
						    HAMMER2_FLUSH_TOP |
						    HAMMER2_FLUSH_ALL);
				} else {
					hammer2_flush(scan,
						    flags & ~HAMMER2_FLUSH_TOP);
				}
			} else {
				info.error |= scan->error;
			}
			hammer2_chain_unlock(scan);
			hammer2_chain_drop(scan);	/* ref from defer */
		}

		/*
		 * [re]flush chain as the deep recursion may have generated
		 * additional modifications.
		 */
		info.diddeferral = 0;
		if (info.parent != chain->parent) {
			if (hammer2_debug & 0x0040) {
				kprintf("LOST CHILD4 %p->%p "
					"(actual parent %p)\n",
					info.parent, chain, chain->parent);
			}
			hammer2_chain_drop(info.parent);
			info.parent = chain->parent;
			hammer2_chain_ref(info.parent);
		}
		hammer2_flush_core(&info, chain, flags);

		/*
		 * Only loop if deep recursions have been deferred.
		 */
		if (TAILQ_EMPTY(&info.flushq))
			break;

		if (++loops % 1000 == 0) {
			kprintf("hammer2_flush: excessive loops on %p\n",
				chain);
			if (hammer2_debug & 0x100000)
				Debugger("hell4");
		}
	}
#ifdef HAMMER2_SCAN_DEBUG
	if (info.scan_count >= 10) {
		kprintf("hammer2_flush: scan_count %ld (%ld,%ld,%ld,%ld) "
			"bt(%ld,%ld,%ld,%ld,%ld,%ld) flushq %ld\n",
			info.scan_count,
			info.scan_mod_count,
			info.scan_upd_count,
			info.scan_onf_count,
			info.scan_del_count,
			info.scan_btype[1],
			info.scan_btype[2],
			info.scan_btype[3],
			info.scan_btype[4],
			info.scan_btype[5],
			info.scan_btype[6],
			info.flushq_count);
	}
#endif
	hammer2_chain_drop(chain);
	if (info.parent)
		hammer2_chain_drop(info.parent);
	return (info.error);
}

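/*
 * Usage sketch (illustrative; names are from this file): the backend sync
 * path flushes an inode chain, stopping at the inode boundary, then marks
 * the parent so meta-data above the inode is picked up by a later stage:
 *
 *	error = hammer2_flush(chain, HAMMER2_FLUSH_TOP |
 *				     HAMMER2_FLUSH_INODE_STOP);
 *	if (chain->parent)
 *		hammer2_chain_setflush(chain->parent);
 */
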
/*
 * This is the core of the chain flushing code.  The chain is locked by the
 * caller and must also have an extra ref on it by the caller, and remains
 * locked and will have an extra ref on return.  info.parent is referenced
 * but not locked.
 *
 * Upon return, the caller can test the UPDATE bit on the chain to determine
 * if the parent needs updating.
 *
 * (1) Determine if this node is a candidate for the flush, return if it is
 *     not.  fchain and vchain are always candidates for the flush.
 *
 * (2) If we recurse too deep the chain is entered onto the deferral list and
 *     the current flush stack is aborted until after the deferral list is
 *     run.
 *
 * (3) Recursively flush live children (rbtree).  This can create deferrals.
 *     A successful flush clears the MODIFIED and UPDATE bits on the children
 *     and typically causes the parent to be marked MODIFIED as the children
 *     update the parent's block table.  A parent might already be marked
 *     MODIFIED due to a deletion (whose blocktable update in the parent is
 *     handled by the frontend), or if the parent itself is modified by the
 *     frontend for other reasons.
 *
 * (4) Permanently disconnected sub-trees are cleaned up by the front-end.
 *     Deleted-but-open inodes can still be individually flushed via the
 *     filesystem syncer.
 *
 * (5) Delete parents on the way back up if they are normal indirect blocks
 *     and have no children.
 *
 * (6) Note that an unmodified child may still need the block table in its
 *     parent updated (e.g. rename/move).  The child will have UPDATE set
 *     in this case.
 *
 *			WARNING ON BREF MODIFY_TID/MIRROR_TID
 *
 * blockref.modify_tid is consistent only within a PFS, and will not be
 * consistent during synchronization.  mirror_tid is consistent across the
 * block device regardless of the PFS.
 */
static void
hammer2_flush_core(hammer2_flush_info_t *info, hammer2_chain_t *chain,
		   int flags)
{
	hammer2_chain_t *parent;
	hammer2_dev_t *hmp;
	int save_error;

	/*
	 * (1) Optimize downward recursion to locate nodes needing action.
	 *     Nothing to do if none of these flags are set.
	 */
	if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) == 0) {
		if (hammer2_debug & 0x200) {
			if (info->debug == NULL)
				info->debug = chain;
		} else {
			return;
		}
	}

	hmp = chain->hmp;

	/*
	 * NOTE: parent can be NULL, usually due to destroy races.
	 */
	parent = info->parent;
	KKASSERT(chain->parent == parent);

	/*
	 * Downward search recursion
	 *
	 * We must be careful on cold stops.  If CHAIN_UPDATE is set and
	 * we stop cold (versus a deferral which will re-run the chain later),
	 * the update can wind up never being applied.  This situation most
	 * typically occurs on inode boundaries due to the way
	 * hammer2_vfs_sync() breaks-up the flush.  As a safety, we
	 * flush-through such situations.
	 */
	if (chain->flags & (HAMMER2_CHAIN_DEFERRED | HAMMER2_CHAIN_DELAYED)) {
		/*
		 * Already deferred.
		 */
		++info->diddeferral;
	} else if ((chain->flags & HAMMER2_CHAIN_PFSBOUNDARY) &&
		   (chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
		   (flags & HAMMER2_FLUSH_ALL) == 0 &&
		   (flags & HAMMER2_FLUSH_TOP) == 0 &&
		   chain->pmp && chain->pmp->mp) {
		/*
		 * If FLUSH_ALL is not specified the caller does not want
		 * to recurse through PFS roots that have been mounted.
		 *
		 * (If the PFS has not been mounted there may not be
		 *  anything monitoring its chains and it's up to us
		 *  to flush it).
		 *
		 * The typical sequence is to flush dirty PFS's starting at
		 * their root downward, then flush the device root (vchain).
		 * It is this second flush that typically leaves out the
		 * ALL flag.
		 *
		 * However we must still process the PFSROOT chains for block
		 * table updates in their parent (which IS part of our flush).
		 *
		 * NOTE: The volume root, vchain, does not set PFSBOUNDARY.
		 *
		 * NOTE: This test must be done before the depth-limit test,
		 *	 else it might become the top on a flushq iteration.
		 *
		 * NOTE: We must re-set ONFLUSH in the parent to retain if
		 *	 this chain (that we are skipping) requires work.
		 */
		if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				    HAMMER2_CHAIN_DESTROY |
				    HAMMER2_CHAIN_MODIFIED)) {
			hammer2_chain_setflush(parent);
		}
	} else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		   (chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
		   (flags & HAMMER2_FLUSH_INODE_STOP) &&
		   (flags & HAMMER2_FLUSH_ALL) == 0 &&
		   (flags & HAMMER2_FLUSH_TOP) == 0 &&
		   chain->pmp && chain->pmp->mp) {
		/*
		 * If FLUSH_INODE_STOP is specified and both ALL and TOP
		 * are clear, we must not flush the chain.  The chain should
		 * have already been flushed and any further ONFLUSH/UPDATE
		 * setting will be related to the next flush.
		 *
		 * This feature allows us to flush inodes independently of
		 * each other and meta-data above the inodes separately.
		 */
		if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				    HAMMER2_CHAIN_DESTROY |
				    HAMMER2_CHAIN_MODIFIED)) {
			if (parent)
				hammer2_chain_setflush(parent);
		}
	} else if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT) {
		/*
		 * Recursion depth reached.
		 */
		KKASSERT((chain->flags & HAMMER2_CHAIN_DELAYED) == 0);
		hammer2_chain_ref(chain);
		TAILQ_INSERT_TAIL(&info->flushq, chain, flush_node);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
		++info->diddeferral;
	} else if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				   HAMMER2_CHAIN_DESTROY)) {
		/*
		 * Downward recursion search (actual flush occurs bottom-up).
		 * Pre-clear ONFLUSH.  It can get set again due to races or
		 * flush errors, which we want so the scan finds us again in
		 * the next flush.
		 *
		 * We must also recurse if DESTROY is set so we can finally
		 * get rid of the related children, otherwise the node will
		 * just get re-flushed on lastdrop.
		 *
		 * WARNING!  The recursion will unlock/relock info->parent
		 *	     (which is 'chain'), potentially allowing it
		 *	     to be ripped up.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
		save_error = info->error;
		info->error = 0;
		info->parent = chain;

		/*
		 * We may have to do this twice to catch any indirect
		 * block maintenance that occurs.  Other conditions which
		 * can keep setting ONFLUSH (such as deferrals) ought to
		 * be handled by the flushq code.  XXX needs more help
		 */
		hammer2_spin_ex(&chain->core.spin);
		RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
			NULL, hammer2_flush_recurse, info);
		if (chain->flags & HAMMER2_CHAIN_ONFLUSH) {
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
			RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
				NULL, hammer2_flush_recurse, info);
		}
		hammer2_spin_unex(&chain->core.spin);
		info->parent = parent;

		/*
		 * Re-set the flush bits if the flush was incomplete or
		 * an error occurred.  If an error occurs it is typically
		 * an allocation error.  Errors do not cause deferrals.
		 */
		if (info->error)
			hammer2_chain_setflush(chain);
		info->error |= save_error;
		if (info->diddeferral)
			hammer2_chain_setflush(chain);

		/*
		 * If we lost the parent->chain association we have to
		 * stop processing this chain because it is no longer
		 * in this recursion.  If it moved, it will be handled
		 * by the ONFLUSH flag elsewhere.
		 */
		if (chain->parent != parent) {
			kprintf("LOST CHILD2 %p->%p (actual parent %p)\n",
				parent, chain, chain->parent);
			goto done;
		}
	}

	/*
	 * Now we are in the bottom-up part of the recursion.
	 *
	 * Do not update chain if lower layers were deferred.  We continue
	 * to try to update the chain on lower-level errors, but the flush
	 * code may decide not to flush the volume root.
	 *
	 * XXX should we continue to try to update the chain if an error
	 *     occurred?
	 */
	if (info->diddeferral)
		goto done;

	/*
	 * Both parent and chain must be locked in order to flush chain,
	 * in order to properly update the parent under certain conditions.
	 *
	 * In addition, we can't safely unlock/relock the chain once we
	 * start flushing the chain itself, which we would have to do later
	 * on in order to lock the parent if we didn't do that now.
	 */
	hammer2_chain_ref_hold(chain);
	hammer2_chain_unlock(chain);
	if (parent)
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE);
	hammer2_chain_drop_unhold(chain);

	/*
	 * Can't process if we can't access the parent's or chain's content.
	 */
	if ((parent && parent->error) || chain->error) {
		kprintf("hammer2: chain error during flush\n");
		info->error |= chain->error;
		if (parent) {
			info->error |= parent->error;
			hammer2_chain_unlock(parent);
		}
		goto done;
	}

	if (chain->parent != parent) {
		if (hammer2_debug & 0x0040) {
			kprintf("LOST CHILD3 %p->%p (actual parent %p)\n",
				parent, chain, chain->parent);
		}
		KKASSERT(parent != NULL);
		hammer2_chain_unlock(parent);
		if ((chain->flags & HAMMER2_CHAIN_DELAYED) == 0) {
			hammer2_chain_ref(chain);
			TAILQ_INSERT_TAIL(&info->flushq, chain, flush_node);
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
			++info->diddeferral;
		}
		goto done;
	}

	/*
	 * Propagate the DESTROY flag downwards.  This dummies up the flush
	 * code and tries to invalidate related buffer cache buffers to
	 * avoid the disk write.
	 */
	if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);

	/*
	 * Dispose of the modified bit.
	 *
	 * If parent is present, the UPDATE bit should already be set.
	 * bref.mirror_tid should already be set.
	 */
	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
		KKASSERT((chain->flags & HAMMER2_CHAIN_UPDATE) ||
			 chain->parent == NULL);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		atomic_add_long(&hammer2_count_modified_chains, -1);

		/*
		 * Manage threads waiting for excessive dirty memory to
		 * be retired.
		 */
		if (chain->pmp)
			hammer2_pfs_memory_wakeup(chain->pmp);

#if 0
		if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
		    chain != &hmp->vchain &&
		    chain != &hmp->fchain) {
			/*
			 * Set UPDATE bit indicating that the parent block
			 * table requires updating.
			 */
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
		}
#endif

		/*
		 * Issue the flush.  This is indirect via the DIO.
		 *
		 * NOTE: A DELETED node that reaches this point must be
		 *	 flushed for synchronization point consistency.
		 *
		 * NOTE: Even though MODIFIED was already set, the related DIO
		 *	 might not be dirty due to a system buffer cache
		 *	 flush and must be set dirty if we are going to make
		 *	 further modifications to the buffer.  Chains with
		 *	 embedded data don't need this.
		 */
		if (hammer2_debug & 0x1000) {
			kprintf("Flush %p.%d %016jx/%d data=%016jx\n",
				chain, chain->bref.type,
				(uintmax_t)chain->bref.key,
				chain->bref.keybits,
				(uintmax_t)chain->bref.data_off);
		}
		if (hammer2_debug & 0x2000) {
			Debugger("Flush hell");
		}

		/*
		 * Update chain CRCs for flush.
		 *
		 * NOTE: Volume headers are NOT flushed here as they require
		 *	 special processing.
		 */
		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_FREEMAP:
			/*
			 * Update the volume header's freemap_tid to the
			 * freemap's flushing mirror_tid.
			 *
			 * (note: embedded data, do not call setdirty)
			 */
			KKASSERT(hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED);
			KKASSERT(chain == &hmp->fchain);
			hmp->voldata.freemap_tid = chain->bref.mirror_tid;
			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("sync freemap mirror_tid %08jx\n",
					(intmax_t)chain->bref.mirror_tid);
			}

			/*
			 * The freemap can be flushed independently of the
			 * main topology, but for the case where it is
			 * flushed in the same transaction, and flushed
			 * before vchain (a case we want to allow for
			 * performance reasons), make sure modifications
			 * made during the flush under vchain use a new
			 * transaction id.
			 *
			 * Otherwise the mount recovery code will get confused.
			 */
			++hmp->voldata.mirror_tid;
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			/*
			 * The free block table is flushed by
			 * hammer2_vfs_sync() before it flushes vchain.
			 * We must still hold fchain locked while copying
			 * voldata to volsync, however.
			 *
			 * These do not error per se since their data does
			 * not need to be re-read from media on lock.
			 *
			 * (note: embedded data, do not call setdirty)
			 */
			hammer2_chain_lock(&hmp->fchain,
					   HAMMER2_RESOLVE_ALWAYS);
			hammer2_voldata_lock(hmp);
			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("sync volume  mirror_tid %08jx\n",
					(intmax_t)chain->bref.mirror_tid);
			}

			/*
			 * Update the volume header's mirror_tid to the
			 * main topology's flushing mirror_tid.  It is
			 * possible that voldata.mirror_tid is already
			 * beyond bref.mirror_tid due to the bump we made
			 * above in BREF_TYPE_FREEMAP.
			 */
			if (hmp->voldata.mirror_tid < chain->bref.mirror_tid) {
				hmp->voldata.mirror_tid =
					chain->bref.mirror_tid;
			}

			/*
			 * The volume header is flushed manually by the
			 * syncer, not here.  All we do here is adjust the
			 * crc's.
			 */
			KKASSERT(chain->data != NULL);
			KKASSERT(chain->dio == NULL);

			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1]=
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRC1_OFF,
					HAMMER2_VOLUME_ICRC1_SIZE);
			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0]=
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRC0_OFF,
					HAMMER2_VOLUME_ICRC0_SIZE);
			hmp->voldata.icrc_volheader =
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRCVH_OFF,
					HAMMER2_VOLUME_ICRCVH_SIZE);

			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("syncvolhdr %016jx %016jx\n",
					hmp->voldata.mirror_tid,
					hmp->vchain.bref.mirror_tid);
			}
			hmp->volsync = hmp->voldata;
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_VOLUMESYNC);
			hammer2_voldata_unlock(hmp);
			hammer2_chain_unlock(&hmp->fchain);
			break;
		case HAMMER2_BREF_TYPE_DATA:
			/*
			 * Data elements have already been flushed via the
			 * logical file buffer cache.  Their hash was set in
			 * the bref by the vop_write code.  Do not re-dirty.
			 *
			 * Make sure any device buffer(s) have been flushed
			 * out here (there aren't usually any to flush) XXX.
			 */
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
			/*
			 * Buffer I/O will be cleaned up when the volume is
			 * flushed (but the kernel is free to flush it before
			 * then, as well).
			 */
			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
			hammer2_chain_setcheck(chain, chain->data);
			break;
		case HAMMER2_BREF_TYPE_DIRENT:
			/*
			 * A directory entry can use the check area to store
			 * the filename for filenames <= 64 bytes, don't blow
			 * it up!
			 */
			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
			if (chain->bytes)
				hammer2_chain_setcheck(chain, chain->data);
			break;
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * NOTE: We must call io_setdirty() to make any late
			 *	 changes to the inode data, the system might
			 *	 have already flushed the buffer.
			 */
			if (chain->data->ipdata.meta.op_flags &
			    HAMMER2_OPFLAG_PFSROOT) {
				/*
				 * non-NULL pmp if mounted as a PFS.  We must
				 * sync fields cached in the pmp? XXX
				 */
				hammer2_inode_data_t *ipdata;

				hammer2_io_setdirty(chain->dio);
				ipdata = &chain->data->ipdata;
				if (chain->pmp) {
					ipdata->meta.pfs_inum =
						chain->pmp->inode_tid;
				}
			} else {
				/* can't be mounted as a PFS */
			}

			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
			hammer2_chain_setcheck(chain, chain->data);
			break;
		default:
			KKASSERT(chain->flags & HAMMER2_CHAIN_EMBEDDED);
			panic("hammer2_flush_core: unsupported "
			      "embedded bref %d",
			      chain->bref.type);
			/* NOT REACHED */
		}

		/*
		 * If the chain was destroyed try to avoid unnecessary I/O
		 * that might not have yet occurred.  Remove the data range
		 * from dedup candidacy and attempt to invalidate the
		 * potentially dirty portion of the I/O buffer.
		 */
		if (chain->flags & HAMMER2_CHAIN_DESTROY) {
			hammer2_io_dedup_delete(hmp,
						chain->bref.type,
						chain->bref.data_off,
						chain->bytes);
#if 0
			hammer2_io_t *dio;
			if (chain->dio) {
				hammer2_io_inval(chain->dio,
						 chain->bref.data_off,
						 chain->bytes);
			} else if ((dio = hammer2_io_getquick(hmp,
						  chain->bref.data_off,
						  chain->bytes,
						  1)) != NULL) {
				hammer2_io_inval(dio,
						 chain->bref.data_off,
						 chain->bytes);
				hammer2_io_putblk(&dio);
			}
#endif
		}
	}

	/*
	 * If UPDATE is set the parent block table may need to be updated.
	 * This can fail if the hammer2_chain_modify() fails.
	 *
	 * NOTE: UPDATE may be set on vchain or fchain in which case
	 *	 parent could be NULL.  It's easiest to allow the case
	 *	 and test for NULL.  parent can also wind up being NULL
	 *	 due to a deletion so we need to handle the case anyway.
	 *
	 * If no parent exists we can just clear the UPDATE bit.  If the
	 * chain gets reattached later on the bit will simply get set
	 * again.
	 */
	if ((chain->flags & HAMMER2_CHAIN_UPDATE) && parent == NULL)
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);

	/*
	 * The chain may need its blockrefs updated in the parent.
	 */
	if (chain->flags & HAMMER2_CHAIN_UPDATE) {
		hammer2_blockref_t *base;
		int count;

		/*
		 * Clear UPDATE flag, mark parent modified, update its
		 * modify_tid if necessary, and adjust the parent blockmap.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);

		/*
		 * (optional code)
		 *
		 * Avoid actually modifying and updating the parent if it
		 * was flagged for destruction.  This can greatly reduce
		 * disk I/O in large tree removals because the
		 * hammer2_io_setinval() call in the upward recursion
		 * (see MODIFIED code above) can only handle a few cases.
		 */
		if (parent->flags & HAMMER2_CHAIN_DESTROY) {
			if (parent->bref.modify_tid < chain->bref.modify_tid) {
				parent->bref.modify_tid =
					chain->bref.modify_tid;
			}
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_BMAPPED |
							HAMMER2_CHAIN_BMAPUPD);
			goto skipupdate;
		}

		/*
		 * The flusher is responsible for deleting empty indirect
		 * blocks at this point.  If we don't do this, no major harm
		 * will be done but the empty indirect blocks will stay in
		 * the topology and make it messy and inefficient.
		 *
		 * The flusher is also responsible for collapsing the
		 * content of an indirect block into its parent whenever
		 * possible (with some hysteresis).  Not doing this will also
		 * not harm the topology, but would make it messy and
		 * inefficient.
		 */
		if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
			if (hammer2_chain_indirect_maintenance(parent, chain))
				goto skipupdate;
		}

		/*
		 * We are updating the parent's blockmap, the parent must
		 * be set modified.  If this fails we re-set the UPDATE flag
		 * in the child.
		 *
		 * NOTE! A modification error can be ENOSPC.  We still want
		 *	 to flush modified chains recursively, not break out,
		 *	 so we just skip the update in this situation and
		 *	 continue.  That is, we still need to try to clean
		 *	 out dirty chains and buffers.
		 *
		 *	 This may not help bulkfree though. XXX
		 */
		save_error = hammer2_chain_modify(parent, 0, 0, 0);
		if (save_error) {
			info->error |= save_error;
			kprintf("hammer2_flush: %016jx.%02x error=%08x\n",
				parent->bref.data_off, parent->bref.type,
				save_error);
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
			goto skipupdate;
		}
		if (parent->bref.modify_tid < chain->bref.modify_tid)
			parent->bref.modify_tid = chain->bref.modify_tid;

		/*
		 * Calculate blockmap pointer
		 */
		switch(parent->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * Access the inode's block array.  However, there is
			 * no block array if the inode is flagged DIRECTDATA.
			 */
			if (parent->data &&
			    (parent->data->ipdata.meta.op_flags &
			     HAMMER2_OPFLAG_DIRECTDATA) == 0) {
				base = &parent->data->
					ipdata.u.blockset.blockref[0];
			} else {
				base = NULL;
			}
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
			if (parent->data)
				base = &parent->data->npdata[0];
			else
				base = NULL;
			count = parent->bytes / sizeof(hammer2_blockref_t);
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			base = &chain->hmp->voldata.sroot_blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_FREEMAP:
			base = &parent->data->npdata[0];
			count = HAMMER2_SET_COUNT;
			break;
		default:
			base = NULL;
			count = 0;
			panic("hammer2_flush_core: "
			      "unrecognized blockref type: %d",
			      parent->bref.type);
		}

		/*
		 * Blocktable updates
		 *
		 * We synchronize pending statistics at this time.  Delta
		 * adjustments designated for the current and upper level
		 * are synchronized.
		 */
		if (base && (chain->flags & HAMMER2_CHAIN_BMAPUPD)) {
			if (chain->flags & HAMMER2_CHAIN_BMAPPED) {
				hammer2_spin_ex(&parent->core.spin);
				hammer2_base_delete(parent, base, count, chain);
				hammer2_spin_unex(&parent->core.spin);
				/* base_delete clears both bits */
			} else {
				atomic_clear_int(&chain->flags,
						 HAMMER2_CHAIN_BMAPUPD);
			}
		}
		if (base && (chain->flags & HAMMER2_CHAIN_BMAPPED) == 0) {
			hammer2_spin_ex(&parent->core.spin);
			hammer2_base_insert(parent, base, count,
					    chain, &chain->bref);
			hammer2_spin_unex(&parent->core.spin);
			/* base_insert sets BMAPPED */
		}
	}
skipupdate:
	if (parent)
		hammer2_chain_unlock(parent);

	/*
	 * Final cleanup after flush
	 */
done:
	KKASSERT(chain->refs > 0);
	if (hammer2_debug & 0x200) {
		if (info->debug == chain)
			info->debug = NULL;
	}
}

/*
 * Flush recursion helper, called from flush_core, calls flush_core.
 *
 * Flushes the children of the caller's chain (info->parent), restricted
 * by sync_tid.  Set info->domodify if the child's blockref must propagate
 * back up to the parent.
 *
 * This function may set info->error as a side effect.
 *
 * Ripouts can move child from rbtree to dbtree or dbq but the caller's
 * flush scan order prevents any chains from being lost.  A child can be
 * processed more than once.
 *
 * WARNING! If we do not call hammer2_flush_core() we must update
 *	    bref.mirror_tid ourselves to indicate that the flush has
 *	    processed the child.
 *
 * WARNING! parent->core spinlock is held on entry and return.
 */
static int
hammer2_flush_recurse(hammer2_chain_t *child, void *data)
{
	hammer2_flush_info_t *info = data;
	hammer2_chain_t *parent = info->parent;

#ifdef HAMMER2_SCAN_DEBUG
	++info->scan_count;
	if (child->flags & HAMMER2_CHAIN_MODIFIED)
		++info->scan_mod_count;
	if (child->flags & HAMMER2_CHAIN_UPDATE)
		++info->scan_upd_count;
	if (child->flags & HAMMER2_CHAIN_ONFLUSH)
		++info->scan_onf_count;
#endif

	/*
	 * (child can never be fchain or vchain so a special check isn't
	 *  needed).
	 *
	 * We must ref the child before unlocking the spinlock.
	 *
	 * The caller has added a ref to the parent so we can temporarily
	 * unlock it in order to lock the child.  However, if it no longer
	 * winds up being the child of the parent we must skip this child.
	 *
	 * NOTE! chain locking errors are fatal.  They are never out-of-space
	 *	 errors.
	 */
	hammer2_chain_ref(child);
	hammer2_spin_unex(&parent->core.spin);

	hammer2_chain_ref_hold(parent);
	hammer2_chain_unlock(parent);
	hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);
	if (child->parent != parent) {
		kprintf("LOST CHILD1 %p->%p (actual parent %p)\n",
			parent, child, child->parent);
		goto done;
	}
	if (child->error) {
		kprintf("CHILD ERROR DURING FLUSH LOCK %p->%p\n",
			parent, child);
		info->error |= child->error;
		goto done;
	}

	/*
	 * Must propagate the DESTROY flag downwards, otherwise the
	 * parent could end up never being removed because it will
	 * be requeued to the flusher if it survives this run due to
	 * the flag.
	 */
	if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
		atomic_set_int(&child->flags, HAMMER2_CHAIN_DESTROY);
#ifdef HAMMER2_SCAN_DEBUG
	if (child->flags & HAMMER2_CHAIN_DESTROY)
		++info->scan_del_count;
#endif

	/*
	 * Recurse and collect deferral data.  We're in the media flush;
	 * this can cross PFS boundaries.
	 */
	if (child->flags & HAMMER2_CHAIN_FLUSH_MASK) {
#ifdef HAMMER2_SCAN_DEBUG
		if (child->bref.type < 7)
			++info->scan_btype[child->bref.type];
#endif
		++info->depth;
		hammer2_flush_core(info, child, info->flags);
		--info->depth;
	} else if (hammer2_debug & 0x200) {
		if (info->debug == NULL)
			info->debug = child;
		++info->depth;
		hammer2_flush_core(info, child, info->flags);
		--info->depth;
		if (info->debug == child)
			info->debug = NULL;
	}

done:
	/*
	 * Relock to continue the loop.
	 */
	hammer2_chain_unlock(child);
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
	hammer2_chain_drop_unhold(parent);
	if (parent->error) {
		kprintf("PARENT ERROR DURING FLUSH LOCK %p->%p\n",
			parent, child);
		info->error |= parent->error;
	}
	hammer2_chain_drop(child);
	KKASSERT(info->parent == parent);
	hammer2_spin_ex(&parent->core.spin);

	return (0);
}

/*
 * flush helper (backend threaded)
 *
 * Flushes chain topology for the specified inode.
 *
 * If HAMMER2_XOP_FLUSH is set we flush all chains from the current inode
 * through but stop at sub-inodes (we flush the inode chains for sub-inodes,
 * but do not go further as deeper modifications do not belong to the current
 * flush cycle).
 *
 * If HAMMER2_XOP_FLUSH is not set we flush the current inode's chains only
 * and do not recurse through sub-inodes at all, not even to flush the
 * inode chains of those sub-inodes.
 *
 * Remember that HAMMER2 is currently using a flat inode model, so directory
 * hierarchies do not translate to inode hierarchies.  PFS ROOTs, however,
 * do.
 *
 * chain->parent can be NULL, usually due to destroy races.
 *
 * Primarily called from vfs_sync().
 */
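/*
 * Frontend usage sketch (hypothetical; the exact XOP dispatch in
 * hammer2_vfs_sync() may differ): queue this backend function across the
 * cluster and collect the per-node results:
 *
 *	xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING |
 *					    HAMMER2_XOP_INODE_STOP |
 *					    HAMMER2_XOP_VOLHDR);
 *	hammer2_xop_start(&xop->head, hammer2_inode_xop_flush);
 *	error = hammer2_xop_collect(&xop->head,
 *				    HAMMER2_XOP_COLLECT_WAITALL);
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */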
void
hammer2_inode_xop_flush(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_flush_t *xop = &arg->xop_flush;
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;
	hammer2_dev_t *hmp;
	int flush_error = 0;
	int fsync_error = 0;
	int total_error = 0;
	int j;
	int xflags;
	int ispfsroot = 0;

	xflags = HAMMER2_FLUSH_TOP;
	if (xop->head.flags & HAMMER2_XOP_INODE_STOP)
		xflags |= HAMMER2_FLUSH_INODE_STOP;

	/*
	 * Flush core chains
	 */
	chain = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				    HAMMER2_RESOLVE_ALWAYS);
	if (chain) {
		hmp = chain->hmp;
		if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) ||
		    TAILQ_FIRST(&hmp->flushq) != NULL) {
			hammer2_flush(chain, xflags);
			parent = chain->parent;
			if (parent)
				hammer2_chain_setflush(parent);
		}
		if (chain->flags & HAMMER2_CHAIN_PFSBOUNDARY)
			ispfsroot = 1;
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
	} else {
		hmp = NULL;
	}

	/*
	 * Only flush the volume header if asked to, plus the inode must also
	 * be the PFS root.
	 */
	if ((xop->head.flags & HAMMER2_XOP_VOLHDR) == 0)
		goto skip;
	if (ispfsroot == 0)
		goto skip;

	/*
	 * Flush volume roots.  Avoid replication, we only want to
	 * flush each hammer2_dev (hmp) once.
	 */
	for (j = thr->clindex - 1; j >= 0; --j) {
		if ((chain = xop->head.ip1->cluster.array[j].chain) != NULL) {
			if (chain->hmp == hmp) {
				chain = NULL;	/* safety */
				goto skip;
			}
		}
	}
	chain = NULL;	/* safety */

	/*
	 * spmp transaction.  The super-root is never directly mounted so
	 * there shouldn't be any vnodes, let alone any dirty vnodes
	 * associated with it, so we shouldn't have to mess around with any
	 * vnode flushes here.
	 */
	hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);

	/*
	 * Media mounts have two 'roots', vchain for the topology
	 * and fchain for the free block table.  Flush both.
	 *
	 * Note that the topology and free block table are handled
	 * independently, so the free block table can wind up being
	 * ahead of the topology.  We depend on the bulk free scan
	 * code to deal with any loose ends.
	 *
	 * vchain and fchain do not error on-lock since their data does
	 * not have to be re-read from media.
	 */
	hammer2_chain_ref(&hmp->vchain);
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_ref(&hmp->fchain);
	hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		/*
		 * This will also modify vchain as a side effect,
		 * mark vchain as modified now.
		 */
		hammer2_voldata_modify(hmp);
		chain = &hmp->fchain;
		flush_error |= hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		KKASSERT(chain == &hmp->fchain);
	}
	hammer2_chain_unlock(&hmp->fchain);
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->fchain);
	/* vchain dropped down below */

	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		chain = &hmp->vchain;
		flush_error |= hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		KKASSERT(chain == &hmp->vchain);
	}
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->vchain);

	/*
	 * We can't safely flush the volume header until we have
	 * flushed any device buffers which have built up.
	 *
	 * XXX this isn't being incremental
	 */
	vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
	fsync_error = VOP_FSYNC(hmp->devvp, MNT_WAIT, 0);
	vn_unlock(hmp->devvp);
	if (fsync_error || flush_error) {
		kprintf("hammer2: sync error fsync=%d h2flush=0x%04x dev=%s\n",
			fsync_error, flush_error, hmp->devrepname);
	}

	/*
	 * The flush code sets CHAIN_VOLUMESYNC to indicate that the
	 * volume header needs synchronization via hmp->volsync.
	 *
	 * XXX synchronize the flag & data with only this flush XXX
	 */
	if (fsync_error == 0 && flush_error == 0 &&
	    (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) {
		struct buf *bp;
		int vol_error = 0;

		/*
		 * Synchronize the disk before flushing the volume
		 * header.
		 */
		bp = getpbuf(NULL);
		bp->b_bio1.bio_offset = 0;
		bp->b_bufsize = 0;
		bp->b_bcount = 0;
		bp->b_cmd = BUF_CMD_FLUSH;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;
		vn_strategy(hmp->devvp, &bp->b_bio1);
		fsync_error = biowait(&bp->b_bio1, "h2vol");
		relpbuf(bp, NULL);

		/*
		 * Then we can safely flush the version of the
		 * volume header synchronized by the flush code.
		 */
		j = hmp->volhdrno + 1;
		if (j < 0)
			j = 0;
		if (j >= HAMMER2_NUM_VOLHDRS)
			j = 0;
		if (j * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE >
		    hmp->volsync.volu_size) {
			j = 0;
		}
		if (hammer2_debug & 0x8000) {
			/* debug only, avoid syslogd loop */
			kprintf("sync volhdr %d %jd\n",
				j, (intmax_t)hmp->volsync.volu_size);
		}
		bp = getblk(hmp->devvp, j * HAMMER2_ZONE_BYTES64,
			    HAMMER2_PBUFSIZE, GETBLK_KVABIO, 0);
		atomic_clear_int(&hmp->vchain.flags,
				 HAMMER2_CHAIN_VOLUMESYNC);
		bkvasync(bp);
		bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE);
		vol_error = bwrite(bp);
		hmp->volhdrno = j;
		if (vol_error)
			fsync_error = vol_error;
	}
	if (flush_error)
		total_error = flush_error;
	if (fsync_error)
		total_error = hammer2_errno_to_error(fsync_error);

	hammer2_trans_done(hmp->spmp, 0);	/* spmp trans */
skip:
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, total_error);
}
1565