xref: /dragonfly/sys/vfs/hammer2/hammer2_flush.c (revision c6f73aab)
1 /*
2  * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * and Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 /*
36  *			TRANSACTION AND FLUSH HANDLING
37  *
38  * Deceptively simple but actually fairly difficult to implement properly is
39  * how I would describe it.
40  *
41  * Flushing generally occurs bottom-up but requires a top-down scan to
42  * locate chains with MODIFIED and/or UPDATE bits set.  The ONFLUSH flag
43  * tells how to recurse downward to find these chains.
44  */
45 
46 #include <sys/cdefs.h>
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/types.h>
50 #include <sys/lock.h>
51 #include <sys/uuid.h>
52 
53 #include "hammer2.h"
54 
55 #define FLUSH_DEBUG 0
56 
57 #define HAMMER2_FLUSH_DEPTH_LIMIT       10      /* stack recursion limit */
58 
59 
60 /*
61  * Recursively flush the specified chain.  The chain is locked and
62  * referenced by the caller and will remain so on return.  The chain
63  * will remain referenced throughout but can temporarily lose its
64  * lock during the recursion to avoid unnecessarily stalling user
65  * processes.
66  */
67 struct hammer2_flush_info {
68 	hammer2_chain_t *parent;
69 	int		depth;
70 	int		diddeferral;
71 	int		cache_index;
72 	hammer2_tid_t	mtid;
73 	struct h2_flush_list flushq;
74 	hammer2_chain_t	*debug;
75 };
76 
77 typedef struct hammer2_flush_info hammer2_flush_info_t;
78 
79 static void hammer2_flush_core(hammer2_flush_info_t *info,
80 				hammer2_chain_t *chain, int deleting);
81 static int hammer2_flush_recurse(hammer2_chain_t *child, void *data);
82 
83 /*
84  * Any per-pfs transaction initialization goes here.
85  */
86 void
87 hammer2_trans_manage_init(hammer2_pfs_t *pmp)
88 {
89 }
90 
91 /*
92  * Transaction support for any modifying operation.  Transactions are used
93  * in the pmp layer by the frontend and in the spmp layer by the backend.
94  *
95  * 0			- Normal transaction, interlocked against flush
96  *			  transaction.
97  *
98  * TRANS_ISFLUSH	- Flush transaction, interlocked against normal
99  *			  transaction.
100  *
101  * TRANS_BUFCACHE	- Buffer cache transaction, no interlock.
102  *
103  * Initializing a new transaction allocates a transaction ID.  The
104  * transaction is opened against the pmp of the PFS being operated on;
105  * the backend and the recovery code open theirs against the device's
106  * super-root PFS (hmp->spmp).
107  *
108  * TWO TRANSACTION IDs can run concurrently, where one is a flush and the
109  * other is a set of any number of concurrent filesystem operations.  We
110  * can either have <running_fs_ops> + <waiting_flush> + <blocked_fs_ops>
111  * or we can have <running_flush> + <concurrent_fs_ops>.
112  *
113  * During a flush, new fs_ops are only blocked until the fs_ops prior to
114  * the flush complete.  The new fs_ops can then run concurrent with the flush.
115  *
116  * Buffer-cache transactions operate as fs_ops but never block.  A
117  * buffer-cache flush will run either before or after the current pending
118  * flush depending on its state.
119  */
120 void
121 hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags)
122 {
123 	uint32_t oflags;
124 	uint32_t nflags;
125 	int dowait;
126 
127 	for (;;) {
128 		oflags = pmp->trans.flags;
129 		cpu_ccfence();
130 		dowait = 0;
131 
132 		if (flags & HAMMER2_TRANS_ISFLUSH) {
133 			/*
134 			 * Requesting flush transaction.  Wait for all
135 			 * currently running transactions to finish.
136 			 */
137 			if (oflags & HAMMER2_TRANS_MASK) {
138 				nflags = oflags | HAMMER2_TRANS_FPENDING |
139 						  HAMMER2_TRANS_WAITING;
140 				dowait = 1;
141 			} else {
142 				nflags = (oflags | flags) + 1;
143 			}
144 		} else if (flags & HAMMER2_TRANS_BUFCACHE) {
145 			/*
146 			 * Requesting strategy transaction.  Generally
147 			 * allowed in all situations unless a flush
148 			 * is running without the preflush flag.
149 			 */
150 			if ((oflags & (HAMMER2_TRANS_ISFLUSH |
151 				       HAMMER2_TRANS_PREFLUSH)) ==
152 			    HAMMER2_TRANS_ISFLUSH) {
153 				nflags = oflags | HAMMER2_TRANS_WAITING;
154 				dowait = 1;
155 			} else {
156 				nflags = (oflags | flags) + 1;
157 			}
158 		} else {
159 			/*
160 			 * Requesting normal transaction.  Wait for any
161 			 * flush to finish before allowing.
162 			 */
163 			if (oflags & HAMMER2_TRANS_ISFLUSH) {
164 				nflags = oflags | HAMMER2_TRANS_WAITING;
165 				dowait = 1;
166 			} else {
167 				nflags = (oflags | flags) + 1;
168 			}
169 		}
170 		if (dowait)
171 			tsleep_interlock(&pmp->trans.sync_wait, 0);
172 		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
173 			if (dowait == 0)
174 				break;
175 			tsleep(&pmp->trans.sync_wait, PINTERLOCKED,
176 			       "h2trans", hz);
177 		} else {
178 			cpu_pause();
179 		}
180 		/* retry */
181 	}
182 }
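
/*
 * Illustrative sketch (hypothetical, not driver code): a modifying
 * frontend operation brackets its work with a normal transaction.
 * example_modifying_op() is invented; the hammer2_trans_*() calls are
 * the real API in this file.
 */
#if 0
static void
example_modifying_op(hammer2_pfs_t *pmp)
{
	hammer2_tid_t mtid;

	hammer2_trans_init(pmp, 0);	/* normal, interlocked vs flush */
	mtid = hammer2_trans_sub(pmp);	/* fresh modify_tid for this op */
	/* ... modify chains, tagging the modifications with mtid ... */
	hammer2_trans_done(pmp);	/* drop count, wake any waiters */
}
#endif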
183 
184 /*
185  * Start a sub-transaction; there is no 'subdone' function.  This will
186  * issue a new modify_tid (mtid) for the current transaction and must
187  * be called for each XOP when multiple XOPs are run in sequence.
188  */
189 hammer2_tid_t
190 hammer2_trans_sub(hammer2_pfs_t *pmp)
191 {
192 	hammer2_tid_t mtid;
193 
194 	mtid = atomic_fetchadd_64(&pmp->modify_tid, 1);
195 
196 	return (mtid);
197 }
198 
199 /*
200  * Clears the PREFLUSH stage, called during a flush transaction after all
201  * logical buffer I/O has completed.
202  */
203 void
204 hammer2_trans_clear_preflush(hammer2_pfs_t *pmp)
205 {
206 	atomic_clear_int(&pmp->trans.flags, HAMMER2_TRANS_PREFLUSH);
207 }
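
/*
 * Illustrative sketch of the flush-transaction sequence assumed by the
 * helpers above, modeled loosely on the filesystem sync path.  The exact
 * flag combination passed to hammer2_trans_init() is an assumption,
 * simplified for illustration.
 */
#if 0
	hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH |
				HAMMER2_TRANS_PREFLUSH);
	/* ... front half: logical buffer I/O, strategy calls allowed ... */
	hammer2_trans_clear_preflush(pmp);
	/* ... back half: flush the chain topology to media ... */
	hammer2_trans_done(pmp);
#endif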
208 
209 void
210 hammer2_trans_done(hammer2_pfs_t *pmp)
211 {
212 	uint32_t oflags;
213 	uint32_t nflags;
214 
215 	for (;;) {
216 		oflags = pmp->trans.flags;
217 		cpu_ccfence();
218 		KKASSERT(oflags & HAMMER2_TRANS_MASK);
219 		if ((oflags & HAMMER2_TRANS_MASK) == 1) {
220 			/*
221 			 * This was the last transaction
222 			 */
223 			nflags = (oflags - 1) & ~(HAMMER2_TRANS_ISFLUSH |
224 						  HAMMER2_TRANS_BUFCACHE |
225 						  HAMMER2_TRANS_PREFLUSH |
226 						  HAMMER2_TRANS_FPENDING |
227 						  HAMMER2_TRANS_WAITING);
228 		} else {
229 			/*
230 			 * Still transactions pending
231 			 */
232 			nflags = oflags - 1;
233 		}
234 		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
235 			if ((nflags & HAMMER2_TRANS_MASK) == 0 &&
236 			    (oflags & HAMMER2_TRANS_WAITING)) {
237 				wakeup(&pmp->trans.sync_wait);
238 			}
239 			break;
240 		} else {
241 			cpu_pause();
242 		}
243 		/* retry */
244 	}
245 }
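
/*
 * Schematic of the lock-free update pattern shared by
 * hammer2_trans_init() and hammer2_trans_done() above: the low-order
 * bits of trans.flags (HAMMER2_TRANS_MASK) count live transactions.
 * compute_new() is a hypothetical stand-in for the per-case logic.
 */
#if 0
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = pmp->trans.flags;		/* snapshot */
		cpu_ccfence();
		nflags = compute_new(oflags);		/* hypothetical */
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags))
			break;				/* CAS won */
		cpu_pause();				/* lost race; retry */
	}
#endif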
246 
247 /*
248  * Obtain new, unique inode number (not serialized by caller).
249  */
250 hammer2_tid_t
251 hammer2_trans_newinum(hammer2_pfs_t *pmp)
252 {
253 	hammer2_tid_t tid;
254 
255 	tid = atomic_fetchadd_64(&pmp->inode_tid, 1);
256 
257 	return tid;
258 }
259 
260 /*
261  * Assert that a strategy call is ok here.  Strategy calls are legal
262  *
263  * (1) In a normal transaction.
264  * (2) In a flush transaction only if PREFLUSH is also set.
265  */
266 void
267 hammer2_trans_assert_strategy(hammer2_pfs_t *pmp)
268 {
269 	KKASSERT((pmp->trans.flags & HAMMER2_TRANS_ISFLUSH) == 0 ||
270 		 (pmp->trans.flags & HAMMER2_TRANS_PREFLUSH));
271 }
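
/*
 * Hypothetical strategy-path sketch: a buffer-cache I/O path asserts
 * legality and then runs under a BUFCACHE transaction (which never
 * blocks, per the comments above).
 */
#if 0
	hammer2_trans_assert_strategy(pmp);
	hammer2_trans_init(pmp, HAMMER2_TRANS_BUFCACHE);
	/* ... issue the chain I/O backing the buffer ... */
	hammer2_trans_done(pmp);
#endif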
272 
273 
274 /*
275  * Chains undergoing destruction are removed from the in-memory topology.
276  * To avoid being lost, these chains are placed on the delayed flush
277  * queue which will properly dispose of them.
278  *
279  * We do this instead of issuing an immediate flush in order to give
280  * recursive deletions (rm -rf, etc) a chance to remove more of the
281  * hierarchy, potentially allowing an enormous amount of write I/O to
282  * be avoided.
283  */
284 void
285 hammer2_delayed_flush(hammer2_chain_t *chain)
286 {
287 	if ((chain->flags & HAMMER2_CHAIN_DELAYED) == 0) {
288 		hammer2_spin_ex(&chain->hmp->list_spin);
289 		if ((chain->flags & (HAMMER2_CHAIN_DELAYED |
290 				     HAMMER2_CHAIN_DEFERRED)) == 0) {
291 			atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELAYED |
292 						      HAMMER2_CHAIN_DEFERRED);
293 			TAILQ_INSERT_TAIL(&chain->hmp->flushq,
294 					  chain, flush_node);
295 			hammer2_chain_ref(chain);
296 		}
297 		hammer2_spin_unex(&chain->hmp->list_spin);
298 	}
299 }
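
/*
 * Hypothetical caller sketch: a destruction path queues the chain
 * instead of flushing it synchronously.  The queue is drained by
 * hammer2_flush() below.
 */
#if 0
	hammer2_chain_delete(parent, chain, mtid, 0);	/* hypothetical call shape */
	hammer2_delayed_flush(chain);	/* queue; dispose on next flush */
#endif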
300 
301 /*
302  * Flush the chain and all modified sub-chains through the specified
303  * synchronization point, propagating parent chain modifications, modify_tid,
304  * and mirror_tid updates back up as needed.
305  *
306  * Caller must have already vetted synchronization points to ensure they
307  * are properly flushed.  Only snapshots and cluster flushes can create
308  * these sorts of synchronization points.
309  *
310  * This routine can be called from several places but the most important
311  * is from VFS_SYNC.
312  *
313  * chain is locked on call and will remain locked on return.  The chain's
314  * UPDATE flag indicates that its parent's block table (which is not yet
315  * part of the flush) should be updated.  The chain may be replaced by
316  * the call if it was modified.
317  */
318 void
319 hammer2_flush(hammer2_chain_t *chain, hammer2_tid_t mtid, int istop)
320 {
321 	hammer2_chain_t *scan;
322 	hammer2_flush_info_t info;
323 	hammer2_dev_t *hmp;
324 	int loops;
325 
326 	/*
327 	 * Execute the recursive flush and handle deferrals.
328 	 *
329 	 * Chains can be ridiculously long (thousands deep), so to
330 	 * avoid blowing out the kernel stack the recursive flush has a
331 	 * depth limit.  Elements at the limit are placed on a list
332 	 * for re-execution after the stack has been popped.
333 	 */
334 	bzero(&info, sizeof(info));
335 	TAILQ_INIT(&info.flushq);
336 	info.cache_index = -1;
337 	info.mtid = mtid;
338 
339 	/*
340 	 * Calculate parent (can be NULL), if not NULL the flush core
341 	 * expects the parent to be referenced so it can easily lock/unlock
342 	 * it without it getting ripped up.
343 	 */
344 	if ((info.parent = chain->parent) != NULL)
345 		hammer2_chain_ref(info.parent);
346 
347 	/*
348 	 * Extra ref needed because flush_core expects it when replacing
349 	 * chain.
350 	 */
351 	hammer2_chain_ref(chain);
352 	hmp = chain->hmp;
353 	loops = 0;
354 
355 	for (;;) {
356 		/*
357 		 * Move hmp->flushq to info.flushq if non-empty so it can
358 		 * be processed.
359 		 */
360 		if (TAILQ_FIRST(&hmp->flushq) != NULL) {
361 			hammer2_spin_ex(&chain->hmp->list_spin);
362 			TAILQ_CONCAT(&info.flushq, &hmp->flushq, flush_node);
363 			hammer2_spin_unex(&chain->hmp->list_spin);
364 		}
365 
366 		/*
367 		 * Unwind deep recursions which had been deferred.  This
368 		 * can leave the FLUSH_* bits set for these chains, which
369 		 * will be handled when we [re]flush chain after the unwind.
370 		 */
371 		while ((scan = TAILQ_FIRST(&info.flushq)) != NULL) {
372 			KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
373 			TAILQ_REMOVE(&info.flushq, scan, flush_node);
374 			atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED |
375 						       HAMMER2_CHAIN_DELAYED);
376 
377 			/*
378 			 * Now that we've popped back up we can do a secondary
379 			 * recursion on the deferred elements.
380 			 *
381 			 * NOTE: hammer2_flush() may replace scan.
382 			 */
383 			if (hammer2_debug & 0x0040)
384 				kprintf("deferred flush %p\n", scan);
385 			hammer2_chain_lock(scan, HAMMER2_RESOLVE_MAYBE);
386 			hammer2_flush(scan, mtid, 0);
387 			hammer2_chain_unlock(scan);
388 			hammer2_chain_drop(scan);	/* ref from deferral */
389 		}
390 
391 		/*
392 		 * [re]flush chain.
393 		 */
394 		info.diddeferral = 0;
395 		hammer2_flush_core(&info, chain, istop);
396 
397 		/*
398 		 * Only loop if deep recursions have been deferred.
399 		 */
400 		if (TAILQ_EMPTY(&info.flushq))
401 			break;
402 
403 		if (++loops % 1000 == 0) {
404 			kprintf("hammer2_flush: excessive loops on %p\n",
405 				chain);
406 			if (hammer2_debug & 0x100000)
407 				Debugger("hell4");
408 		}
409 	}
410 	hammer2_chain_drop(chain);
411 	if (info.parent)
412 		hammer2_chain_drop(info.parent);
413 }
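
/*
 * The deferral mechanism above in schematic form (sketch only).  Deep
 * subtrees are parked on flushq by flush_core and re-run here with a
 * fresh stack until none remain.  drain_deferred() is a hypothetical
 * stand-in for the unwind loop.
 */
#if 0
	for (;;) {
		drain_deferred(&info.flushq);	/* re-run parked subtrees */
		info.diddeferral = 0;
		hammer2_flush_core(&info, chain, istop);
		if (TAILQ_EMPTY(&info.flushq))
			break;			/* nothing was deferred */
	}
#endif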
414 
415 /*
416  * This is the core of the chain flushing code.  The chain is locked by the
417  * caller and must also have an extra ref on it by the caller, and remains
418  * locked and will have an extra ref on return.  Upon return, the caller can
419  * test the UPDATE bit on the child to determine if the parent needs updating.
420  *
421  * (1) Determine if this node is a candidate for the flush, return if it is
422  *     not.  fchain and vchain are always candidates for the flush.
423  *
424  * (2) If we recurse too deep the chain is entered onto the deferral list and
425  *     the current flush stack is aborted until after the deferral list is
426  *     run.
427  *
428  * (3) Recursively flush live children (rbtree).  This can create deferrals.
429  *     A successful flush clears the MODIFIED and UPDATE bits on the children
430  *     and typically causes the parent to be marked MODIFIED as the children
431  *     update the parent's block table.  A parent might already be marked
432  *     MODIFIED due to a deletion (whose blocktable update in the parent is
433  *     handled by the frontend), or if the parent itself is modified by the
434  *     frontend for other reasons.
435  *
436  * (4) Permanently disconnected sub-trees are cleaned up by the front-end.
437  *     Deleted-but-open inodes can still be individually flushed via the
438  *     filesystem syncer.
439  *
440  * (5) Note that an unmodified child may still need the block table in its
441  *     parent updated (e.g. rename/move).  The child will have UPDATE set
442  *     in this case.
443  *
444  *			WARNING ON BREF MODIFY_TID/MIRROR_TID
445  *
446  * blockref.modify_tid is consistent only within a PFS, and will not be
447  * consistent during synchronization.  mirror_tid is consistent across the
448  * block device regardless of the PFS.
449  */
450 static void
451 hammer2_flush_core(hammer2_flush_info_t *info, hammer2_chain_t *chain,
452 		   int istop)
453 {
454 	hammer2_chain_t *parent;
455 	hammer2_dev_t *hmp;
456 	int diddeferral;
457 
458 	/*
459 	 * (1) Optimize downward recursion to locate nodes needing action.
460 	 *     Nothing to do if none of these flags are set.
461 	 */
462 	if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) == 0) {
463 		if (hammer2_debug & 0x200) {
464 			if (info->debug == NULL)
465 				info->debug = chain;
466 		} else {
467 			return;
468 		}
469 	}
470 
471 	hmp = chain->hmp;
472 	diddeferral = info->diddeferral;
473 	parent = info->parent;		/* can be NULL */
474 
475 	/*
476 	 * Downward search recursion
477 	 */
478 	if (chain->flags & (HAMMER2_CHAIN_DEFERRED | HAMMER2_CHAIN_DELAYED)) {
479 		/*
480 		 * Already deferred.
481 		 */
482 		++info->diddeferral;
483 	} else if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT) {
484 		/*
485 		 * Recursion depth reached.
486 		 */
487 		KKASSERT((chain->flags & HAMMER2_CHAIN_DELAYED) == 0);
488 		hammer2_chain_ref(chain);
489 		TAILQ_INSERT_TAIL(&info->flushq, chain, flush_node);
490 		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
491 		++info->diddeferral;
492 	} else if ((chain->flags & HAMMER2_CHAIN_PFSBOUNDARY) && istop == 0) {
493 		/*
494 		 * We do not recurse through PFSROOTs.  PFSROOT flushes are
495 		 * handled by the related pmp's (whether mounted or not,
496 		 * handled by the related pmps (whether mounted or not,
497 		 *
498 		 * But we must still process the PFSROOT chains for block
499 		 * table updates in their parent (which IS part of our flush).
500 		 *
501 		 * Note that the volume root, vchain, does not set this flag.
502 		 */
503 		;
504 	} else if (chain->flags & HAMMER2_CHAIN_ONFLUSH) {
505 		/*
506 		 * Downward recursion search (actual flush occurs bottom-up).
507 		 * pre-clear ONFLUSH.  It can get set again due to races,
508 		 * which we want so the scan finds us again in the next flush.
510 		 *
511 		 * Flush recursions stop at PFSROOT boundaries.  Each PFS
512 		 * must be individually flushed and then the root must
513 		 * be flushed.
514 		 */
515 		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
516 		info->parent = chain;
517 		hammer2_spin_ex(&chain->core.spin);
518 		RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
519 			NULL, hammer2_flush_recurse, info);
520 		hammer2_spin_unex(&chain->core.spin);
521 		info->parent = parent;
522 		if (info->diddeferral)
523 			hammer2_chain_setflush(chain);
524 	}
525 
526 	/*
527 	 * Now we are in the bottom-up part of the recursion.
528 	 *
529 	 * Do not update chain if lower layers were deferred.
530 	 */
531 	if (info->diddeferral)
532 		goto done;
533 
534 	/*
535 	 * Propagate the DESTROY flag downwards.  This dummies up the flush
536 	 * code and tries to invalidate related buffer cache buffers to
537 	 * avoid the disk write.
538 	 */
539 	if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
540 		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);
541 
542 	/*
543 	 * Chain was already modified or has become modified, flush it out.
544 	 */
545 again:
546 	if ((hammer2_debug & 0x200) &&
547 	    info->debug &&
548 	    (chain->flags & (HAMMER2_CHAIN_MODIFIED | HAMMER2_CHAIN_UPDATE))) {
549 		hammer2_chain_t *scan = chain;
550 
551 		kprintf("DISCONNECTED FLUSH %p->%p\n", info->debug, chain);
552 		while (scan) {
553 			kprintf("    chain %p [%08x] bref=%016jx:%02x\n",
554 				scan, scan->flags,
555 				scan->bref.key, scan->bref.type);
556 			if (scan == info->debug)
557 				break;
558 			scan = scan->parent;
559 		}
560 	}
561 
562 	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
563 		/*
564 		 * Dispose of the modified bit.
565 		 *
566 		 * UPDATE should already be set.
567 		 * bref.mirror_tid should already be set.
568 		 */
569 		KKASSERT((chain->flags & HAMMER2_CHAIN_UPDATE) ||
570 			 chain == &hmp->vchain);
571 		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
572 
573 		/*
574 		 * Manage threads waiting for excessive dirty memory to
575 		 * be retired.
576 		 */
577 		if (chain->pmp)
578 			hammer2_pfs_memory_wakeup(chain->pmp);
579 
580 		if ((chain->flags & HAMMER2_CHAIN_UPDATE) ||
581 		    chain == &hmp->vchain ||
582 		    chain == &hmp->fchain) {
583 			/*
584 			 * Drop the ref from the MODIFIED bit we cleared,
585 			 * net -1 ref.
586 			 */
587 			hammer2_chain_drop(chain);
588 		} else {
589 			/*
590 			 * Drop the ref from the MODIFIED bit we cleared and
591 			 * set a ref for the UPDATE bit we are setting.  Net
592 			 * 0 refs.
593 			 */
594 			atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
595 		}
596 
597 		/*
598 		 * Issue the flush.  This is indirect via the DIO.
599 		 *
600 		 * NOTE: A DELETED node that reaches this point must be
601 		 *	 flushed for synchronization point consistency.
602 		 *
603 		 * NOTE: Even though MODIFIED was already set, the related DIO
604 		 *	 might not be dirty due to a system buffer cache
605 		 *	 flush and must be set dirty if we are going to make
606 		 *	 further modifications to the buffer.  Chains with
607 		 *	 embedded data don't need this.
608 		 */
609 		if (hammer2_debug & 0x1000) {
610 			kprintf("Flush %p.%d %016jx/%d data=%016jx\n",
611 				chain, chain->bref.type,
612 				(uintmax_t)chain->bref.key,
613 				chain->bref.keybits,
614 				(uintmax_t)chain->bref.data_off);
615 		}
616 		if (hammer2_debug & 0x2000) {
617 			Debugger("Flush hell");
618 		}
619 
620 		/*
621 		 * Update chain CRCs for flush.
622 		 *
623 		 * NOTE: Volume headers are NOT flushed here as they require
624 		 *	 special processing.
625 		 */
626 		switch(chain->bref.type) {
627 		case HAMMER2_BREF_TYPE_FREEMAP:
628 			/*
629 			 * Update the volume header's freemap_tid to the
630 			 * freemap's flushing mirror_tid.
631 			 *
632 			 * (note: embedded data, do not call setdirty)
633 			 */
634 			KKASSERT(hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED);
635 			KKASSERT(chain == &hmp->fchain);
636 			hmp->voldata.freemap_tid = chain->bref.mirror_tid;
637 			kprintf("sync freemap mirror_tid %08jx\n",
638 				(intmax_t)chain->bref.mirror_tid);
639 
640 			/*
641 			 * The freemap can be flushed independently of the
642 			 * main topology, but for the case where it is
643 			 * flushed in the same transaction, and flushed
644 			 * before vchain (a case we want to allow for
645 			 * performance reasons), make sure modifications
646 			 * made during the flush under vchain use a new
647 			 * transaction id.
648 			 *
649 			 * Otherwise the mount recovery code will get confused.
650 			 */
651 			++hmp->voldata.mirror_tid;
652 			break;
653 		case HAMMER2_BREF_TYPE_VOLUME:
654 			/*
655 			 * The free block table is flushed by
656 			 * hammer2_vfs_sync() before it flushes vchain.
657 			 * We must still hold fchain locked while copying
658 			 * voldata to volsync, however.
659 			 *
660 			 * (note: embedded data, do not call setdirty)
661 			 */
662 			hammer2_chain_lock(&hmp->fchain,
663 					   HAMMER2_RESOLVE_ALWAYS);
664 			hammer2_voldata_lock(hmp);
665 			kprintf("sync volume  mirror_tid %08jx\n",
666 				(intmax_t)chain->bref.mirror_tid);
667 
668 			/*
669 			 * Update the volume header's mirror_tid to the
670 			 * main topology's flushing mirror_tid.  It is
671 			 * possible that voldata.mirror_tid is already
672 			 * beyond bref.mirror_tid due to the bump we made
673 			 * above in BREF_TYPE_FREEMAP.
674 			 */
675 			if (hmp->voldata.mirror_tid < chain->bref.mirror_tid) {
676 				hmp->voldata.mirror_tid =
677 					chain->bref.mirror_tid;
678 			}
679 
680 			/*
681 			 * The volume header is flushed manually by the
682 			 * syncer, not here.  All we do here is adjust the
683 			 * crc's.
684 			 */
685 			KKASSERT(chain->data != NULL);
686 			KKASSERT(chain->dio == NULL);
687 
688 			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1]=
689 				hammer2_icrc32(
690 					(char *)&hmp->voldata +
691 					 HAMMER2_VOLUME_ICRC1_OFF,
692 					HAMMER2_VOLUME_ICRC1_SIZE);
693 			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0]=
694 				hammer2_icrc32(
695 					(char *)&hmp->voldata +
696 					 HAMMER2_VOLUME_ICRC0_OFF,
697 					HAMMER2_VOLUME_ICRC0_SIZE);
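			/*
			 * (note) The whole-header CRC below is computed
			 * last; its range is assumed to cover the
			 * icrc_sects[] values just stored above.
			 */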
698 			hmp->voldata.icrc_volheader =
699 				hammer2_icrc32(
700 					(char *)&hmp->voldata +
701 					 HAMMER2_VOLUME_ICRCVH_OFF,
702 					HAMMER2_VOLUME_ICRCVH_SIZE);
703 
704 			kprintf("syncvolhdr %016jx %016jx\n",
705 				hmp->voldata.mirror_tid,
706 				hmp->vchain.bref.mirror_tid);
707 			hmp->volsync = hmp->voldata;
708 			atomic_set_int(&chain->flags, HAMMER2_CHAIN_VOLUMESYNC);
709 			hammer2_voldata_unlock(hmp);
710 			hammer2_chain_unlock(&hmp->fchain);
711 			break;
712 		case HAMMER2_BREF_TYPE_DATA:
713 			/*
714 			 * Data elements have already been flushed via the
715 			 * logical file buffer cache.  Their hash was set in
716 			 * the bref by the vop_write code.  Do not re-dirty.
717 			 *
718 			 * Make sure any device buffer(s) have been flushed
719 			 * out here (there aren't usually any to flush) XXX.
720 			 */
721 			break;
722 		case HAMMER2_BREF_TYPE_INDIRECT:
723 		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
724 		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
725 			/*
726 			 * Buffer I/O will be cleaned up when the volume is
727 			 * flushed (but the kernel is free to flush it before
728 			 * then, as well).
729 			 */
730 			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
731 			hammer2_chain_setcheck(chain, chain->data);
732 			break;
733 		case HAMMER2_BREF_TYPE_INODE:
734 			/*
735 			 * NOTE: We must call io_setdirty() to make any late
736 			 *	 changes to the inode data, the system might
737 			 *	 have already flushed the buffer.
738 			 */
739 			if (chain->data->ipdata.meta.op_flags &
740 			    HAMMER2_OPFLAG_PFSROOT) {
741 				/*
742 				 * non-NULL pmp if mounted as a PFS.  We must
743 				 * sync fields cached in the pmp? XXX
744 				 */
745 				hammer2_inode_data_t *ipdata;
746 
747 				hammer2_io_setdirty(chain->dio);
748 				ipdata = &chain->data->ipdata;
749 				if (chain->pmp) {
750 					ipdata->meta.pfs_inum =
751 						chain->pmp->inode_tid;
752 				}
753 			} else {
754 				/* can't be mounted as a PFS */
755 			}
756 
757 			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
758 			hammer2_chain_setcheck(chain, chain->data);
759 			break;
760 		default:
761 			KKASSERT(chain->flags & HAMMER2_CHAIN_EMBEDDED);
762 			panic("hammer2_flush_core: unsupported "
763 			      "embedded bref %d",
764 			      chain->bref.type);
765 			/* NOT REACHED */
766 		}
767 
768 		/*
769 		 * If the chain was destroyed try to avoid unnecessary I/O.
770 		 * (this only really works if the DIO system buffer is the
771 		 * same size as chain->bytes).
772 		 */
773 		if ((chain->flags & HAMMER2_CHAIN_DESTROY) && chain->dio) {
774 			hammer2_io_setinval(chain->dio, chain->bytes);
775 		}
776 	}
777 
778 	/*
779 	 * If UPDATE is set the parent block table may need to be updated.
780 	 *
781 	 * NOTE: UPDATE may be set on vchain or fchain in which case
782 	 *	 parent could be NULL.  It's easiest to allow the case
783 	 *	 and test for NULL.  parent can also wind up being NULL
784 	 *	 due to a deletion so we need to handle the case anyway.
785 	 *
786 	 * If no parent exists we can just clear the UPDATE bit.  If the
787 	 * chain gets reattached later on the bit will simply get set
788 	 * again.
789 	 */
790 	if ((chain->flags & HAMMER2_CHAIN_UPDATE) && parent == NULL) {
791 		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
792 		hammer2_chain_drop(chain);
793 	}
794 
795 	/*
796 	 * The chain may need its blockrefs updated in the parent.  This
797 	 * requires some fancy footwork.
798 	 */
799 	if (chain->flags & HAMMER2_CHAIN_UPDATE) {
800 		hammer2_blockref_t *base;
801 		int count;
802 
803 		/*
804 		 * Both parent and chain must be locked.  This requires
805 		 * temporarily unlocking the chain.  We have to deal with
806 		 * the case where the chain might be reparented or modified
807 		 * while it was unlocked.
808 		 */
809 		hammer2_chain_unlock(chain);
810 		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
811 		hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE);
812 		if (chain->parent != parent) {
813 			kprintf("PARENT MISMATCH ch=%p p=%p/%p\n",
814 				chain, chain->parent, parent);
815 			hammer2_chain_unlock(parent);
816 			goto done;
817 		}
818 
819 		/*
820 		 * Check race condition.  If someone got in and modified
821 		 * it again while it was unlocked, we have to loop up.
822 		 */
823 		if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
824 			hammer2_chain_unlock(parent);
825 			kprintf("hammer2_flush: chain %p flush-mod race\n",
826 				chain);
827 			goto again;
828 		}
829 
830 		/*
831 		 * Clear UPDATE flag, mark parent modified, update its
832 		 * modify_tid if necessary, and adjust the parent blockmap.
833 		 */
834 		if (chain->flags & HAMMER2_CHAIN_UPDATE) {
835 			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
836 			hammer2_chain_drop(chain);
837 		}
838 
839 		/*
840 		 * (optional code)
841 		 *
842 		 * Avoid actually modifying and updating the parent if it
843 		 * was flagged for destruction.  This can greatly reduce
844 		 * disk I/O in large tree removals because the
845 		 * hammer2_io_setinval() call in the upward recursion
846 		 * (see MODIFIED code above) can only handle a few cases.
847 		 */
848 		if (parent->flags & HAMMER2_CHAIN_DESTROY) {
849 			if (parent->bref.modify_tid < chain->bref.modify_tid) {
850 				parent->bref.modify_tid =
851 					chain->bref.modify_tid;
852 			}
853 			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_BMAPPED |
854 							HAMMER2_CHAIN_BMAPUPD);
855 			hammer2_chain_unlock(parent);
856 			goto skipupdate;
857 		}
858 
859 		/*
860 		 * We are updating the parent's blockmap, the parent must
861 		 * be set modified.
862 		 */
863 		hammer2_chain_modify(parent, info->mtid, 0);
864 		if (parent->bref.modify_tid < chain->bref.modify_tid)
865 			parent->bref.modify_tid = chain->bref.modify_tid;
866 
867 		/*
868 		 * Calculate blockmap pointer
869 		 */
870 		switch(parent->bref.type) {
871 		case HAMMER2_BREF_TYPE_INODE:
872 			/*
873 			 * Access the inode's block array.  However, there is
874 			 * no block array if the inode is flagged DIRECTDATA.
875 			 */
876 			if (parent->data &&
877 			    (parent->data->ipdata.meta.op_flags &
878 			     HAMMER2_OPFLAG_DIRECTDATA) == 0) {
879 				base = &parent->data->
880 					ipdata.u.blockset.blockref[0];
881 			} else {
882 				base = NULL;
883 			}
884 			count = HAMMER2_SET_COUNT;
885 			break;
886 		case HAMMER2_BREF_TYPE_INDIRECT:
887 		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
888 			if (parent->data)
889 				base = &parent->data->npdata[0];
890 			else
891 				base = NULL;
892 			count = parent->bytes / sizeof(hammer2_blockref_t);
893 			break;
894 		case HAMMER2_BREF_TYPE_VOLUME:
895 			base = &chain->hmp->voldata.sroot_blockset.blockref[0];
896 			count = HAMMER2_SET_COUNT;
897 			break;
898 		case HAMMER2_BREF_TYPE_FREEMAP:
899 			base = &parent->data->npdata[0];
900 			count = HAMMER2_SET_COUNT;
901 			break;
902 		default:
903 			base = NULL;
904 			count = 0;
905 			panic("hammer2_flush_core: "
906 			      "unrecognized blockref type: %d",
907 			      parent->bref.type);
908 		}
909 
910 		/*
911 		 * Blocktable updates
912 		 *
913 		 * We synchronize pending statistics at this time.  Delta
914 		 * adjustments designated for the current and upper level
915 		 * are synchronized.
916 		 */
917 		if (base && (chain->flags & HAMMER2_CHAIN_BMAPUPD)) {
918 			if (chain->flags & HAMMER2_CHAIN_BMAPPED) {
919 				hammer2_spin_ex(&parent->core.spin);
920 				hammer2_base_delete(parent, base, count,
921 						    &info->cache_index, chain);
922 				hammer2_spin_unex(&parent->core.spin);
923 				/* base_delete clears both bits */
924 			} else {
925 				atomic_clear_int(&chain->flags,
926 						 HAMMER2_CHAIN_BMAPUPD);
927 			}
928 		}
929 		if (base && (chain->flags & HAMMER2_CHAIN_BMAPPED) == 0) {
930 			hammer2_spin_ex(&parent->core.spin);
931 			hammer2_base_insert(parent, base, count,
932 					    &info->cache_index, chain);
933 			hammer2_spin_unex(&parent->core.spin);
934 			/* base_insert sets BMAPPED */
935 		}
936 		hammer2_chain_unlock(parent);
937 	}
938 skipupdate:
939 	;
940 
941 	/*
942 	 * Final cleanup after flush
943 	 */
944 done:
945 	KKASSERT(chain->refs > 0);
946 	if (hammer2_debug & 0x200) {
947 		if (info->debug == chain)
948 			info->debug = NULL;
949 	}
950 }
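
/*
 * Sketch of the caller contract documented above: after flush_core
 * returns, the still-locked, still-referenced chain can be tested for a
 * pending parent block-table update.
 */
#if 0
	hammer2_flush_core(info, chain, 0);
	if (chain->flags & HAMMER2_CHAIN_UPDATE) {
		/* parent's block table still needs this chain's blockref */
	}
#endif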
951 
952 /*
953  * Flush recursion helper, called from flush_core, calls flush_core.
954  *
955  * Flushes the children of the caller's chain (info->parent).  If a
956  * child must be deferred, info->diddeferral is bumped and the parent
957  * is re-flagged so a later flush pass picks the child up again.
958  *
959  * Ripouts can pull a child out of the rbtree while the scan is
960  * running, but the caller's flush scan order prevents any chains from
961  * being lost.  A child can be executed more than once.
962  *
963  * WARNING! If we do not call hammer2_flush_core() we must update
964  *	    bref.mirror_tid ourselves to indicate that the flush has
965  *	    processed the child.
966  *
967  * WARNING! parent->core spinlock is held on entry and return.
968  */
969 static int
970 hammer2_flush_recurse(hammer2_chain_t *child, void *data)
971 {
972 	hammer2_flush_info_t *info = data;
973 	hammer2_chain_t *parent = info->parent;
974 
975 	/*
976 	 * (child can never be fchain or vchain so a special check isn't
977 	 *  needed).
978 	 *
979 	 * We must ref the child before unlocking the spinlock.
980 	 *
981 	 * The caller has added a ref to the parent so we can temporarily
982 	 * unlock it in order to lock the child.
983 	 */
984 	hammer2_chain_ref(child);
985 	hammer2_spin_unex(&parent->core.spin);
986 
987 	hammer2_chain_unlock(parent);
988 	hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);
989 
990 	/*
991 	 * Recurse and collect deferral data.  We're in the media flush,
992 	 * this can cross PFS boundaries.
993 	 */
994 	if (child->flags & HAMMER2_CHAIN_FLUSH_MASK) {
995 		++info->depth;
996 		hammer2_flush_core(info, child, 0);
997 		--info->depth;
998 	} else if (hammer2_debug & 0x200) {
999 		if (info->debug == NULL)
1000 			info->debug = child;
1001 		++info->depth;
1002 		hammer2_flush_core(info, child, 0);
1003 		--info->depth;
1004 		if (info->debug == child)
1005 			info->debug = NULL;
1006 	}
1007 
1008 	/*
1009 	 * Relock to continue the loop
1010 	 */
1011 	hammer2_chain_unlock(child);
1012 	hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
1013 	hammer2_chain_drop(child);
1014 	KKASSERT(info->parent == parent);
1015 	hammer2_spin_ex(&parent->core.spin);
1016 
1017 	return (0);
1018 }
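
/*
 * This callback is driven by RB_SCAN from within hammer2_flush_core();
 * the invocation, repeated from above for reference:
 */
#if 0
	hammer2_spin_ex(&chain->core.spin);
	RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
		NULL, hammer2_flush_recurse, info);
	hammer2_spin_unex(&chain->core.spin);
#endif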
1019 
1020 /*
1021  * flush helper (backend threaded)
1022  *
1023  * Flushes core chains, issues disk sync, flushes volume roots.
1024  *
1025  * Primarily called from vfs_sync().
1026  */
1027 void
1028 hammer2_inode_xop_flush(hammer2_xop_t *arg, int clindex)
1029 {
1030 	hammer2_xop_flush_t *xop = &arg->xop_flush;
1031 	hammer2_chain_t *chain;
1032 	hammer2_chain_t *parent;
1033 	hammer2_dev_t *hmp;
1034 	int error = 0;
1035 	int total_error = 0;
1036 	int j;
1037 
1038 	/*
1039 	 * Flush core chains
1040 	 */
1041 	chain = hammer2_inode_chain(xop->head.ip, clindex,
1042 				    HAMMER2_RESOLVE_ALWAYS);
1043 	if (chain) {
1044 		hmp = chain->hmp;
1045 		if (chain->flags & HAMMER2_CHAIN_FLUSH_MASK) {
1046 			hammer2_flush(chain, xop->head.mtid, 1);
1047 			parent = chain->parent;
1048 			KKASSERT(chain->pmp != parent->pmp);
1049 			hammer2_chain_setflush(parent);
1050 		}
1051 		hammer2_chain_unlock(chain);
1052 		hammer2_chain_drop(chain);
1053 		chain = NULL;
1054 	} else {
1055 		hmp = NULL;
1056 	}
1057 
1058 	/*
1059 	 * Flush volume roots.  Avoid replication, we only want to
1060 	 * flush each hammer2_dev (hmp) once.
1061 	 */
1062 	for (j = clindex - 1; j >= 0; --j) {
1063 		if ((chain = xop->head.ip->cluster.array[j].chain) != NULL) {
1064 			if (chain->hmp == hmp) {
1065 				chain = NULL;	/* safety */
1066 				goto skip;
1067 			}
1068 		}
1069 	}
1070 	chain = NULL;	/* safety */
1071 
1072 	/*
1073 	 * spmp transaction.  The super-root is never directly mounted so
1074 	 * there shouldn't be any vnodes, let alone any dirty vnodes
1075 	 * associated with it.
1076 	 */
1077 	hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);
1078 
1079 	/*
1080 	 * Media mounts have two 'roots', vchain for the topology
1081 	 * and fchain for the free block table.  Flush both.
1082 	 *
1083 	 * Note that the topology and free block table are handled
1084 	 * independently, so the free block table can wind up being
1085 	 * ahead of the topology.  We depend on the bulk free scan
1086 	 * code to deal with any loose ends.
1087 	 */
1088 	hammer2_chain_ref(&hmp->vchain);
1089 	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
1090 	hammer2_chain_ref(&hmp->fchain);
1091 	hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
1092 	if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
1093 		/*
1094 		 * This will also modify vchain as a side effect,
1095 		 * mark vchain as modified now.
1096 		 */
1097 		hammer2_voldata_modify(hmp);
1098 		chain = &hmp->fchain;
1099 		hammer2_flush(chain, xop->head.mtid, 1);
1100 		KKASSERT(chain == &hmp->fchain);
1101 	}
1102 	hammer2_chain_unlock(&hmp->fchain);
1103 	hammer2_chain_unlock(&hmp->vchain);
1104 	hammer2_chain_drop(&hmp->fchain);
1105 	/* vchain dropped down below */
1106 
1107 	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
1108 	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
1109 		chain = &hmp->vchain;
1110 		hammer2_flush(chain, xop->head.mtid, 1);
1111 		KKASSERT(chain == &hmp->vchain);
1112 	}
1113 	hammer2_chain_unlock(&hmp->vchain);
1114 	hammer2_chain_drop(&hmp->vchain);
1115 
1116 	error = 0;
1117 
1118 	/*
1119 	 * We can't safely flush the volume header until we have
1120 	 * flushed any device buffers which have built up.
1121 	 *
1122 	 * XXX this isn't being incremental
1123 	 */
1124 	vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
1125 	error = VOP_FSYNC(hmp->devvp, MNT_WAIT, 0);
1126 	vn_unlock(hmp->devvp);
1127 
1128 	/*
1129 	 * The flush code sets CHAIN_VOLUMESYNC to indicate that the
1130 	 * volume header needs synchronization via hmp->volsync.
1131 	 *
1132 	 * XXX synchronize the flag & data with only this flush XXX
1133 	 */
1134 	if (error == 0 &&
1135 	    (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) {
1136 		struct buf *bp;
1137 
1138 		/*
1139 		 * Synchronize the disk before flushing the volume
1140 		 * header.
1141 		 */
1142 		bp = getpbuf(NULL);
1143 		bp->b_bio1.bio_offset = 0;
1144 		bp->b_bufsize = 0;
1145 		bp->b_bcount = 0;
1146 		bp->b_cmd = BUF_CMD_FLUSH;
1147 		bp->b_bio1.bio_done = biodone_sync;
1148 		bp->b_bio1.bio_flags |= BIO_SYNC;
1149 		vn_strategy(hmp->devvp, &bp->b_bio1);
1150 		biowait(&bp->b_bio1, "h2vol");
1151 		relpbuf(bp, NULL);
1152 
1153 		/*
1154 		 * Then we can safely flush the version of the
1155 		 * volume header synchronized by the flush code.
1156 		 */
1157 		j = hmp->volhdrno + 1;
1158 		if (j >= HAMMER2_NUM_VOLHDRS)
1159 			j = 0;
1160 		if (j * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE >
1161 		    hmp->volsync.volu_size) {
1162 			j = 0;
1163 		}
1164 		kprintf("sync volhdr %d %jd\n",
1165 			j, (intmax_t)hmp->volsync.volu_size);
1166 		bp = getblk(hmp->devvp, j * HAMMER2_ZONE_BYTES64,
1167 			    HAMMER2_PBUFSIZE, 0, 0);
1168 		atomic_clear_int(&hmp->vchain.flags,
1169 				 HAMMER2_CHAIN_VOLUMESYNC);
1170 		bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE);
1171 		bawrite(bp);
1172 		hmp->volhdrno = j;
1173 	}
1174 	if (error)
1175 		total_error = error;
1176 
1177 	hammer2_trans_done(hmp->spmp);  /* spmp trans */
1178 skip:
1179 	error = hammer2_xop_feed(&xop->head, NULL, clindex, total_error);
1180 }
1181