xref: /dragonfly/sys/vfs/hammer2/hammer2_chain.c (revision e7d467f4)
1 /*
2  * Copyright (c) 2011-2013 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 /*
36  * This subsystem implements most of the core support functions for
37  * the hammer2_chain and hammer2_chain_core structures.
38  *
39  * Chains represent the filesystem media topology in-memory.  Any given
40  * chain can represent an inode, indirect block, data, or other types
41  * of blocks.
42  *
43  * This module provides APIs for direct and indirect block searches,
44  * iterations, recursions, creation, deletion, replication, and snapshot
45  * views (used by the flush and snapshot code).
46  *
47  * Generally speaking any modification made to a chain must propagate all
48  * the way back to the volume header, issuing copy-on-write updates to the
49  * blockref tables all the way up.  Any chain except the volume header itself
50  * can be flushed to disk at any time, in any order.  None of it matters
51  * until we get to the point where we want to synchronize the volume header
52  * (see the flush code).
53  *
54  * The chain structure supports snapshot views in time, which are primarily
55  * used until the related data and meta-data is flushed to allow the
56  * filesystem to make snapshots without requiring it to first flush,
 57  * and to allow the filesystem to be flushed and modified concurrently
58  * with minimal or no stalls.
59  */
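/*
 * Illustrative sketch only: the typical consumer pattern for the
 * lookup/iteration API defined in this file, assuming the caller already
 * holds a referenced chain in a hypothetical variable "base" and key
 * bounds key_beg/key_end:
 *
 *	parent = hammer2_chain_lookup_init(base, 0);
 *	chain = hammer2_chain_lookup(&parent, key_beg, key_end, 0);
 *	while (chain) {
 *		... inspect chain->bref and chain->data ...
 *		chain = hammer2_chain_next(&parent, chain,
 *					   key_beg, key_end, 0);
 *	}
 *	hammer2_chain_lookup_done(parent);
 */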
60 #include <sys/cdefs.h>
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/types.h>
64 #include <sys/lock.h>
65 #include <sys/uuid.h>
66 
67 #include "hammer2.h"
68 
69 static int hammer2_indirect_optimize;	/* XXX SYSCTL */
70 
71 static hammer2_chain_t *hammer2_chain_create_indirect(
72 		hammer2_trans_t *trans, hammer2_chain_t *parent,
73 		hammer2_key_t key, int keybits, int *errorp);
74 
75 /*
76  * We use a red-black tree to guarantee safe lookups under shared locks.
77  *
78  * Chains can be overloaded onto the same index, creating a different
79  * view of a blockref table based on a transaction id.  The RBTREE
80  * deconflicts the view by sub-sorting on delete_tid.
81  *
82  * NOTE: Any 'current' chain which is not yet deleted will have a
83  *	 delete_tid of HAMMER2_MAX_TID (0xFFF....FFFLLU).
84  */
85 RB_GENERATE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
86 
87 int
88 hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2)
89 {
90 	if (chain1->index < chain2->index)
91 		return(-1);
92 	if (chain1->index > chain2->index)
93 		return(1);
94 	if (chain1->delete_tid < chain2->delete_tid)
95 		return(-1);
96 	if (chain1->delete_tid > chain2->delete_tid)
97 		return(1);
98 	return(0);
99 }
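/*
 * Illustrative sketch only: because chains sub-sort on delete_tid, the
 * live ("current") chain at a given index can be located with a dummy
 * key whose delete_tid is HAMMER2_MAX_TID, which is exactly what
 * hammer2_chain_find() does further below:
 *
 *	hammer2_chain_t dummy;
 *
 *	dummy.flags = 0;
 *	dummy.index = index;
 *	dummy.delete_tid = HAMMER2_MAX_TID;
 *	chain = RB_FIND(hammer2_chain_tree, &parent->core->rbtree, &dummy);
 */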
100 
101 /*
102  * Flag chain->parent SUBMODIFIED recursively up to the root.  The
103  * recursion can terminate when a parent is encountered with SUBMODIFIED
104  * already set.  The flag is NOT set on the passed-in chain.
105  *
106  * This can be confusing because even though chains are multi-homed,
107  * each chain has a specific idea of its parent (chain->parent) which
108  * is singly-homed.
109  *
110  * This flag is used by the flusher's downward recursion to detect
111  * modifications and can only be cleared bottom-up.
112  *
113  * The parent pointer is protected by all the modified children below it
114  * and cannot be changed until they have all been flushed.  However, setsubmod
115  * operations on new modifications can race flushes in progress, so we use
116  * the chain->core->cst.spin lock to handle collisions.
117  */
118 void
119 hammer2_chain_parent_setsubmod(hammer2_chain_t *chain)
120 {
121 	hammer2_chain_t *parent;
122 	hammer2_chain_core_t *core;
123 
124 	while ((parent = chain->parent) != NULL) {
125 		core = parent->core;
126 		spin_lock(&core->cst.spin);
127 		if (parent->flags & HAMMER2_CHAIN_SUBMODIFIED) {
128 			spin_unlock(&core->cst.spin);
129 			break;
130 		}
131 		atomic_set_int(&parent->flags, HAMMER2_CHAIN_SUBMODIFIED);
132 		spin_unlock(&core->cst.spin);
133 		chain = parent;
134 	}
135 }
136 
137 /*
138  * Allocate a new disconnected chain element representing the specified
139  * bref.  chain->refs is set to 1 and the passed bref is copied to
140  * chain->bref.  chain->bytes is derived from the bref.
141  *
142  * chain->core is NOT allocated and the media data and bp pointers are left
143  * NULL.  The caller must call chain_core_alloc() to allocate or associate
144  * a core with the chain.
145  *
146  * NOTE: Returns a referenced but unlocked (because there is no core) chain.
147  */
148 hammer2_chain_t *
149 hammer2_chain_alloc(hammer2_mount_t *hmp, hammer2_blockref_t *bref)
150 {
151 	hammer2_chain_t *chain;
152 	u_int bytes = 1U << (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
153 
154 	/*
155 	 * Construct the appropriate system structure.
156 	 */
157 	switch(bref->type) {
158 	case HAMMER2_BREF_TYPE_INODE:
159 	case HAMMER2_BREF_TYPE_INDIRECT:
160 	case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
161 	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
162 	case HAMMER2_BREF_TYPE_DATA:
163 	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
164 		chain = kmalloc(sizeof(*chain), hmp->mchain, M_WAITOK | M_ZERO);
165 		break;
166 	case HAMMER2_BREF_TYPE_VOLUME:
167 		chain = NULL;
168 		panic("hammer2_chain_alloc volume type illegal for op");
169 	default:
170 		chain = NULL;
171 		panic("hammer2_chain_alloc: unrecognized blockref type: %d",
172 		      bref->type);
173 	}
174 
175 	chain->hmp = hmp;
176 	chain->bref = *bref;
177 	chain->index = -1;		/* not yet assigned */
178 	chain->bytes = bytes;
179 	chain->refs = 1;
180 	chain->flags = HAMMER2_CHAIN_ALLOCATED;
181 	chain->delete_tid = HAMMER2_MAX_TID;
182 
183 	return (chain);
184 }
185 
186 /*
187  * Associate an existing core with the chain or allocate a new core.
188  *
189  * The core is not locked.  No additional refs on the chain are made.
190  */
191 void
192 hammer2_chain_core_alloc(hammer2_chain_t *chain, hammer2_chain_core_t *core)
193 {
194 	KKASSERT(chain->core == NULL);
195 
196 	if (core == NULL) {
197 		core = kmalloc(sizeof(*core), chain->hmp->mchain,
198 			       M_WAITOK | M_ZERO);
199 		RB_INIT(&core->rbtree);
200 		core->sharecnt = 1;
201 		chain->core = core;
202 		ccms_cst_init(&core->cst, chain);
203 	} else {
204 		atomic_add_int(&core->sharecnt, 1);
205 		chain->core = core;
206 	}
207 }
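/*
 * Illustrative sketch only: hammer2_chain_alloc() and
 * hammer2_chain_core_alloc() are used back-to-back, as hammer2_chain_get()
 * does below.  Passing a NULL core allocates a fresh core; passing an
 * existing core shares it (ochain/nchain are hypothetical names here):
 *
 *	chain = hammer2_chain_alloc(hmp, bref);
 *	hammer2_chain_core_alloc(chain, NULL);		new private core
 *
 *	nchain = hammer2_chain_alloc(hmp, &ochain->bref);
 *	hammer2_chain_core_alloc(nchain, ochain->core);	shared core
 */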
208 
209 /*
210  * Deallocate a chain after the caller has transitioned its refs to 0
211  * and disassociated it from its parent.
212  *
213  * We must drop sharecnt on the core (if any) and handle its 1->0 transition
214  * too.
215  */
216 static void
217 hammer2_chain_dealloc(hammer2_chain_t *chain)
218 {
219 	hammer2_chain_core_t *core;
220 
221 	/*
222 	 * Chain's flags are expected to be sane.
223 	 */
224 	KKASSERT((chain->flags & (HAMMER2_CHAIN_MOVED |
225 				  HAMMER2_CHAIN_MODIFIED |
226 				  HAMMER2_CHAIN_ONRBTREE)) == 0);
227 	KKASSERT(chain->duplink == NULL);
228 
229 	/*
230 	 * Disconnect chain->core from chain and free core if it was the
231 	 * last core.  If any children are present in the core's rbtree
232 	 * they cannot have a pointer to our chain by definition because
233 	 * our chain's refs have dropped to 0.  If this is the last sharecnt
234 	 * on core, then core's rbtree must be empty by definition.
235 	 */
236 	if ((core = chain->core) != NULL) {
237 		/*
238 		 * Other chains may reference the same core so the core's
239 		 * spinlock is needed to safely disconnect it.
240 		 */
241 		spin_lock(&core->cst.spin);
242 		chain->core = NULL;
243 		if (atomic_fetchadd_int(&core->sharecnt, -1) == 1) {
244 			spin_unlock(&core->cst.spin);
245 			KKASSERT(RB_EMPTY(&core->rbtree));
246 			KKASSERT(core->cst.count == 0);
247 			KKASSERT(core->cst.upgrade == 0);
248 			kfree(core, chain->hmp->mchain);
249 		} else {
250 			spin_unlock(&core->cst.spin);
251 		}
252 		core = NULL;		/* safety */
253 	}
254 
255 	/*
256 	 * Finally free the structure and return for possible recursion.
257 	 */
258 	hammer2_chain_free(chain);
259 }
260 
261 /*
262  * Free a disconnected chain element.
263  */
264 void
265 hammer2_chain_free(hammer2_chain_t *chain)
266 {
267 	hammer2_mount_t *hmp = chain->hmp;
268 
269 	switch(chain->bref.type) {
270 	case HAMMER2_BREF_TYPE_VOLUME:
271 		chain->data = NULL;
272 		break;
273 	case HAMMER2_BREF_TYPE_INODE:
274 		if (chain->data) {
275 			kfree(chain->data, hmp->minode);
276 			chain->data = NULL;
277 		}
278 		break;
279 	default:
280 		KKASSERT(chain->data == NULL);
281 		break;
282 	}
283 
284 	KKASSERT(chain->core == NULL);
285 	KKASSERT(chain->bp == NULL);
286 	chain->hmp = NULL;
287 
288 	if (chain->flags & HAMMER2_CHAIN_ALLOCATED)
289 		kfree(chain, hmp->mchain);
290 }
291 
292 /*
293  * Add a reference to a chain element, preventing its destruction.
294  */
295 void
296 hammer2_chain_ref(hammer2_chain_t *chain)
297 {
298 	atomic_add_int(&chain->refs, 1);
299 }
300 
301 /*
302  * Drop the caller's reference to the chain.  When the ref count drops to
303  * zero this function will disassociate the chain from its parent and
 304  * deallocate it, then recursively drop the parent using the implied ref
 305  * from chain->parent.
306  *
307  * WARNING! Just because we are able to deallocate a chain doesn't mean
308  *	    that chain->core->rbtree is empty.  There can still be a sharecnt
309  *	    on chain->core and RBTREE entries that refer to different parents.
310  */
311 static hammer2_chain_t *hammer2_chain_lastdrop(hammer2_chain_t *chain);
312 
313 void
314 hammer2_chain_drop(hammer2_chain_t *chain)
315 {
316 	u_int refs;
317 
318 	while (chain) {
319 		refs = chain->refs;
320 		cpu_ccfence();
321 		KKASSERT(refs > 0);
322 
323 		if (refs == 1) {
324 			if (chain->parent) {
325 				chain = hammer2_chain_lastdrop(chain);
326 				/* recursively drop parent or retry same */
327 			} else if (atomic_cmpset_int(&chain->refs, 1, 0)) {
328 				hammer2_chain_dealloc(chain);
329 				chain = NULL;
330 				/* no parent to recurse on */
331 			} else {
332 				/* retry the same chain */
333 			}
334 		} else {
335 			if (atomic_cmpset_int(&chain->refs, refs, refs - 1))
336 				break;
337 			/* retry the same chain */
338 		}
339 	}
340 }
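/*
 * Illustrative sketch only: a ref is typically taken to hold onto a chain
 * across an unlock and is later handed back to a lock via RESOLVE_NOREF,
 * the pattern used by the lookup code later in this file:
 *
 *	hammer2_chain_ref(chain);
 *	hammer2_chain_unlock(chain);
 *	... chain cannot be destroyed while the ref is held ...
 *	hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE |
 *				  HAMMER2_RESOLVE_NOREF);
 */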
341 
342 /*
343  * Safe handling of the 1->0 transition on chain when the chain has a
344  * parent.
345  *
346  * NOTE: A chain can only be removed from its parent core's RBTREE on
347  *	 the 1->0 transition by definition.  No other code is allowed
348  *	 to remove chain from its RBTREE, so no race is possible.
349  */
350 static
351 hammer2_chain_t *
352 hammer2_chain_lastdrop(hammer2_chain_t *chain)
353 {
354 	hammer2_chain_t *parent;
355 	hammer2_chain_core_t *parent_core;
356 
357 	parent = chain->parent;
358 	parent_core = parent->core;
359 	KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
360 
361 	spin_lock(&parent_core->cst.spin);
362 	if (atomic_cmpset_int(&chain->refs, 1, 0)) {
363 		RB_REMOVE(hammer2_chain_tree, &parent_core->rbtree, chain);
364 		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
365 		chain->parent = NULL;	/* NULL field, must drop implied ref */
366 		spin_unlock(&parent_core->cst.spin);
367 		if (chain->duplink) {
368 			hammer2_chain_drop(chain->duplink);
369 			chain->duplink = NULL;
370 		}
371 		hammer2_chain_dealloc(chain);
372 		chain = parent;		/* recursively drop parent */
373 	} else {
374 		spin_unlock(&parent_core->cst.spin);
375 	}
376 	return (chain);
377 }
378 
379 /*
380  * Ref and lock a chain element, acquiring its data with I/O if necessary,
381  * and specify how you would like the data to be resolved.
382  *
383  * Returns 0 on success or an error code if the data could not be acquired.
384  * The chain element is locked either way.
385  *
 386  * The lock is allowed to recurse; multiple locking ops will aggregate
387  * the requested resolve types.  Once data is assigned it will not be
388  * removed until the last unlock.
389  *
390  * HAMMER2_RESOLVE_NEVER - Do not resolve the data element.
391  *			   (typically used to avoid device/logical buffer
392  *			    aliasing for data)
393  *
394  * HAMMER2_RESOLVE_MAYBE - Do not resolve data elements for chains in
395  *			   the INITIAL-create state (indirect blocks only).
396  *
397  *			   Do not resolve data elements for DATA chains.
398  *			   (typically used to avoid device/logical buffer
399  *			    aliasing for data)
400  *
401  * HAMMER2_RESOLVE_ALWAYS- Always resolve the data element.
402  *
403  * HAMMER2_RESOLVE_SHARED- (flag) The chain is locked shared, otherwise
404  *			   it will be locked exclusive.
405  *
406  * NOTE: Embedded elements (volume header, inodes) are always resolved
407  *	 regardless.
408  *
409  * NOTE: Specifying HAMMER2_RESOLVE_ALWAYS on a newly-created non-embedded
410  *	 element will instantiate and zero its buffer, and flush it on
411  *	 release.
412  *
413  * NOTE: (data) elements are normally locked RESOLVE_NEVER or RESOLVE_MAYBE
414  *	 so as not to instantiate a device buffer, which could alias against
415  *	 a logical file buffer.  However, if ALWAYS is specified the
416  *	 device buffer will be instantiated anyway.
417  *
418  * WARNING! If data must be fetched a shared lock will temporarily be
419  *	    upgraded to exclusive.  However, a deadlock can occur if
420  *	    the caller owns more than one shared lock.
421  */
422 int
423 hammer2_chain_lock(hammer2_chain_t *chain, int how)
424 {
425 	hammer2_mount_t *hmp;
426 	hammer2_chain_core_t *core;
427 	hammer2_blockref_t *bref;
428 	hammer2_off_t pbase;
429 	hammer2_off_t peof;
430 	ccms_state_t ostate;
431 	size_t boff;
432 	size_t bbytes;
433 	int error;
434 	char *bdata;
435 
436 	/*
437 	 * Ref and lock the element.  Recursive locks are allowed.
438 	 */
439 	if ((how & HAMMER2_RESOLVE_NOREF) == 0)
440 		hammer2_chain_ref(chain);
441 	hmp = chain->hmp;
442 	KKASSERT(hmp != NULL);
443 
444 	/*
445 	 * Get the appropriate lock.
446 	 */
447 	core = chain->core;
448 	if (how & HAMMER2_RESOLVE_SHARED)
449 		ccms_thread_lock(&core->cst, CCMS_STATE_SHARED);
450 	else
451 		ccms_thread_lock(&core->cst, CCMS_STATE_EXCLUSIVE);
452 
453 	/*
454 	 * If we already have a valid data pointer no further action is
455 	 * necessary.
456 	 */
457 	if (chain->data)
458 		return (0);
459 
460 	/*
461 	 * Do we have to resolve the data?
462 	 */
463 	switch(how & HAMMER2_RESOLVE_MASK) {
464 	case HAMMER2_RESOLVE_NEVER:
465 		return(0);
466 	case HAMMER2_RESOLVE_MAYBE:
467 		if (chain->flags & HAMMER2_CHAIN_INITIAL)
468 			return(0);
469 		if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
470 			return(0);
471 		if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF)
472 			return(0);
473 		/* fall through */
474 	case HAMMER2_RESOLVE_ALWAYS:
475 		break;
476 	}
477 
478 	/*
479 	 * Upgrade to an exclusive lock so we can safely manipulate the
480 	 * buffer cache.  If another thread got to it before us we
481 	 * can just return.
482 	 */
483 	ostate = ccms_thread_lock_upgrade(&core->cst);
484 	if (chain->data) {
485 		ccms_thread_lock_restore(&core->cst, ostate);
486 		return (0);
487 	}
488 
489 	/*
490 	 * We must resolve to a device buffer, either by issuing I/O or
491 	 * by creating a zero-fill element.  We do not mark the buffer
492 	 * dirty when creating a zero-fill element (the hammer2_chain_modify()
493 	 * API must still be used to do that).
494 	 *
495 	 * The device buffer is variable-sized in powers of 2 down
496 	 * to HAMMER2_MINALLOCSIZE (typically 1K).  A 64K physical storage
497 	 * chunk always contains buffers of the same size. (XXX)
498 	 *
499 	 * The minimum physical IO size may be larger than the variable
500 	 * block size.
501 	 */
502 	bref = &chain->bref;
503 
504 	if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
505 		bbytes = HAMMER2_MINIOSIZE;
506 	pbase = bref->data_off & ~(hammer2_off_t)(bbytes - 1);
507 	peof = (pbase + HAMMER2_PBUFSIZE64) & ~HAMMER2_PBUFMASK64;
508 	boff = bref->data_off & HAMMER2_OFF_MASK & (bbytes - 1);
509 	KKASSERT(pbase != 0);
510 
511 	/*
512 	 * The getblk() optimization can only be used on newly created
513 	 * elements if the physical block size matches the request.
514 	 */
515 	if ((chain->flags & HAMMER2_CHAIN_INITIAL) &&
516 	    chain->bytes == bbytes) {
517 		chain->bp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
518 		error = 0;
519 	} else if (hammer2_cluster_enable) {
520 		error = cluster_read(hmp->devvp, peof, pbase, bbytes,
521 				     HAMMER2_PBUFSIZE, HAMMER2_PBUFSIZE,
522 				     &chain->bp);
523 	} else {
524 		error = bread(hmp->devvp, pbase, bbytes, &chain->bp);
525 	}
526 
527 	if (error) {
 528 		kprintf("hammer2_chain_lock: I/O error %016jx: %d\n",
529 			(intmax_t)pbase, error);
530 		bqrelse(chain->bp);
531 		chain->bp = NULL;
532 		ccms_thread_lock_restore(&core->cst, ostate);
533 		return (error);
534 	}
535 
536 	/*
537 	 * Zero the data area if the chain is in the INITIAL-create state.
538 	 * Mark the buffer for bdwrite().
539 	 */
540 	bdata = (char *)chain->bp->b_data + boff;
541 	if (chain->flags & HAMMER2_CHAIN_INITIAL) {
542 		bzero(bdata, chain->bytes);
543 		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
544 	}
545 
546 	/*
547 	 * Setup the data pointer, either pointing it to an embedded data
548 	 * structure and copying the data from the buffer, or pointing it
549 	 * into the buffer.
550 	 *
551 	 * The buffer is not retained when copying to an embedded data
552 	 * structure in order to avoid potential deadlocks or recursions
553 	 * on the same physical buffer.
554 	 */
555 	switch (bref->type) {
556 	case HAMMER2_BREF_TYPE_VOLUME:
557 		/*
558 		 * Copy data from bp to embedded buffer
559 		 */
560 		panic("hammer2_chain_lock: called on unresolved volume header");
561 #if 0
562 		/* NOT YET */
563 		KKASSERT(pbase == 0);
564 		KKASSERT(chain->bytes == HAMMER2_PBUFSIZE);
565 		bcopy(bdata, &hmp->voldata, chain->bytes);
566 		chain->data = (void *)&hmp->voldata;
567 		bqrelse(chain->bp);
568 		chain->bp = NULL;
569 #endif
570 		break;
571 	case HAMMER2_BREF_TYPE_INODE:
572 		/*
573 		 * Copy data from bp to embedded buffer, do not retain the
574 		 * device buffer.
575 		 */
576 		KKASSERT(chain->bytes == sizeof(chain->data->ipdata));
577 		chain->data = kmalloc(sizeof(chain->data->ipdata),
578 				      hmp->minode, M_WAITOK | M_ZERO);
579 		bcopy(bdata, &chain->data->ipdata, chain->bytes);
580 		bqrelse(chain->bp);
581 		chain->bp = NULL;
582 		break;
583 	case HAMMER2_BREF_TYPE_INDIRECT:
584 	case HAMMER2_BREF_TYPE_DATA:
585 	case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
586 	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
587 	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
588 	default:
589 		/*
590 		 * Point data at the device buffer and leave bp intact.
591 		 */
592 		chain->data = (void *)bdata;
593 		break;
594 	}
595 
596 	/*
597 	 * Make sure the bp is not specifically owned by this thread before
598 	 * restoring to a possibly shared lock, so another hammer2 thread
599 	 * can release it.
600 	 */
601 	if (chain->bp)
602 		BUF_KERNPROC(chain->bp);
603 	ccms_thread_lock_restore(&core->cst, ostate);
604 	return (0);
605 }
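/*
 * Illustrative sketch only: typical lock/unlock pairings.  Meta-data
 * consumers usually resolve the data, while file-data consumers usually
 * do not, to avoid aliasing the device buffer against logical buffers:
 *
 *	hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE);
 *	... use chain->data ...
 *	hammer2_chain_unlock(chain);
 *
 *	hammer2_chain_lock(chain, HAMMER2_RESOLVE_NEVER |
 *				  HAMMER2_RESOLVE_SHARED);
 *	... bref-only access, chain->data is not resolved ...
 *	hammer2_chain_unlock(chain);
 */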
606 
607 /*
608  * Unlock and deref a chain element.
609  *
610  * On the last lock release any non-embedded data (chain->bp) will be
611  * retired.
612  */
613 void
614 hammer2_chain_unlock(hammer2_chain_t *chain)
615 {
616 	hammer2_chain_core_t *core = chain->core;
617 	long *counterp;
618 
619 	/*
620 	 * Release the CST lock but with a special 1->0 transition case
621 	 * to also drop the refs on chain.  Multiple CST locks only
 622 	 * to also drop the refs on chain (one chain ref is dropped per unlock).
623 	 * Returns non-zero if lock references remain.  When zero is
624 	 * returned the last lock reference is retained and any shared
625 	 * lock is upgraded to an exclusive lock for final disposition.
626 	 */
627 	if (ccms_thread_unlock_zero(&core->cst)) {
628 		KKASSERT(chain->refs > 1);
629 		atomic_add_int(&chain->refs, -1);
630 		return;
631 	}
632 
633 	/*
634 	 * Shortcut the case if the data is embedded or not resolved.
635 	 *
636 	 * Do NOT NULL out chain->data (e.g. inode data), it might be
637 	 * dirty.
638 	 *
639 	 * The DIRTYBP flag is non-applicable in this situation and can
640 	 * be cleared to keep the flags state clean.
641 	 */
642 	if (chain->bp == NULL) {
643 		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
644 		ccms_thread_unlock(&core->cst);
645 		hammer2_chain_drop(chain);
646 		return;
647 	}
648 
649 	/*
650 	 * Statistics
651 	 */
652 	if ((chain->flags & HAMMER2_CHAIN_DIRTYBP) == 0) {
653 		;
654 	} else if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
655 		switch(chain->bref.type) {
656 		case HAMMER2_BREF_TYPE_DATA:
657 			counterp = &hammer2_ioa_file_write;
658 			break;
659 		case HAMMER2_BREF_TYPE_INODE:
660 			counterp = &hammer2_ioa_meta_write;
661 			break;
662 		case HAMMER2_BREF_TYPE_INDIRECT:
663 			counterp = &hammer2_ioa_indr_write;
664 			break;
665 		case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
666 		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
667 		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
668 			counterp = &hammer2_ioa_fmap_write;
669 			break;
670 		default:
671 			counterp = &hammer2_ioa_volu_write;
672 			break;
673 		}
674 		++*counterp;
675 	} else {
676 		switch(chain->bref.type) {
677 		case HAMMER2_BREF_TYPE_DATA:
678 			counterp = &hammer2_iod_file_write;
679 			break;
680 		case HAMMER2_BREF_TYPE_INODE:
681 			counterp = &hammer2_iod_meta_write;
682 			break;
683 		case HAMMER2_BREF_TYPE_INDIRECT:
684 			counterp = &hammer2_iod_indr_write;
685 			break;
686 		case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
687 		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
688 		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
689 			counterp = &hammer2_iod_fmap_write;
690 			break;
691 		default:
692 			counterp = &hammer2_iod_volu_write;
693 			break;
694 		}
695 		++*counterp;
696 	}
697 
698 	/*
699 	 * Clean out the bp.
700 	 *
701 	 * If a device buffer was used for data be sure to destroy the
702 	 * buffer when we are done to avoid aliases (XXX what about the
703 	 * underlying VM pages?).
704 	 *
 705 	 * NOTE: Freemap leaves use reserved blocks and thus no aliasing
706 	 *	 is possible.
707 	 */
708 	if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
709 		chain->bp->b_flags |= B_RELBUF;
710 
711 	/*
712 	 * The DIRTYBP flag tracks whether we have to bdwrite() the buffer
713 	 * or not.  The flag will get re-set when chain_modify() is called,
714 	 * even if MODIFIED is already set, allowing the OS to retire the
 715 	 * buffer independently of a hammer2 flush.
716 	 */
717 	chain->data = NULL;
718 	if (chain->flags & HAMMER2_CHAIN_DIRTYBP) {
719 		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
720 		if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
721 			atomic_clear_int(&chain->flags,
722 					 HAMMER2_CHAIN_IOFLUSH);
723 			chain->bp->b_flags |= B_RELBUF;
724 			cluster_awrite(chain->bp);
725 		} else {
726 			chain->bp->b_flags |= B_CLUSTEROK;
727 			bdwrite(chain->bp);
728 		}
729 	} else {
730 		if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
731 			atomic_clear_int(&chain->flags,
732 					 HAMMER2_CHAIN_IOFLUSH);
733 			chain->bp->b_flags |= B_RELBUF;
734 			brelse(chain->bp);
735 		} else {
736 			/* bp might still be dirty */
737 			bqrelse(chain->bp);
738 		}
739 	}
740 	chain->bp = NULL;
741 	ccms_thread_unlock(&core->cst);
742 	hammer2_chain_drop(chain);
743 }
744 
745 /*
746  * Resize the chain's physical storage allocation in-place.  This may
747  * replace the passed-in chain with a new chain.
748  *
749  * Chains can be resized smaller without reallocating the storage.
750  * Resizing larger will reallocate the storage.
751  *
752  * Must be passed an exclusively locked parent and chain, returns a new
753  * exclusively locked chain at the same index and unlocks the old chain.
754  * Flushes the buffer if necessary.
755  *
756  * If you want the resize code to copy the data to the new block then the
757  * caller should lock the chain RESOLVE_MAYBE or RESOLVE_ALWAYS.
758  *
759  * If the caller already holds a logical buffer containing the data and
760  * intends to bdwrite() that buffer resolve with RESOLVE_NEVER.  The resize
761  * operation will then not copy the (stale) data from the media.
762  *
763  * This function is mostly used with DATA blocks locked RESOLVE_NEVER in order
764  * to avoid instantiating a device buffer that conflicts with the vnode
765  * data buffer.
766  *
767  * XXX flags currently ignored, uses chain->bp to detect data/no-data.
768  * XXX return error if cannot resize.
769  */
770 void
771 hammer2_chain_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
772 		     struct buf *bp,
773 		     hammer2_chain_t *parent, hammer2_chain_t **chainp,
774 		     int nradix, int flags)
775 {
776 	hammer2_mount_t *hmp = trans->hmp;
777 	hammer2_chain_t *chain = *chainp;
778 	struct buf *nbp;
779 	hammer2_off_t pbase;
780 	size_t obytes;
781 	size_t nbytes;
782 	size_t bbytes;
783 	int boff;
784 	char *bdata;
785 	int error;
786 
787 	/*
788 	 * Only data and indirect blocks can be resized for now.
 789 	 * (The volume root, inodes, and freemap elements use a fixed size).
790 	 */
791 	KKASSERT(chain != &hmp->vchain);
792 	KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA ||
793 		 chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT);
794 
795 	/*
796 	 * Nothing to do if the element is already the proper size
797 	 */
798 	obytes = chain->bytes;
799 	nbytes = 1U << nradix;
800 	if (obytes == nbytes)
801 		return;
802 
803 	/*
804 	 * Delete the old chain and duplicate it at the same (parent, index),
805 	 * returning a new chain.  This allows the old chain to still be
806 	 * used by the flush code.  Duplication occurs in-place.
807 	 *
808 	 * NOTE: If we are not crossing a synchronization point the
809 	 *	 duplication code will simply reuse the existing chain
810 	 *	 structure.
811 	 */
812 	hammer2_chain_delete(trans, parent, chain);
813 	hammer2_chain_duplicate(trans, parent, chain->index, &chain);
814 
815 	/*
816 	 * Set MODIFIED and add a chain ref to prevent destruction.  Both
817 	 * modified flags share the same ref.  (duplicated chains do not
 818 	 * start out MODIFIED, except possibly when the duplication code
819 	 * decided to reuse the existing chain as-is).
820 	 *
821 	 * If the chain is already marked MODIFIED then we can safely
822 	 * return the previous allocation to the pool without having to
823 	 * worry about snapshots.
824 	 */
825 	if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
826 		atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
827 		atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
828 		hammer2_chain_ref(chain);
829 	} else {
830 		hammer2_freemap_free(hmp, chain->bref.data_off,
831 				     chain->bref.type);
832 	}
833 
834 	/*
835 	 * Relocate the block, even if making it smaller (because different
836 	 * block sizes may be in different regions).
837 	 */
838 	chain->bref.data_off = hammer2_freemap_alloc(hmp, chain->bref.type,
839 						     nbytes);
840 	chain->bytes = nbytes;
841 	/*ip->delta_dcount += (ssize_t)(nbytes - obytes);*/ /* XXX atomic */
842 
843 	/*
844 	 * The device buffer may be larger than the allocation size.
845 	 */
846 	if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
847 		bbytes = HAMMER2_MINIOSIZE;
848 	pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
849 	boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
850 
851 	/*
852 	 * Only copy the data if resolved, otherwise the caller is
853 	 * responsible.
854 	 */
855 	if (chain->bp) {
856 		KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
857 			 chain->bref.type == HAMMER2_BREF_TYPE_DATA);
858 		KKASSERT(chain != &hmp->vchain);	/* safety */
859 
860 		/*
861 		 * The getblk() optimization can only be used if the
862 		 * physical block size matches the request.
863 		 */
864 		if (nbytes == bbytes) {
865 			nbp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
866 			error = 0;
867 		} else {
868 			error = bread(hmp->devvp, pbase, bbytes, &nbp);
869 			KKASSERT(error == 0);
870 		}
871 		bdata = (char *)nbp->b_data + boff;
872 
873 		/*
874 		 * chain->bp and chain->data represent the on-disk version
 875 		 * of the data, whereas the passed-in bp is usually a
876 		 * more up-to-date logical buffer.  However, there is no
877 		 * need to synchronize the more up-to-date data in (bp)
878 		 * as it will do that on its own when it flushes.
879 		 */
880 		if (nbytes < obytes) {
881 			bcopy(chain->data, bdata, nbytes);
882 		} else {
883 			bcopy(chain->data, bdata, obytes);
884 			bzero(bdata + obytes, nbytes - obytes);
885 		}
886 
887 		/*
888 		 * NOTE: The INITIAL state of the chain is left intact.
889 		 *	 We depend on hammer2_chain_modify() to do the
890 		 *	 right thing.
891 		 *
892 		 * NOTE: We set B_NOCACHE to throw away the previous bp and
893 		 *	 any VM backing store, even if it was dirty.
894 		 *	 Otherwise we run the risk of a logical/device
895 		 *	 conflict on reallocation.
896 		 */
897 		chain->bp->b_flags |= B_RELBUF | B_NOCACHE;
898 		brelse(chain->bp);
899 		chain->bp = nbp;
900 		chain->data = (void *)bdata;
901 		hammer2_chain_modify(trans, chain, 0);
902 	}
903 
904 	/*
905 	 * Make sure the chain is marked MOVED and SUBMOD is set in the
906 	 * parent(s) so the adjustments are picked up by flush.
907 	 */
908 	if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
909 		hammer2_chain_ref(chain);
910 		atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
911 	}
912 	hammer2_chain_parent_setsubmod(chain);
913 }
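/*
 * Illustrative sketch only: nradix is log2 of the desired allocation size,
 * so a hypothetical caller growing a logical file block to 16KB (radix 14),
 * holding its own up-to-date logical buffer bp and an exclusively locked
 * parent, might do:
 *
 *	hammer2_chain_lock(chain, HAMMER2_RESOLVE_NEVER);
 *	hammer2_chain_resize(trans, ip, bp, parent, &chain, 14, 0);
 *	hammer2_chain_unlock(chain);
 *
 * RESOLVE_NEVER avoids copying stale media data since bp will be written
 * out by the caller anyway.
 */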
914 
915 /*
916  * Convert a locked chain that was retrieved read-only to read-write.
917  *
918  * If not already marked modified a new physical block will be allocated
919  * and assigned to the bref.
920  *
921  * If already modified and the new modification crosses a synchronization
922  * point the chain is duplicated in order to allow the flush to synchronize
923  * the old chain.  The new chain replaces the old.
924  *
925  * Non-data blocks - The chain should be locked to at least the RESOLVE_MAYBE
926  *		     level or the COW operation will not work.
927  *
928  * Data blocks	   - The chain is usually locked RESOLVE_NEVER so as not to
929  *		     run the data through the device buffers.
930  *
931  * This function may return a different chain than was passed, in which case
932  * the old chain will be unlocked and the new chain will be locked.
933  */
934 void
935 hammer2_chain_modify(hammer2_trans_t *trans, hammer2_chain_t *chain, int flags)
936 {
937 	hammer2_mount_t *hmp = trans->hmp;
938 	hammer2_off_t pbase;
939 	struct buf *nbp;
940 	int error;
941 	size_t bbytes;
942 	size_t boff;
943 	void *bdata;
944 
945 	/*
 946 	 * modify_tid is only updated for primary modifications, not for
947 	 * propagated brefs.  mirror_tid will be updated regardless during
948 	 * the flush, no need to set it here.
949 	 */
950 	if ((flags & HAMMER2_MODIFY_NO_MODIFY_TID) == 0)
951 		chain->bref.modify_tid = trans->sync_tid;
952 
953 	/*
954 	 * If the chain is already marked MODIFIED we can just return.
955 	 *
956 	 * However, it is possible that a prior lock/modify sequence
957 	 * retired the buffer.  During this lock/modify sequence MODIFIED
958 	 * may still be set but the buffer could wind up clean.  Since
959 	 * the caller is going to modify the buffer further we have to
960 	 * be sure that DIRTYBP is set again.
961 	 */
962 	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
963 		if ((flags & HAMMER2_MODIFY_OPTDATA) == 0 &&
964 		    chain->bp == NULL) {
965 			goto skip1;
966 		}
967 		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
968 		return;
969 	}
970 
971 	/*
972 	 * Set MODIFIED and add a chain ref to prevent destruction.  Both
973 	 * modified flags share the same ref.
974 	 */
975 	atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
976 	hammer2_chain_ref(chain);
977 
978 	/*
979 	 * We must allocate the copy-on-write block.
980 	 *
981 	 * If the data is embedded no other action is required.
982 	 *
983 	 * If the data is not embedded we acquire and clear the
984 	 * new block.  If chain->data is not NULL we then do the
985 	 * copy-on-write.  chain->data will then be repointed to the new
986 	 * buffer and the old buffer will be released.
987 	 *
988 	 * For newly created elements with no prior allocation we go
989 	 * through the copy-on-write steps except without the copying part.
990 	 */
991 	if (chain != &hmp->vchain) {
992 		if ((hammer2_debug & 0x0001) &&
993 		    (chain->bref.data_off & HAMMER2_OFF_MASK)) {
994 			kprintf("Replace %d\n", chain->bytes);
995 		}
996 		chain->bref.data_off =
997 			hammer2_freemap_alloc(hmp, chain->bref.type,
998 					      chain->bytes);
999 		/* XXX failed allocation */
1000 	}
1001 
1002 	/*
1003 	 * If data instantiation is optional and the chain has no current
1004 	 * data association (typical for DATA and newly-created INDIRECT
1005 	 * elements), don't instantiate the buffer now.
1006 	 */
1007 	if ((flags & HAMMER2_MODIFY_OPTDATA) && chain->bp == NULL)
1008 		goto skip2;
1009 
1010 skip1:
1011 	/*
1012 	 * Setting the DIRTYBP flag will cause the buffer to be dirtied or
1013 	 * written-out on unlock.  This bit is independent of the MODIFIED
1014 	 * bit because the chain may still need meta-data adjustments done
1015 	 * by virtue of MODIFIED for its parent, and the buffer can be
1016 	 * flushed out (possibly multiple times) by the OS before that.
1017 	 *
1018 	 * Clearing the INITIAL flag (for indirect blocks) indicates that
1019 	 * a zero-fill buffer has been instantiated.
1020 	 */
1021 	atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
1022 	atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1023 
1024 	/*
1025 	 * We currently should never instantiate a device buffer for a
1026 	 * file data chain.  (We definitely can for a freemap chain).
1027 	 */
1028 	KKASSERT(chain->bref.type != HAMMER2_BREF_TYPE_DATA);
1029 
1030 	/*
1031 	 * Execute COW operation
1032 	 */
1033 	switch(chain->bref.type) {
1034 	case HAMMER2_BREF_TYPE_VOLUME:
1035 	case HAMMER2_BREF_TYPE_INODE:
1036 		/*
1037 		 * The data is embedded, no copy-on-write operation is
1038 		 * needed.
1039 		 */
1040 		KKASSERT(chain->bp == NULL);
1041 		break;
1042 	case HAMMER2_BREF_TYPE_DATA:
1043 	case HAMMER2_BREF_TYPE_INDIRECT:
1044 	case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1045 	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1046 	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
1047 		/*
1048 		 * Perform the copy-on-write operation
1049 		 */
1050 		KKASSERT(chain != &hmp->vchain);	/* safety */
1051 		/*
1052 		 * The device buffer may be larger than the allocation size.
1053 		 */
1054 		if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
1055 			bbytes = HAMMER2_MINIOSIZE;
1056 		pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
1057 		boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
1058 
1059 		/*
1060 		 * The getblk() optimization can only be used if the
1061 		 * physical block size matches the request.
1062 		 */
1063 		if (chain->bytes == bbytes) {
1064 			nbp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
1065 			error = 0;
1066 		} else {
1067 			error = bread(hmp->devvp, pbase, bbytes, &nbp);
1068 			KKASSERT(error == 0);
1069 		}
1070 		bdata = (char *)nbp->b_data + boff;
1071 
1072 		/*
1073 		 * Copy or zero-fill on write depending on whether
1074 		 * chain->data exists or not.
1075 		 */
1076 		if (chain->data) {
1077 			bcopy(chain->data, bdata, chain->bytes);
1078 			KKASSERT(chain->bp != NULL);
1079 		} else {
1080 			bzero(bdata, chain->bytes);
1081 		}
1082 		if (chain->bp) {
1083 			chain->bp->b_flags |= B_RELBUF;
1084 			brelse(chain->bp);
1085 		}
1086 		chain->bp = nbp;
1087 		chain->data = bdata;
1088 		break;
1089 	default:
1090 		panic("hammer2_chain_modify: illegal non-embedded type %d",
1091 		      chain->bref.type);
1092 		break;
1093 
1094 	}
1095 skip2:
1096 	if ((flags & HAMMER2_MODIFY_NOSUB) == 0)
1097 		hammer2_chain_parent_setsubmod(chain);
1098 }
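/*
 * Illustrative sketch only: a typical copy-on-write edit of a meta-data
 * chain from within a transaction (trans):
 *
 *	hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE);
 *	hammer2_chain_modify(trans, chain, 0);
 *	... edit chain->data, which now points at the new allocation ...
 *	hammer2_chain_unlock(chain);
 *
 * Data chains locked RESOLVE_NEVER typically pass HAMMER2_MODIFY_OPTDATA
 * so that no device buffer is instantiated for the file data.
 */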
1099 
1100 /*
1101  * Mark the volume as having been modified.  This short-cut version
1102  * does not have to lock the volume's chain, which allows the ioctl
1103  * code to make adjustments to connections without deadlocking.  XXX
1104  *
1105  * No ref is made on vchain when flagging it MODIFIED.
1106  */
1107 void
1108 hammer2_modify_volume(hammer2_mount_t *hmp)
1109 {
1110 	hammer2_voldata_lock(hmp);
1111 	hammer2_voldata_unlock(hmp, 1);
1112 }
1113 
1114 /*
1115  * Locate an in-memory chain.  The parent must be locked.  The in-memory
1116  * chain is returned with a reference and without a lock, or NULL
1117  * if not found.
1118  *
1119  * NOTE: A chain on-media might exist for this index when NULL is returned.
1120  *
1121  * NOTE: Can only be used to locate chains which have not been deleted.
1122  */
1123 hammer2_chain_t *
1124 hammer2_chain_find(hammer2_chain_t *parent, int index)
1125 {
1126 	hammer2_chain_t dummy;
1127 	hammer2_chain_t *chain;
1128 
1129 	dummy.flags = 0;
1130 	dummy.index = index;
1131 	dummy.delete_tid = HAMMER2_MAX_TID;
1132 	spin_lock(&parent->core->cst.spin);
1133 	chain = RB_FIND(hammer2_chain_tree, &parent->core->rbtree, &dummy);
1134 	if (chain)
1135 		hammer2_chain_ref(chain);
1136 	spin_unlock(&parent->core->cst.spin);
1137 
1138 	return (chain);
1139 }
1140 
1141 /*
1142  * Return a locked chain structure with all associated data acquired.
1143  * (if LOOKUP_NOLOCK is requested the returned chain is only referenced).
1144  *
1145  * Caller must hold the parent locked shared or exclusive since we may
1146  * need the parent's bref array to find our block.
1147  *
1148  * The returned child is locked as requested.  If NOLOCK, the returned
1149  * child is still at least referenced.
1150  */
1151 hammer2_chain_t *
1152 hammer2_chain_get(hammer2_chain_t *parent, int index, int flags)
1153 {
1154 	hammer2_blockref_t *bref;
1155 	hammer2_mount_t *hmp = parent->hmp;
1156 	hammer2_chain_t *chain;
1157 	hammer2_chain_t dummy;
1158 	int how;
1159 
1160 	/*
 1161 	 * Figure out how to lock.  MAYBE can be used to optimize
1162 	 * the initial-create state for indirect blocks.
1163 	 */
1164 	if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK))
1165 		how = HAMMER2_RESOLVE_NEVER;
1166 	else
1167 		how = HAMMER2_RESOLVE_MAYBE;
1168 	if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
1169 		how |= HAMMER2_RESOLVE_SHARED;
1170 
1171 retry:
1172 	/*
1173 	 * First see if we have a (possibly modified) chain element cached
1174 	 * for this (parent, index).  Acquire the data if necessary.
1175 	 *
1176 	 * If chain->data is non-NULL the chain should already be marked
1177 	 * modified.
1178 	 */
1179 	dummy.flags = 0;
1180 	dummy.index = index;
1181 	dummy.delete_tid = HAMMER2_MAX_TID;
1182 	spin_lock(&parent->core->cst.spin);
1183 	chain = RB_FIND(hammer2_chain_tree, &parent->core->rbtree, &dummy);
1184 	if (chain) {
1185 		hammer2_chain_ref(chain);
1186 		spin_unlock(&parent->core->cst.spin);
1187 		if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0)
1188 			hammer2_chain_lock(chain, how | HAMMER2_RESOLVE_NOREF);
1189 		return(chain);
1190 	}
1191 	spin_unlock(&parent->core->cst.spin);
1192 
1193 	/*
1194 	 * The parent chain must not be in the INITIAL state.
1195 	 */
1196 	if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1197 		panic("hammer2_chain_get: Missing bref(1)");
1198 		/* NOT REACHED */
1199 	}
1200 
1201 	/*
1202 	 * No RBTREE entry found, lookup the bref and issue I/O (switch on
1203 	 * the parent's bref to determine where and how big the array is).
1204 	 */
1205 	switch(parent->bref.type) {
1206 	case HAMMER2_BREF_TYPE_INODE:
1207 		KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
1208 		bref = &parent->data->ipdata.u.blockset.blockref[index];
1209 		break;
1210 	case HAMMER2_BREF_TYPE_INDIRECT:
1211 	case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1212 	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1213 		KKASSERT(parent->data != NULL);
1214 		KKASSERT(index >= 0 &&
1215 			 index < parent->bytes / sizeof(hammer2_blockref_t));
1216 		bref = &parent->data->npdata.blockref[index];
1217 		break;
1218 	case HAMMER2_BREF_TYPE_VOLUME:
1219 		KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
1220 		bref = &hmp->voldata.sroot_blockset.blockref[index];
1221 		break;
1222 	default:
1223 		bref = NULL;
1224 		panic("hammer2_chain_get: unrecognized blockref type: %d",
1225 		      parent->bref.type);
1226 	}
1227 	if (bref->type == 0) {
1228 		panic("hammer2_chain_get: Missing bref(2)");
1229 		/* NOT REACHED */
1230 	}
1231 
1232 	/*
1233 	 * Allocate a chain structure representing the existing media
1234 	 * entry.  Resulting chain has one ref and is not locked.
1235 	 *
1236 	 * The locking operation we do later will issue I/O to read it.
1237 	 */
1238 	chain = hammer2_chain_alloc(hmp, bref);
1239 	hammer2_chain_core_alloc(chain, NULL);	/* ref'd chain returned */
1240 
1241 	/*
1242 	 * Link the chain into its parent.  A spinlock is required to safely
1243 	 * access the RBTREE, and it is possible to collide with another
1244 	 * hammer2_chain_get() operation because the caller might only hold
1245 	 * a shared lock on the parent.
1246 	 */
1247 	KKASSERT(parent->refs > 0);
1248 	spin_lock(&parent->core->cst.spin);
1249 	chain->parent = parent;
1250 	chain->index = index;
1251 	if (RB_INSERT(hammer2_chain_tree, &parent->core->rbtree, chain)) {
1252 		chain->parent = NULL;
1253 		chain->index = -1;
1254 		spin_unlock(&parent->core->cst.spin);
1255 		hammer2_chain_drop(chain);
1256 		goto retry;
1257 	}
1258 	atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
1259 	hammer2_chain_ref(parent);		/* chain->parent ref */
1260 	spin_unlock(&parent->core->cst.spin);
1261 
1262 	/*
1263 	 * Our new chain is referenced but NOT locked.  Lock the chain
1264 	 * below.  The locking operation also resolves its data.
1265 	 *
1266 	 * If NOLOCK is set the release will release the one-and-only lock.
1267 	 */
1268 	if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0) {
 1269 		hammer2_chain_lock(chain, how);	/* recursive lock */
1270 		hammer2_chain_drop(chain);	/* excess ref */
1271 	}
1272 	return (chain);
1273 }
1274 
1275 /*
1276  * Lookup initialization/completion API
1277  */
1278 hammer2_chain_t *
1279 hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags)
1280 {
1281 	if (flags & HAMMER2_LOOKUP_SHARED) {
1282 		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
1283 					   HAMMER2_RESOLVE_SHARED);
1284 	} else {
1285 		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
1286 	}
1287 	return (parent);
1288 }
1289 
1290 void
1291 hammer2_chain_lookup_done(hammer2_chain_t *parent)
1292 {
1293 	if (parent)
1294 		hammer2_chain_unlock(parent);
1295 }
1296 
1297 
1298 /*
1299  * Locate any key between key_beg and key_end inclusive.  (*parentp)
1300  * typically points to an inode but can also point to a related indirect
1301  * block and this function will recurse upwards and find the inode again.
1302  *
1303  * WARNING!  THIS DOES NOT RETURN KEYS IN LOGICAL KEY ORDER!  ANY KEY
1304  *	     WITHIN THE RANGE CAN BE RETURNED.  HOWEVER, AN ITERATION
1305  *	     WHICH PICKS UP WHERE WE LEFT OFF WILL CONTINUE THE SCAN.
1306  *
1307  * (*parentp) must be exclusively locked and referenced and can be an inode
1308  * or an existing indirect block within the inode.
1309  *
1310  * On return (*parentp) will be modified to point at the deepest parent chain
1311  * element encountered during the search, as a helper for an insertion or
1312  * deletion.   The new (*parentp) will be locked and referenced and the old
1313  * will be unlocked and dereferenced (no change if they are both the same).
1314  *
1315  * The matching chain will be returned exclusively locked.  If NOLOCK is
1316  * requested the chain will be returned only referenced.
1317  *
1318  * NULL is returned if no match was found, but (*parentp) will still
1319  * potentially be adjusted.
1320  *
1321  * This function will also recurse up the chain if the key is not within the
1322  * current parent's range.  (*parentp) can never be set to NULL.  An iteration
1323  * can simply allow (*parentp) to float inside the loop.
1324  */
1325 hammer2_chain_t *
1326 hammer2_chain_lookup(hammer2_chain_t **parentp,
1327 		     hammer2_key_t key_beg, hammer2_key_t key_end,
1328 		     int flags)
1329 {
1330 	hammer2_mount_t *hmp;
1331 	hammer2_chain_t *parent;
1332 	hammer2_chain_t *chain;
1333 	hammer2_chain_t *tmp;
1334 	hammer2_blockref_t *base;
1335 	hammer2_blockref_t *bref;
1336 	hammer2_key_t scan_beg;
1337 	hammer2_key_t scan_end;
1338 	int count = 0;
1339 	int i;
1340 	int how_always = HAMMER2_RESOLVE_ALWAYS;
1341 	int how_maybe = HAMMER2_RESOLVE_MAYBE;
1342 
1343 	if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK)) {
1344 		how_maybe |= HAMMER2_RESOLVE_SHARED;
1345 		how_always |= HAMMER2_RESOLVE_SHARED;
1346 	}
1347 
1348 	/*
1349 	 * Recurse (*parentp) upward if necessary until the parent completely
1350 	 * encloses the key range or we hit the inode.
1351 	 */
1352 	parent = *parentp;
1353 	hmp = parent->hmp;
1354 
1355 	while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1356 	       parent->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1357 		scan_beg = parent->bref.key;
1358 		scan_end = scan_beg +
1359 			   ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1360 		if (key_beg >= scan_beg && key_end <= scan_end)
1361 			break;
1362 		hammer2_chain_ref(parent);		/* ref old parent */
1363 		hammer2_chain_unlock(parent);		/* unlock old parent */
1364 		parent = parent->parent;
1365 							/* lock new parent */
1366 		hammer2_chain_lock(parent, how_maybe);
1367 		hammer2_chain_drop(*parentp);		/* drop old parent */
1368 		*parentp = parent;			/* new parent */
1369 	}
1370 
1371 again:
1372 	/*
1373 	 * Locate the blockref array.  Currently we do a fully associative
1374 	 * search through the array.
1375 	 */
1376 	switch(parent->bref.type) {
1377 	case HAMMER2_BREF_TYPE_INODE:
1378 		/*
1379 		 * Special shortcut for embedded data returns the inode
1380 		 * itself.  Callers must detect this condition and access
1381 		 * the embedded data (the strategy code does this for us).
1382 		 *
1383 		 * This is only applicable to regular files and softlinks.
1384 		 */
1385 		if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
1386 			if (flags & HAMMER2_LOOKUP_NOLOCK)
1387 				hammer2_chain_ref(parent);
1388 			else
1389 				hammer2_chain_lock(parent, how_always);
1390 			return (parent);
1391 		}
1392 		base = &parent->data->ipdata.u.blockset.blockref[0];
1393 		count = HAMMER2_SET_COUNT;
1394 		break;
1395 	case HAMMER2_BREF_TYPE_INDIRECT:
1396 	case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1397 	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1398 		/*
1399 		 * Optimize indirect blocks in the INITIAL state to avoid
1400 		 * I/O.
1401 		 */
1402 		if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1403 			base = NULL;
1404 		} else {
1405 			if (parent->data == NULL)
1406 				panic("parent->data is NULL");
1407 			base = &parent->data->npdata.blockref[0];
1408 		}
1409 		count = parent->bytes / sizeof(hammer2_blockref_t);
1410 		break;
1411 	case HAMMER2_BREF_TYPE_VOLUME:
1412 		base = &hmp->voldata.sroot_blockset.blockref[0];
1413 		count = HAMMER2_SET_COUNT;
1414 		break;
1415 	default:
1416 		panic("hammer2_chain_lookup: unrecognized blockref type: %d",
1417 		      parent->bref.type);
1418 		base = NULL;	/* safety */
1419 		count = 0;	/* safety */
1420 	}
1421 
1422 	/*
1423 	 * If the element and key overlap we use the element.
1424 	 *
1425 	 * NOTE! Deleted elements are effectively invisible.  Deletions
1426 	 *	 proactively clear the parent bref to the deleted child
1427 	 *	 so we do not try to shadow here to avoid parent updates
1428 	 *	 (which would be difficult since multiple deleted elements
1429 	 *	 might represent different flush synchronization points).
1430 	 */
1431 	bref = NULL;
1432 	for (i = 0; i < count; ++i) {
1433 		tmp = hammer2_chain_find(parent, i);
1434 		if (tmp) {
1435 			KKASSERT((tmp->flags & HAMMER2_CHAIN_DELETED) == 0);
1436 			bref = &tmp->bref;
1437 			KKASSERT(bref->type != 0);
1438 		} else if (base == NULL || base[i].type == 0) {
1439 			continue;
1440 		} else {
1441 			bref = &base[i];
1442 		}
1443 		scan_beg = bref->key;
1444 		scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
1445 		if (tmp)
1446 			hammer2_chain_drop(tmp);
1447 		if (key_beg <= scan_end && key_end >= scan_beg)
1448 			break;
1449 	}
1450 	if (i == count) {
1451 		if (key_beg == key_end)
1452 			return (NULL);
1453 		return (hammer2_chain_next(parentp, NULL,
1454 					   key_beg, key_end, flags));
1455 	}
1456 
1457 	/*
1458 	 * Acquire the new chain element.  If the chain element is an
1459 	 * indirect block we must search recursively.
1460 	 *
1461 	 * It is possible for the tmp chain above to be removed from
1462 	 * the RBTREE but the parent lock ensures it would not have been
1463 	 * destroyed from the media, so the chain_get() code will simply
1464 	 * reload it from the media in that case.
1465 	 */
1466 	chain = hammer2_chain_get(parent, i, flags);
1467 	if (chain == NULL)
1468 		return (NULL);
1469 
1470 	/*
1471 	 * If the chain element is an indirect block it becomes the new
1472 	 * parent and we loop on it.
1473 	 *
1474 	 * The parent always has to be locked with at least RESOLVE_MAYBE
1475 	 * so we can access its data.  It might need a fixup if the caller
1476 	 * passed incompatible flags.  Be careful not to cause a deadlock
1477 	 * as a data-load requires an exclusive lock.
1478 	 */
1479 	if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1480 	    chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1481 		hammer2_chain_unlock(parent);
1482 		*parentp = parent = chain;
1483 		if (flags & HAMMER2_LOOKUP_NOLOCK) {
1484 			hammer2_chain_lock(chain, how_maybe);
1485 			hammer2_chain_drop(chain);	/* excess ref */
1486 		} else if ((flags & HAMMER2_LOOKUP_NODATA) &&
1487 			   chain->data == NULL) {
1488 			hammer2_chain_ref(chain);
1489 			hammer2_chain_unlock(chain);
1490 			hammer2_chain_lock(chain, how_maybe |
1491 						  HAMMER2_RESOLVE_NOREF);
1492 		}
1493 		goto again;
1494 	}
1495 
1496 	/*
1497 	 * All done, return the chain
1498 	 */
1499 	return (chain);
1500 }
1501 
1502 /*
1503  * After having issued a lookup we can iterate all matching keys.
1504  *
 1505  * If chain is non-NULL we continue the iteration from just after its index.
1506  *
1507  * If chain is NULL we assume the parent was exhausted and continue the
1508  * iteration at the next parent.
1509  *
1510  * parent must be locked on entry and remains locked throughout.  chain's
1511  * lock status must match flags.  Chain is always at least referenced.
1512  */
1513 hammer2_chain_t *
1514 hammer2_chain_next(hammer2_chain_t **parentp, hammer2_chain_t *chain,
1515 		   hammer2_key_t key_beg, hammer2_key_t key_end,
1516 		   int flags)
1517 {
1518 	hammer2_mount_t *hmp;
1519 	hammer2_chain_t *parent;
1520 	hammer2_chain_t *tmp;
1521 	hammer2_blockref_t *base;
1522 	hammer2_blockref_t *bref;
1523 	hammer2_key_t scan_beg;
1524 	hammer2_key_t scan_end;
1525 	int i;
1526 	int how_maybe = HAMMER2_RESOLVE_MAYBE;
1527 	int count;
1528 
1529 	if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
1530 		how_maybe |= HAMMER2_RESOLVE_SHARED;
1531 
1532 	parent = *parentp;
1533 	hmp = parent->hmp;
1534 
1535 again:
1536 	/*
1537 	 * Calculate the next index and recalculate the parent if necessary.
1538 	 */
1539 	if (chain) {
1540 		/*
1541 		 * Continue iteration within current parent.  If not NULL
1542 		 * the passed-in chain may or may not be locked, based on
1543 		 * the LOOKUP_NOLOCK flag (passed in as returned from lookup
1544 		 * or a prior next).
1545 		 */
1546 		i = chain->index + 1;
1547 		if (flags & HAMMER2_LOOKUP_NOLOCK)
1548 			hammer2_chain_drop(chain);
1549 		else
1550 			hammer2_chain_unlock(chain);
1551 
1552 		/*
1553 		 * Any scan where the lookup returned degenerate data embedded
1554 		 * in the inode has an invalid index and must terminate.
1555 		 */
1556 		if (chain == parent)
1557 			return(NULL);
1558 		chain = NULL;
1559 	} else if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT &&
1560 		   parent->bref.type != HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1561 		/*
1562 		 * We reached the end of the iteration.
1563 		 */
1564 		return (NULL);
1565 	} else {
1566 		/*
1567 		 * Continue iteration with next parent unless the current
1568 		 * parent covers the range.
1569 		 */
1570 		hammer2_chain_t *nparent;
1571 
1572 		scan_beg = parent->bref.key;
1573 		scan_end = scan_beg +
1574 			    ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1575 		if (key_beg >= scan_beg && key_end <= scan_end)
1576 			return (NULL);
1577 
1578 		i = parent->index + 1;
1579 		nparent = parent->parent;
1580 		hammer2_chain_ref(nparent);	/* ref new parent */
1581 		hammer2_chain_unlock(parent);	/* unlock old parent */
1582 						/* lock new parent */
1583 		hammer2_chain_lock(nparent, how_maybe);
1584 		hammer2_chain_drop(nparent);	/* drop excess ref */
1585 		*parentp = parent = nparent;
1586 	}
1587 
1588 again2:
1589 	/*
1590 	 * Locate the blockref array.  Currently we do a fully associative
1591 	 * search through the array.
1592 	 */
1593 	switch(parent->bref.type) {
1594 	case HAMMER2_BREF_TYPE_INODE:
1595 		base = &parent->data->ipdata.u.blockset.blockref[0];
1596 		count = HAMMER2_SET_COUNT;
1597 		break;
1598 	case HAMMER2_BREF_TYPE_INDIRECT:
1599 	case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1600 	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1601 		if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1602 			base = NULL;
1603 		} else {
1604 			KKASSERT(parent->data != NULL);
1605 			base = &parent->data->npdata.blockref[0];
1606 		}
1607 		count = parent->bytes / sizeof(hammer2_blockref_t);
1608 		break;
1609 	case HAMMER2_BREF_TYPE_VOLUME:
1610 		base = &hmp->voldata.sroot_blockset.blockref[0];
1611 		count = HAMMER2_SET_COUNT;
1612 		break;
1613 	default:
1614 		panic("hammer2_chain_next: unrecognized blockref type: %d",
1615 		      parent->bref.type);
1616 		base = NULL;	/* safety */
1617 		count = 0;	/* safety */
1618 		break;
1619 	}
1620 	KKASSERT(i <= count);
1621 
1622 	/*
1623 	 * Look for the key.  If we are unable to find a match and an exact
1624 	 * match was requested we return NULL.  If a range was requested we
1625 	 * run hammer2_chain_next() to iterate.
1626 	 *
1627 	 * NOTE! Deleted elements are effectively invisible.  Deletions
1628 	 *	 proactively clear the parent bref to the deleted child
1629 	 *	 so we do not try to shadow here to avoid parent updates
1630 	 *	 (which would be difficult since multiple deleted elements
1631 	 *	 might represent different flush synchronization points).
1632 	 */
1633 	bref = NULL;
1634 	while (i < count) {
1635 		tmp = hammer2_chain_find(parent, i);
1636 		if (tmp) {
1637 			KKASSERT((tmp->flags & HAMMER2_CHAIN_DELETED) == 0);
1638 			bref = &tmp->bref;
1639 		} else if (base == NULL || base[i].type == 0) {
1640 			++i;
1641 			continue;
1642 		} else {
1643 			bref = &base[i];
1644 		}
1645 		scan_beg = bref->key;
1646 		scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
1647 		if (tmp)
1648 			hammer2_chain_drop(tmp);
1649 		if (key_beg <= scan_end && key_end >= scan_beg)
1650 			break;
1651 		++i;
1652 	}
1653 
1654 	/*
1655 	 * If we couldn't find a match recurse up a parent to continue the
1656 	 * search.
1657 	 */
1658 	if (i == count)
1659 		goto again;
1660 
1661 	/*
1662 	 * Acquire the new chain element.  If the chain element is an
1663 	 * indirect block we must search recursively.
1664 	 */
1665 	chain = hammer2_chain_get(parent, i, flags);
1666 	if (chain == NULL)
1667 		return (NULL);
1668 
1669 	/*
1670 	 * If the chain element is an indirect block it becomes the new
1671 	 * parent and we loop on it.
1672 	 *
1673 	 * The parent always has to be locked with at least RESOLVE_MAYBE
1674 	 * so we can access its data.  It might need a fixup if the caller
1675 	 * passed incompatible flags.  Be careful not to cause a deadlock
1676 	 * as a data-load requires an exclusive lock.
1677 	 */
1678 	if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1679 	    chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1680 		hammer2_chain_unlock(parent);
1681 		*parentp = parent = chain;
1682 		chain = NULL;
1683 		if (flags & HAMMER2_LOOKUP_NOLOCK) {
1684 			hammer2_chain_lock(parent, how_maybe);
1685 			hammer2_chain_drop(parent);	/* excess ref */
1686 		} else if ((flags & HAMMER2_LOOKUP_NODATA) &&
1687 			   parent->data == NULL) {
1688 			hammer2_chain_ref(parent);
1689 			hammer2_chain_unlock(parent);
1690 			hammer2_chain_lock(parent, how_maybe |
1691 						   HAMMER2_RESOLVE_NOREF);
1692 		}
1693 		i = 0;
1694 		goto again2;
1695 	}
1696 
1697 	/*
1698 	 * All done, return chain
1699 	 */
1700 	return (chain);
1701 }
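
/*
 * Illustrative sketch (kept out of the build): the typical caller-side
 * pattern for scanning a key range with the lookup/next API discussed
 * above.  The exact hammer2_chain_lookup()/hammer2_chain_next() prototypes
 * and the helper name are assumptions for illustration only.
 */
#if 0
static void
example_scan_range(hammer2_chain_t **parentp,
		   hammer2_key_t key_beg, hammer2_key_t key_end)
{
	hammer2_chain_t *chain;

	/* *parentp must be locked and held by the caller */
	chain = hammer2_chain_lookup(parentp, key_beg, key_end,
				     HAMMER2_LOOKUP_NODATA);
	while (chain) {
		/* ... inspect chain->bref.type / chain->bref.key ... */

		/*
		 * next consumes the lock (or ref with LOOKUP_NOLOCK) on
		 * chain and may replace *parentp as it iterates.
		 */
		chain = hammer2_chain_next(parentp, chain,
					   key_beg, key_end,
					   HAMMER2_LOOKUP_NODATA);
	}
	/* the caller still owns the lock/ref on *parentp */
}
#endif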
1702 
1703 /*
1704  * Create and return a new hammer2 system memory structure of the specified
1705  * key, type and size and insert it RELATIVE TO (PARENT).
1706  *
1707  * (parent) is typically either an inode or an indirect block, acquired
1708  * as a side effect of issuing a prior failed lookup.  parent
1709  * must be locked and held.  Do not pass the inode chain to this function
1710  * unless that is the chain returned by the failed lookup.
1711  *
1712  * (chain) is either NULL, a newly allocated chain, or a chain allocated
1713  * via hammer2_chain_duplicate().  When not NULL, the passed-in chain must
1714  * NOT be attached to any parent, and will be attached by this function.
1715  * This mechanic is used by the rename code.
1716  *
1717  * Non-indirect types will automatically allocate indirect blocks as required
1718  * if the new item does not fit in the current (parent).
1719  *
1720  * Indirect types will move a portion of the existing blockref array in
1721  * (parent) into the new indirect type and then use one of the free slots
1722  * to emplace the new indirect type.
1723  *
1724  * A new locked chain element is returned of the specified type.  The
1725  * element may or may not have a data area associated with it:
1726  *
1727  *	VOLUME		not allowed here
1728  *	INODE		kmalloc()'d data area is set up
1729  *	INDIRECT	not allowed here
1730  *	DATA		no data area will be set-up (caller is expected
1731  *			to have logical buffers, we don't want to alias
1732  *			the data onto device buffers!).
1733  *
1734  * Requires an exclusively locked parent.
1735  */
1736 int
1737 hammer2_chain_create(hammer2_trans_t *trans, hammer2_chain_t *parent,
1738 		     hammer2_chain_t **chainp,
1739 		     hammer2_key_t key, int keybits, int type, size_t bytes)
1740 {
1741 	hammer2_mount_t *hmp;
1742 	hammer2_chain_t *chain;
1743 	hammer2_blockref_t dummy;
1744 	hammer2_blockref_t *base;
1745 	hammer2_chain_t dummy_chain;
1746 	int unlock_parent = 0;
1747 	int allocated = 0;
1748 	int error = 0;
1749 	int count;
1750 	int i;
1751 
1752 	KKASSERT(ccms_thread_lock_owned(&parent->core->cst));
1753 	hmp = parent->hmp;
1754 	chain = *chainp;
1755 
1756 	if (chain == NULL) {
1757 		/*
1758 		 * First allocate media space and construct the dummy bref,
1759 		 * then allocate the in-memory chain structure.
1760 		 */
1761 		bzero(&dummy, sizeof(dummy));
1762 		dummy.type = type;
1763 		dummy.key = key;
1764 		dummy.keybits = keybits;
1765 		dummy.data_off = hammer2_allocsize(bytes);
1766 		dummy.methods = parent->bref.methods;
1767 		chain = hammer2_chain_alloc(hmp, &dummy);
1768 		hammer2_chain_core_alloc(chain, NULL);
1769 		ccms_thread_lock(&chain->core->cst, CCMS_STATE_EXCLUSIVE);
1770 		allocated = 1;
1771 
1772 		/*
1773 		 * We do NOT set INITIAL here (yet).  INITIAL is only
1774 		 * used for indirect blocks.
1775 		 *
1776 		 * Recalculate bytes to reflect the actual media block
1777 		 * allocation.
1778 		 */
1779 		bytes = (hammer2_off_t)1 <<
1780 			(int)(chain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
1781 		chain->bytes = bytes;
1782 
1783 		switch(type) {
1784 		case HAMMER2_BREF_TYPE_VOLUME:
1785 			panic("hammer2_chain_create: called with volume type");
1786 			break;
1787 		case HAMMER2_BREF_TYPE_INODE:
1788 			KKASSERT(bytes == HAMMER2_INODE_BYTES);
1789 			chain->data = kmalloc(sizeof(chain->data->ipdata),
1790 					      hmp->minode, M_WAITOK | M_ZERO);
1791 			break;
1792 		case HAMMER2_BREF_TYPE_INDIRECT:
1793 			panic("hammer2_chain_create: cannot be used to "
1794 			      "create indirect block");
1795 			break;
1796 		case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1797 		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1798 			panic("hammer2_chain_create: cannot be used to "
1799 			      "create freemap root or node");
1800 			break;
1801 		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
1802 		case HAMMER2_BREF_TYPE_DATA:
1803 		default:
1804 			/* leave chain->data NULL */
1805 			KKASSERT(chain->data == NULL);
1806 			break;
1807 		}
1808 	} else {
1809 		/*
1810 		 * Potentially update the chain's key/keybits.
1811 		 */
1812 		chain->bref.key = key;
1813 		chain->bref.keybits = keybits;
1814 	}
1815 
1816 again:
1817 	/*
1818 	 * Locate a free blockref in the parent's array
1819 	 */
1820 	switch(parent->bref.type) {
1821 	case HAMMER2_BREF_TYPE_INODE:
1822 		KKASSERT((parent->data->ipdata.op_flags &
1823 			  HAMMER2_OPFLAG_DIRECTDATA) == 0);
1824 		KKASSERT(parent->data != NULL);
1825 		base = &parent->data->ipdata.u.blockset.blockref[0];
1826 		count = HAMMER2_SET_COUNT;
1827 		break;
1828 	case HAMMER2_BREF_TYPE_INDIRECT:
1829 	case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1830 	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1831 		if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1832 			base = NULL;
1833 		} else {
1834 			KKASSERT(parent->data != NULL);
1835 			base = &parent->data->npdata.blockref[0];
1836 		}
1837 		count = parent->bytes / sizeof(hammer2_blockref_t);
1838 		break;
1839 	case HAMMER2_BREF_TYPE_VOLUME:
1840 		KKASSERT(parent->data != NULL);
1841 		base = &hmp->voldata.sroot_blockset.blockref[0];
1842 		count = HAMMER2_SET_COUNT;
1843 		break;
1844 	default:
1845 		panic("hammer2_chain_create: unrecognized blockref type: %d",
1846 		      parent->bref.type);
1847 		count = 0;
1848 		break;
1849 	}
1850 
1851 	/*
1852 	 * Scan for an unallocated bref, also skipping any slots occupied
1853 	 * by in-memory chain elements that may not yet have been updated
1854 	 * in the parent's bref array.
1855 	 *
1856 	 * We don't have to hold the spinlock to save an empty slot as
1857 	 * new slots can only transition from empty if the parent is
1858 	 * locked exclusively.
1859 	 */
1860 	bzero(&dummy_chain, sizeof(dummy_chain));
1861 	dummy_chain.delete_tid = HAMMER2_MAX_TID;
1862 
1863 	spin_lock(&parent->core->cst.spin);
1864 	for (i = 0; i < count; ++i) {
1865 		if (base == NULL) {
1866 			dummy_chain.index = i;
1867 			if (RB_FIND(hammer2_chain_tree,
1868 				    &parent->core->rbtree, &dummy_chain) == NULL) {
1869 				break;
1870 			}
1871 		} else if (base[i].type == 0) {
1872 			dummy_chain.index = i;
1873 			if (RB_FIND(hammer2_chain_tree,
1874 				    &parent->core->rbtree, &dummy_chain) == NULL) {
1875 				break;
1876 			}
1877 		}
1878 	}
1879 	spin_unlock(&parent->core->cst.spin);
1880 
1881 	/*
1882 	 * If no free blockref could be found we must create an indirect
1883 	 * block and move a number of blockrefs into it.  With the parent
1884 	 * locked we can safely lock each child in order to move it without
1885 	 * causing a deadlock.
1886 	 *
1887 	 * This may return the new indirect block or the old parent depending
1888 	 * on where the key falls.  NULL is returned on error.
1889 	 */
1890 	if (i == count) {
1891 		hammer2_chain_t *nparent;
1892 
1893 		nparent = hammer2_chain_create_indirect(trans, parent,
1894 							key, keybits,
1895 							&error);
1896 		if (nparent == NULL) {
1897 			if (allocated)
1898 				hammer2_chain_free(chain);
1899 			chain = NULL;
1900 			goto done;
1901 		}
1902 		if (parent != nparent) {
1903 			if (unlock_parent)
1904 				hammer2_chain_unlock(parent);
1905 			parent = nparent;
1906 			unlock_parent = 1;
1907 		}
1908 		goto again;
1909 	}
1910 
1911 	/*
1912 	 * Link the chain into its parent.  Later on we will have to set
1913 	 * the MOVED bit in situations where we don't mark the new chain
1914 	 * as being modified.
1915 	 */
1916 	if (chain->parent != NULL)
1917 		panic("hammer2: hammer2_chain_create: chain already connected");
1918 	KKASSERT(chain->parent == NULL);
1919 	KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0);
1920 
1921 	chain->parent = parent;
1922 	chain->index = i;
1923 	KKASSERT(parent->refs > 0);
1924 	spin_lock(&parent->core->cst.spin);
1925 	if (RB_INSERT(hammer2_chain_tree, &parent->core->rbtree, chain))
1926 		panic("hammer2_chain_link: collision");
1927 	atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
1928 	hammer2_chain_ref(parent);		/* chain->parent ref */
1929 	spin_unlock(&parent->core->cst.spin);
1930 
1931 	/*
1932 	 * (allocated) indicates that this is a newly-created chain element
1933 	 * rather than a renamed chain element.  In this situation we want
1934 	 * to place the chain element in the MODIFIED state.
1935 	 *
1936 	 * The data area will be set up as follows:
1937 	 *
1938 	 *	VOLUME		not allowed here.
1939 	 *
1940 	 *	INODE		embedded data area will be set-up.
1941 	 *
1942 	 *	INDIRECT	not allowed here.
1943 	 *
1944 	 *	DATA		no data area will be set-up (caller is expected
1945 	 *			to have logical buffers, we don't want to alias
1946 	 *			the data onto device buffers!).
1947 	 */
1948 	if (allocated) {
1949 		switch(chain->bref.type) {
1950 		case HAMMER2_BREF_TYPE_DATA:
1951 		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
1952 			hammer2_chain_modify(trans, chain,
1953 					     HAMMER2_MODIFY_OPTDATA);
1954 			break;
1955 		case HAMMER2_BREF_TYPE_INDIRECT:
1956 		case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1957 		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1958 			/* not supported in this function */
1959 			panic("hammer2_chain_create: bad type");
1960 			atomic_set_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1961 			hammer2_chain_modify(trans, chain,
1962 					     HAMMER2_MODIFY_OPTDATA);
1963 			break;
1964 		default:
1965 			hammer2_chain_modify(trans, chain, 0);
1966 			break;
1967 		}
1968 	} else {
1969 		/*
1970 		 * When reconnecting inodes we have to call setsubmod()
1971 		 * to ensure that its state propagates up the newly
1972 		 * connected parent.
1973 		 *
1974 		 * Make sure MOVED is set but do not update bref_flush.  If
1975 		 * the chain is undergoing modification bref_flush will be
1976 		 * updated when it gets flushed.  If it is not then the
1977 		 * bref may not have been flushed yet and we do not want to
1978 		 * set MODIFIED here as this could result in unnecessary
1979 		 * reallocations.
1980 		 */
1981 		if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
1982 			hammer2_chain_ref(chain);
1983 			atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
1984 		}
1985 		hammer2_chain_parent_setsubmod(chain);
1986 	}
1987 
1988 done:
1989 	*chainp = chain;
1990 	if (unlock_parent)
1991 		hammer2_chain_unlock(parent);
1992 	return (error);
1993 }
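
/*
 * Illustrative sketch (kept out of the build): creating a brand new DATA
 * chain under an exclusively locked parent using hammer2_chain_create()
 * above.  Passing *chainp == NULL asks the function to allocate the chain.
 * The helper name and the key/keybits/bytes parameters are illustrative
 * assumptions only.
 */
#if 0
static int
example_create_data(hammer2_trans_t *trans, hammer2_chain_t *parent,
		    hammer2_key_t key, int keybits, size_t bytes)
{
	hammer2_chain_t *chain = NULL;
	int error;

	/* parent is exclusively locked and held by the caller */
	error = hammer2_chain_create(trans, parent, &chain,
				     key, keybits,
				     HAMMER2_BREF_TYPE_DATA, bytes);
	if (error == 0) {
		/*
		 * chain is returned locked; DATA chains get no in-memory
		 * data area (see the comment above hammer2_chain_create).
		 */
		hammer2_chain_unlock(chain);
	}
	return (error);
}
#endif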
1994 
1995 /*
1996  * Replace (*chainp) with a duplicate.  The original *chainp is unlocked
1997  * and the replacement will be returned locked.  Both the original and the
1998  * new chain will share the same RBTREE (have the same chain->core), with
1999  * the new chain becoming the 'current' chain (meaning it is the first in
2000  * the linked list at core->chain_first).
2001  *
2002  * If (parent, i) then the new duplicated chain is inserted under the parent
2003  * at the specified index (the parent must not have a ref at that index).
2004  *
2005  * If (NULL, -1) then the new duplicated chain is not inserted anywhere,
2006  * similar to if it had just been chain_alloc()'d (suitable for passing into
2007  * hammer2_chain_create() after this function returns).
2008  *
2009  * NOTE! Duplication is used in order to retain the original topology to
2010  *	 support flush synchronization points.  Both the original and the
2011  *	 new chain will have the same transaction id and thus the operation
2012  *	 appears atomic on the media.
2013  */
2014 void
2015 hammer2_chain_duplicate(hammer2_trans_t *trans, hammer2_chain_t *parent,
2016 			int i, hammer2_chain_t **chainp)
2017 {
2018 	hammer2_mount_t *hmp = trans->hmp;
2019 	hammer2_blockref_t *base;
2020 	hammer2_chain_t *chain;
2021 	size_t bytes;
2022 	int count;
2023 
2024 	/*
2025 	 * First create a duplicate of the chain structure, associating
2026 	 * it with the same core, making it the same size, pointing it
2027 	 * to the same bref (the same media block), and copying any inline
2028 	 * data.
2029 	 */
2030 	KKASSERT(((*chainp)->flags & HAMMER2_CHAIN_INITIAL) == 0);
2031 	chain = hammer2_chain_alloc(hmp, &(*chainp)->bref);
2032 	hammer2_chain_core_alloc(chain, (*chainp)->core);
2033 
2034 	bytes = (hammer2_off_t)1 <<
2035 		(int)(chain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
2036 	chain->bytes = bytes;
2037 
2038 	switch(chain->bref.type) {
2039 	case HAMMER2_BREF_TYPE_VOLUME:
2040 		panic("hammer2_chain_duplicate: cannot be called w/volhdr");
2041 		break;
2042 	case HAMMER2_BREF_TYPE_INODE:
2043 		KKASSERT(bytes == HAMMER2_INODE_BYTES);
2044 		if ((*chainp)->data) {
2045 			chain->data = kmalloc(sizeof(chain->data->ipdata),
2046 					      hmp->minode, M_WAITOK | M_ZERO);
2047 			chain->data->ipdata = (*chainp)->data->ipdata;
2048 		}
2049 		break;
2050 	case HAMMER2_BREF_TYPE_INDIRECT:
2051 #if 0
2052 		panic("hammer2_chain_duplicate: cannot be used to "
2053 		      "create an indirect block");
2054 #endif
2055 		break;
2056 	case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2057 	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2058 		panic("hammer2_chain_duplicate: cannot be used to "
2059 		      "create a freemap root or node");
2060 		break;
2061 	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2062 	case HAMMER2_BREF_TYPE_DATA:
2063 	default:
2064 		/* leave chain->data NULL */
2065 		KKASSERT(chain->data == NULL);
2066 		break;
2067 	}
2068 
2069 	/*
2070 	 * Both chains must be locked for us to be able to set the
2071 	 * duplink.  To avoid buffer cache deadlocks we do not try
2072 	 * to resolve the new chain until after we've unlocked the
2073 	 * old one.
2074 	 */
2075 	hammer2_chain_lock(chain, HAMMER2_RESOLVE_NEVER);
2076 	KKASSERT((*chainp)->duplink == NULL);
2077 	(*chainp)->duplink = chain;	/* inherits excess ref from alloc */
2078 	hammer2_chain_unlock(*chainp);
2079 	*chainp = chain;
2080 	hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE);
2081 	hammer2_chain_unlock(chain);
2082 
2083 
2084 	/*
2085 	 * If parent is not NULL, insert into the parent at the requested
2086 	 * index.  The newly duplicated chain must be marked MOVED and
2087 	 * SUBMODIFIED set in its parent(s).
2088 	 */
2089 	if (parent) {
2090 		/*
2091 		 * Locate a free blockref in the parent's array
2092 		 */
2093 		KKASSERT(ccms_thread_lock_owned(&parent->core->cst));
2094 		switch(parent->bref.type) {
2095 		case HAMMER2_BREF_TYPE_INODE:
2096 			KKASSERT((parent->data->ipdata.op_flags &
2097 				  HAMMER2_OPFLAG_DIRECTDATA) == 0);
2098 			KKASSERT(parent->data != NULL);
2099 			base = &parent->data->ipdata.u.blockset.blockref[0];
2100 			count = HAMMER2_SET_COUNT;
2101 			break;
2102 		case HAMMER2_BREF_TYPE_INDIRECT:
2103 		case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2104 		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2105 			if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2106 				base = NULL;
2107 			} else {
2108 				KKASSERT(parent->data != NULL);
2109 				base = &parent->data->npdata.blockref[0];
2110 			}
2111 			count = parent->bytes / sizeof(hammer2_blockref_t);
2112 			break;
2113 		case HAMMER2_BREF_TYPE_VOLUME:
2114 			KKASSERT(parent->data != NULL);
2115 			base = &hmp->voldata.sroot_blockset.blockref[0];
2116 			count = HAMMER2_SET_COUNT;
2117 			break;
2118 		default:
2119 			panic("hammer2_chain_duplicate: unrecognized "
2120 			      "blockref type: %d",
2121 			      parent->bref.type);
2122 			count = 0;
2123 			break;
2124 		}
2125 		KKASSERT(i >= 0 && i < count);
2126 		KKASSERT(base == NULL || base[i].type == 0);
2127 
2128 		chain->parent = parent;
2129 		chain->index = i;
2130 		KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0);
2131 		KKASSERT(parent->refs > 0);
2132 		spin_lock(&parent->core->cst.spin);
2133 		if (RB_INSERT(hammer2_chain_tree, &parent->core->rbtree, chain))
2134 			panic("hammer2_chain_link: collision");
2135 		atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
2136 		hammer2_chain_ref(parent);	/* chain->parent ref */
2137 		spin_unlock(&parent->core->cst.spin);
2138 
2139 		if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2140 			hammer2_chain_ref(chain);
2141 			atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2142 		}
2143 		hammer2_chain_parent_setsubmod(chain);
2144 	}
2145 }
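
/*
 * Illustrative sketch (kept out of the build): the (NULL, -1) form of
 * hammer2_chain_duplicate() followed by hammer2_chain_create(), roughly
 * the mechanic the rename code relies on per the comments above.  This is
 * a conceptual sketch only; the helper name and the key/keybits/bytes
 * arguments are assumptions, not the actual rename path.
 */
#if 0
static int
example_reattach_chain(hammer2_trans_t *trans, hammer2_chain_t *newparent,
		       hammer2_chain_t **chainp,
		       hammer2_key_t key, int keybits, size_t bytes)
{
	/* duplicate without inserting anywhere (parent == NULL, i == -1) */
	hammer2_chain_duplicate(trans, NULL, -1, chainp);

	/* the duplicate is unattached and can now be created elsewhere */
	return (hammer2_chain_create(trans, newparent, chainp,
				     key, keybits,
				     (*chainp)->bref.type, bytes));
}
#endif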
2146 
2147 /*
2148  * Create an indirect block that covers one or more of the elements in the
2149  * current parent.  Either returns the existing parent with no locking or
2150  * ref changes, or returns the new indirect block locked and referenced,
2151  * leaving the original parent lock/ref intact as well.
2152  *
2153  * If an error occurs, NULL is returned and *errorp is set to the error.
2154  *
2155  * The returned chain depends on where the specified key falls.
2156  *
2157  * The key/keybits for the indirect block only needs to follow four rules:
2158  *
2159  * (1) That all elements underneath it fit within its key space and
2160  *
2161  * (2) That all elements outside it are outside its key space.
2162  *
2163  * (3) When creating the new indirect block any elements in the current
2164  *     parent that fit within the new indirect block's keyspace must be
2165  *     moved into the new indirect block.
2166  *
2167  * (4) The keyspace chosen for the inserted indirect block CAN cover a wider
2168  *     keyspace than the current parent, but lookup/iteration rules will
2169  *     ensure (and must ensure) that rule (2) for all parents leading up
2170  *     to the nearest inode or the root volume header is adhered to.  This
2171  *     is accomplished by always recursing through matching keyspaces in
2172  *     the hammer2_chain_lookup() and hammer2_chain_next() API.
2173  *
2174  * The current implementation calculates the current worst-case keyspace by
2175  * iterating the current parent and then dividing it into two halves, choosing
2176  * whichever half has the most elements (not necessarily the half containing
2177  * the requested key).
2178  *
2179  * We can also opt to use the half with the least number of elements.  This
2180  * causes lower-numbered keys (aka logical file offsets) to recurse through
2181  * fewer indirect blocks and higher-numbered keys to recurse through more.
2182  * This also has the risk of not moving enough elements to the new indirect
2183  * block and being forced to create several indirect blocks before the element
2184  * can be inserted.
2185  *
2186  * Must be called with an exclusively locked parent.
2187  */
2188 static
2189 hammer2_chain_t *
2190 hammer2_chain_create_indirect(hammer2_trans_t *trans, hammer2_chain_t *parent,
2191 			      hammer2_key_t create_key, int create_bits,
2192 			      int *errorp)
2193 {
2194 	hammer2_mount_t *hmp = trans->hmp;
2195 	hammer2_blockref_t *base;
2196 	hammer2_blockref_t *bref;
2197 	hammer2_chain_t *chain;
2198 	hammer2_chain_t *ichain;
2199 	hammer2_chain_t dummy;
2200 	hammer2_key_t key = create_key;
2201 	int keybits = create_bits;
2202 	int locount = 0;
2203 	int hicount = 0;
2204 	int count;
2205 	int nbytes;
2206 	int i;
2207 
2208 	/*
2209 	 * Calculate the base blockref pointer or NULL if the chain
2210 	 * is known to be empty.  We need to calculate the array count
2211 	 * for RB lookups either way.
2212 	 */
2213 	KKASSERT(ccms_thread_lock_owned(&parent->core->cst));
2214 	*errorp = 0;
2215 
2216 	hammer2_chain_modify(trans, parent, HAMMER2_MODIFY_OPTDATA);
2217 	if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2218 		base = NULL;
2219 
2220 		switch(parent->bref.type) {
2221 		case HAMMER2_BREF_TYPE_INODE:
2222 			count = HAMMER2_SET_COUNT;
2223 			break;
2224 		case HAMMER2_BREF_TYPE_INDIRECT:
2225 		case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2226 		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2227 			count = parent->bytes / sizeof(hammer2_blockref_t);
2228 			break;
2229 		case HAMMER2_BREF_TYPE_VOLUME:
2230 			count = HAMMER2_SET_COUNT;
2231 			break;
2232 		default:
2233 			panic("hammer2_chain_create_indirect: "
2234 			      "unrecognized blockref type: %d",
2235 			      parent->bref.type);
2236 			count = 0;
2237 			break;
2238 		}
2239 	} else {
2240 		switch(parent->bref.type) {
2241 		case HAMMER2_BREF_TYPE_INODE:
2242 			base = &parent->data->ipdata.u.blockset.blockref[0];
2243 			count = HAMMER2_SET_COUNT;
2244 			break;
2245 		case HAMMER2_BREF_TYPE_INDIRECT:
2246 		case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2247 		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2248 			base = &parent->data->npdata.blockref[0];
2249 			count = parent->bytes / sizeof(hammer2_blockref_t);
2250 			break;
2251 		case HAMMER2_BREF_TYPE_VOLUME:
2252 			base = &hmp->voldata.sroot_blockset.blockref[0];
2253 			count = HAMMER2_SET_COUNT;
2254 			break;
2255 		default:
2256 			panic("hammer2_chain_create_indirect: "
2257 			      "unrecognized blockref type: %d",
2258 			      parent->bref.type);
2259 			count = 0;
2260 			break;
2261 		}
2262 	}
2263 
2264 	/*
2265 	 * Scan for an unallocated bref, also skipping any slots occupied
2266 	 * by in-memory chain elements which may not yet have been updated
2267 	 * in the parent's bref array.
2268 	 *
2269 	 * Deleted elements are ignored.
2270 	 */
2271 	bzero(&dummy, sizeof(dummy));
2272 	dummy.delete_tid = HAMMER2_MAX_TID;
2273 
2274 	spin_lock(&parent->core->cst.spin);
2275 	for (i = 0; i < count; ++i) {
2276 		int nkeybits;
2277 
2278 		dummy.index = i;
2279 		chain = RB_FIND(hammer2_chain_tree, &parent->core->rbtree,
2280 				&dummy);
2281 		if (chain) {
2282 			KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0);
2283 			bref = &chain->bref;
2284 		} else if (base && base[i].type) {
2285 			bref = &base[i];
2286 		} else {
2287 			continue;
2288 		}
2289 
2290 		/*
2291 		 * Expand our calculated key range (key, keybits) to fit
2292 		 * the scanned key.  nkeybits represents the full range
2293 		 * that we will later cut in half (two halves @ nkeybits - 1).
2294 		 */
2295 		nkeybits = keybits;
2296 		if (nkeybits < bref->keybits)
2297 			nkeybits = bref->keybits;
2298 		while (nkeybits < 64 &&
2299 		       (~(((hammer2_key_t)1 << nkeybits) - 1) &
2300 		        (key ^ bref->key)) != 0) {
2301 			++nkeybits;
2302 		}
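
		/*
		 * Worked example (illustrative numbers): with key = 0x1000,
		 * keybits = 12 and a scanned bref->key = 0x3000 whose
		 * bref->keybits <= 12, the keys differ in bit 13, so the
		 * loop above widens nkeybits from 12 to 14 before the
		 * masked XOR becomes zero.
		 */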
2303 
2304 		/*
2305 		 * If the new key range is larger we have to determine
2306 		 * which side of the new key range the existing keys fall
2307 		 * under by checking the high bit, then collapsing the
2308 		 * locount into the hicount or vice-versa.
2309 		 */
2310 		if (keybits != nkeybits) {
2311 			if (((hammer2_key_t)1 << (nkeybits - 1)) & key) {
2312 				hicount += locount;
2313 				locount = 0;
2314 			} else {
2315 				locount += hicount;
2316 				hicount = 0;
2317 			}
2318 			keybits = nkeybits;
2319 		}
2320 
2321 		/*
2322 		 * The newly scanned key will be in the lower half or the
2323 		 * higher half of the (new) key range.
2324 		 */
2325 		if (((hammer2_key_t)1 << (nkeybits - 1)) & bref->key)
2326 			++hicount;
2327 		else
2328 			++locount;
2329 	}
2330 	spin_unlock(&parent->core->cst.spin);
2331 	bref = NULL;	/* now invalid (safety) */
2332 
2333 	/*
2334 	 * Adjust keybits to represent half of the full range calculated
2335 	 * above (radix 63 max)
2336 	 */
2337 	--keybits;
2338 
2339 	/*
2340 	 * Select whichever half contains the most elements.  Theoretically
2341 	 * we can select either side as long as it contains at least one
2342 	 * element (in order to ensure that a free slot is present to hold
2343 	 * the indirect block).
2344 	 */
2345 	key &= ~(((hammer2_key_t)1 << keybits) - 1);
2346 	if (hammer2_indirect_optimize) {
2347 		/*
2348 		 * Insert node for least number of keys; this will arrange
2349 		 * the first few blocks of a large file or the first few
2350 		 * inodes in a directory with fewer indirect blocks when
2351 		 * created linearly.
2352 		 */
2353 		if (hicount < locount && hicount != 0)
2354 			key |= (hammer2_key_t)1 << keybits;
2355 		else
2356 			key &= ~(hammer2_key_t)1 << keybits;
2357 	} else {
2358 		/*
2359 		 * Insert node for most number of keys, best for heavily
2360 		 * fragmented files.
2361 		 */
2362 		if (hicount > locount)
2363 			key |= (hammer2_key_t)1 << keybits;
2364 		else
2365 			key &= ~(hammer2_key_t)1 << keybits;
2366 	}
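
	/*
	 * Continuing the worked example above: keybits was cut from 14 to
	 * 13, so the two candidate halves are keys 0x0000-0x1fff and
	 * 0x2000-0x3fff; key is masked (and possibly or'd) down to the
	 * base of whichever half was selected.
	 */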
2367 
2368 	/*
2369 	 * How big should our new indirect block be?  It has to be at least
2370 	 * as large as its parent.
2371 	 */
2372 	if (parent->bref.type == HAMMER2_BREF_TYPE_INODE)
2373 		nbytes = HAMMER2_IND_BYTES_MIN;
2374 	else
2375 		nbytes = HAMMER2_IND_BYTES_MAX;
2376 	if (nbytes < count * sizeof(hammer2_blockref_t))
2377 		nbytes = count * sizeof(hammer2_blockref_t);
2378 
2379 	/*
2380 	 * Ok, create our new indirect block
2381 	 */
2382 	switch(parent->bref.type) {
2383 	case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2384 	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2385 		dummy.bref.type = HAMMER2_BREF_TYPE_FREEMAP_NODE;
2386 		break;
2387 	default:
2388 		dummy.bref.type = HAMMER2_BREF_TYPE_INDIRECT;
2389 		break;
2390 	}
2391 	dummy.bref.key = key;
2392 	dummy.bref.keybits = keybits;
2393 	dummy.bref.data_off = hammer2_allocsize(nbytes);
2394 	dummy.bref.methods = parent->bref.methods;
2395 
2396 	ichain = hammer2_chain_alloc(hmp, &dummy.bref);
2397 	atomic_set_int(&ichain->flags, HAMMER2_CHAIN_INITIAL);
2398 	hammer2_chain_core_alloc(ichain, NULL);
2399 	hammer2_chain_lock(ichain, HAMMER2_RESOLVE_MAYBE);
2400 	hammer2_chain_drop(ichain);	/* excess ref from alloc */
2401 
2402 	/*
2403 	 * Iterate the original parent and move the matching brefs into
2404 	 * the new indirect block.
2405 	 *
2406 	 * XXX handle flushes.
2407 	 */
2408 	spin_lock(&parent->core->cst.spin);
2409 	for (i = 0; i < count; ++i) {
2410 		/*
2411 		 * For keying purposes access the bref from the media or
2412 		 * from our in-memory cache.  In cases where the in-memory
2413 		 * cache overrides the media the keyrefs will be the same
2414 		 * anyway so we can avoid checking the cache when the media
2415 		 * has a key.
2416 		 */
2417 		dummy.index = i;
2418 		chain = RB_FIND(hammer2_chain_tree, &parent->core->rbtree,
2419 				&dummy);
2420 		if (chain) {
2421 			KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0);
2422 			bref = &chain->bref;
2423 		} else if (base && base[i].type) {
2424 			bref = &base[i];
2425 		} else {
2426 			if (ichain->index < 0)
2427 				ichain->index = i;
2428 			continue;
2429 		}
2430 
2431 		/*
2432 		 * Skip keys not in the chosen half (low or high), only bit
2433 		 * (keybits - 1) needs to be compared but for safety we
2434 		 * will compare all msb bits plus that bit again.
2435 		 */
2436 		if ((~(((hammer2_key_t)1 << keybits) - 1) &
2437 		    (key ^ bref->key)) != 0) {
2438 			continue;
2439 		}
2440 
2441 		/*
2442 		 * This element is being moved from the parent, its slot
2443 		 * is available for our new indirect block.
2444 		 */
2445 		if (ichain->index < 0)
2446 			ichain->index = i;
2447 
2448 		/*
2449 		 * Load the new indirect block by acquiring or allocating
2450 		 * the related chain entries, then move them to the new
2451 		 * parent (ichain) by deleting them from their old location
2452 		 * and inserting a duplicate of the chain and any modified
2453 		 * sub-chain in the new location.
2454 		 *
2455 		 * We must set MOVED in the chain being duplicated and
2456 		 * SUBMODIFIED in the parent(s) so the flush code knows
2457 		 * what is going on.  The latter is done after the loop.
2458 		 *
2459 		 * WARNING! chain->cst.spin must be held when chain->parent is
2460 		 *	    modified, even though we own the full blown lock,
2461 		 *	    to deal with setsubmod and rename races.
2462 		 *	    (XXX remove this req).
2463 		 */
2464 		spin_unlock(&parent->core->cst.spin);
2465 		chain = hammer2_chain_get(parent, i, HAMMER2_LOOKUP_NODATA);
2466 		hammer2_chain_delete(trans, parent, chain);
2467 		hammer2_chain_duplicate(trans, ichain, i, &chain);
2468 
2469 #if 0
2470 		if (base)
2471 			bzero(&base[i], sizeof(base[i]));
2472 #endif
2473 		hammer2_chain_unlock(chain);
2474 		KKASSERT(parent->refs > 0);
2475 		chain = NULL;
2476 		spin_lock(&parent->core->cst.spin);
2477 	}
2478 	spin_unlock(&parent->core->cst.spin);
2479 
2480 	/*
2481 	 * Insert the new indirect block into the parent now that we've
2482 	 * cleared out some entries in the parent.  We calculated a good
2483 	 * insertion index in the loop above (ichain->index).
2484 	 *
2485 	 * We don't have to set MOVED here because we mark ichain modified
2486 	 * down below (so the normal modified -> flush -> set-moved sequence
2487 	 * applies).
2488 	 *
2489 	 * The insertion shouldn't race as this is a completely new block
2490 	 * and the parent is locked.
2491 	 */
2492 	KKASSERT(ichain->index >= 0);
2493 	KKASSERT((ichain->flags & HAMMER2_CHAIN_ONRBTREE) == 0);
2494 	spin_lock(&parent->core->cst.spin);
2495 	if (RB_INSERT(hammer2_chain_tree, &parent->core->rbtree, ichain))
2496 		panic("hammer2_chain_create_indirect: ichain insertion");
2497 	atomic_set_int(&ichain->flags, HAMMER2_CHAIN_ONRBTREE);
2498 	ichain->parent = parent;
2499 	hammer2_chain_ref(parent);	/* ichain->parent ref */
2500 	spin_unlock(&parent->core->cst.spin);
2501 
2502 	/*
2503 	 * Mark the new indirect block modified after insertion, which
2504 	 * will propagate up through parent all the way to the root and
2505 	 * also allocate the physical block in ichain for our caller,
2506 	 * and assign ichain->data to a pre-zero'd space (because there
2507 	 * is no prior data to copy into it).
2508 	 *
2509 	 * We have to set SUBMODIFIED in ichain's flags manually so the
2510 	 * flusher knows it has to recurse through it to get to all of
2511 	 * our moved blocks, then call setsubmod() to set the bit
2512 	 * recursively.
2513 	 */
2514 	hammer2_chain_modify(trans, ichain, HAMMER2_MODIFY_OPTDATA);
2515 	hammer2_chain_parent_setsubmod(ichain);
2516 	atomic_set_int(&ichain->flags, HAMMER2_CHAIN_SUBMODIFIED);
2517 
2518 	/*
2519 	 * Figure out what to return.
2520 	 */
2521 	if (create_bits > keybits) {
2522 		/*
2523 		 * Key being created is way outside the key range,
2524 		 * return the original parent.
2525 		 */
2526 		hammer2_chain_unlock(ichain);
2527 	} else if (~(((hammer2_key_t)1 << keybits) - 1) &
2528 		   (create_key ^ key)) {
2529 		/*
2530 		 * Key being created is outside the key range,
2531 		 * return the original parent.
2532 		 */
2533 		hammer2_chain_unlock(ichain);
2534 	} else {
2535 		/*
2536 	 * Otherwise it's in the range, return the new parent.
2537 		 * (leave both the new and old parent locked).
2538 		 */
2539 		parent = ichain;
2540 	}
2541 
2542 	return(parent);
2543 }
2544 
2545 /*
2546  * Sets CHAIN_DELETED and CHAIN_MOVED in the chain being deleted and
2547  * removes the parent's bref reference to the chain, generating a modification
2548  * on the parent.
2549  *
2550  * We do not attempt to defer adjustment of the parent bref to the chain
2551  * as this could become quite complex with multiple deletions / replacements.
2552  * Instead, a modification is generated in the parent which can cause it to
2553  * be duplicated if the current parent's data is required for a flush in
2554  * progress.
2555  *
2556  * NOTE: We can trivially adjust the parent if it is in the INITIAL state.
2557  *
2558  * NOTE: The flush code handles the actual removal of the chain from
2559  *	 the BTREE (also, depending on synchronization points, the
2560  *	 chain may still be relevant to the flush).
2561  *
2562  * NOTE: chain->delete_tid distinguishes deleted chains from live chains.
2563  *	 Once it is set to something less than HAMMER2_MAX_TID, the
2564  *	 chain_lookup(), chain_next(), and chain_get() functions will
2565  *	 no longer see the chain.
2566  *
2567  * This function is NOT recursive.  Any entity already pushed into the
2568  * chain (such as an inode) may still need visibility into its contents,
2569  * as well as the ability to read and modify the contents.  For example,
2570  * for an unlinked file which is still open.
2571  */
2572 void
2573 hammer2_chain_delete(hammer2_trans_t *trans, hammer2_chain_t *parent,
2574 		     hammer2_chain_t *chain)
2575 {
2576 	hammer2_mount_t *hmp = trans->hmp;
2577 	hammer2_blockref_t *base;
2578 	int count;
2579 
2580 	if (chain->parent != parent)
2581 		panic("hammer2_chain_delete: parent mismatch");
2582 	KKASSERT(ccms_thread_lock_owned(&parent->core->cst));
2583 
2584 	/*
2585 	 * Nothing to do if already marked.
2586 	 */
2587 	if (chain->flags & HAMMER2_CHAIN_DELETED)
2588 		return;
2589 
2590 	/*
2591 	 * Mark the parent modified so our base[] pointer remains valid
2592 	 * while we move entries.  For the optimized indirect block
2593 	 * case mark the parent moved instead.
2594 	 *
2595 	 * Calculate the blockref reference in the parent and zero it out.
2596 	 */
2597 	switch(parent->bref.type) {
2598 	case HAMMER2_BREF_TYPE_INODE:
2599 		hammer2_chain_modify(trans, parent,
2600 				     HAMMER2_MODIFY_NO_MODIFY_TID);
2601 		base = &parent->data->ipdata.u.blockset.blockref[0];
2602 		count = HAMMER2_SET_COUNT;
2603 		break;
2604 	case HAMMER2_BREF_TYPE_INDIRECT:
2605 	case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2606 	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2607 		hammer2_chain_modify(trans, parent,
2608 				     HAMMER2_MODIFY_OPTDATA |
2609 				     HAMMER2_MODIFY_NO_MODIFY_TID);
2610 		if (parent->flags & HAMMER2_CHAIN_INITIAL)
2611 			base = NULL;
2612 		else
2613 			base = &parent->data->npdata.blockref[0];
2614 		count = parent->bytes / sizeof(hammer2_blockref_t);
2615 		break;
2616 	case HAMMER2_BREF_TYPE_VOLUME:
2617 		hammer2_chain_modify(trans, parent,
2618 				     HAMMER2_MODIFY_NO_MODIFY_TID);
2619 		base = &hmp->voldata.sroot_blockset.blockref[0];
2620 		count = HAMMER2_SET_COUNT;
2621 		break;
2622 	default:
2623 		panic("hammer2_chain_delete: unrecognized blockref type: %d",
2624 		      parent->bref.type);
2625 		base = NULL;	/* NOT REACHED */
2626 		count = 0;	/* NOT REACHED */
2627 		break;		/* NOT REACHED */
2628 	}
2629 	KKASSERT(chain->index >= 0 && chain->index < count);
2630 
2631 	/*
2632 	 * Clean out the blockref immediately.
2633 	 */
2634 	if (base)
2635 		bzero(&base[chain->index], sizeof(*base));
2636 
2637 	/*
2638 	 * Must set MOVED along with DELETED for the flush code to recognize
2639 	 * the operation and properly disconnect the chain in-memory.
2640 	 *
2641 	 * The setting of DELETED causes finds, lookups, and _next iterations
2642 	 * to no longer recognize the chain.  RB_SCAN()s will still have
2643 	 * visibility (needed for flush serialization points).
2644 	 */
2645 	atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
2646 	if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2647 		hammer2_chain_ref(chain);
2648 		atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2649 	}
2650 	chain->delete_tid = trans->sync_tid;
2651 	hammer2_chain_parent_setsubmod(chain);
2652 }
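
/*
 * Illustrative sketch (kept out of the build): deleting the chain found at
 * a known blockref index under an exclusively locked parent, using
 * hammer2_chain_get() the same way the indirect-block code above does.
 * The helper name is an assumption for illustration only.
 */
#if 0
static void
example_delete_index(hammer2_trans_t *trans, hammer2_chain_t *parent, int i)
{
	hammer2_chain_t *chain;

	chain = hammer2_chain_get(parent, i, HAMMER2_LOOKUP_NODATA);
	if (chain) {
		/* sets DELETED/MOVED and clears the parent's bref */
		hammer2_chain_delete(trans, parent, chain);
		hammer2_chain_unlock(chain);
	}
}
#endif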
2653 
2654 void
2655 hammer2_chain_wait(hammer2_chain_t *chain)
2656 {
2657 	tsleep(chain, 0, "chnflw", 1);
2658 }
2659