xref: /dragonfly/sys/vfs/hammer2/hammer2_chain.c (revision c93b565c)
1 /*
2  * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * and Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 /*
36  * This subsystem implements most of the core support functions for
37  * the hammer2_chain structure.
38  *
39  * Chains are the in-memory version on media objects (volume header, inodes,
40  * indirect blocks, data blocks, etc).  Chains represent a portion of the
41  * HAMMER2 topology.
42  *
43  * Chains are no-longer delete-duplicated.  Instead, the original in-memory
44  * chain will be moved along with its block reference (e.g. for things like
45  * renames, hardlink operations, modifications, etc), and will be indexed
46  * on a secondary list for flush handling instead of propagating a flag
47  * upward to the root.
48  *
49  * Concurrent front-end operations can still run against backend flushes
50  * as long as they do not cross the current flush boundary.  An operation
51  * running above the current flush (in areas not yet flushed) can become
52  * part of the current flush while an operation running below the current
53  * flush can become part of the next flush.
54  */
55 #include <sys/cdefs.h>
56 #include <sys/param.h>
57 #include <sys/systm.h>
58 #include <sys/types.h>
59 #include <sys/lock.h>
60 #include <sys/kern_syscall.h>
61 #include <sys/uuid.h>
62 
63 #include <crypto/sha2/sha2.h>
64 
65 #include "hammer2.h"
66 
67 static int hammer2_indirect_optimize;	/* XXX SYSCTL */
68 
69 static hammer2_chain_t *hammer2_chain_create_indirect(
70 		hammer2_trans_t *trans, hammer2_chain_t *parent,
71 		hammer2_key_t key, int keybits, int for_type, int *errorp);
72 static void hammer2_chain_drop_data(hammer2_chain_t *chain, int lastdrop);
73 static hammer2_chain_t *hammer2_combined_find(
74 		hammer2_chain_t *parent,
75 		hammer2_blockref_t *base, int count,
76 		int *cache_indexp, hammer2_key_t *key_nextp,
77 		hammer2_key_t key_beg, hammer2_key_t key_end,
78 		hammer2_blockref_t **bresp);
79 
80 /*
81  * Basic RBTree for chains (core->rbtree and core->dbtree).  Chains cannot
82  * overlap in the RB trees.  Deleted chains are moved from rbtree to either
83  * dbtree or to dbq.
84  *
85  * Chains in delete-duplicate sequences can always iterate through core_entry
86  * to locate the live version of the chain.
87  */
88 RB_GENERATE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
89 
90 int
91 hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2)
92 {
93 	hammer2_key_t c1_beg;
94 	hammer2_key_t c1_end;
95 	hammer2_key_t c2_beg;
96 	hammer2_key_t c2_end;
97 
98 	/*
99 	 * Compare chains.  Overlaps are not supposed to happen and catch
100 	 * any software issues early we count overlaps as a match.
101 	 */
102 	c1_beg = chain1->bref.key;
103 	c1_end = c1_beg + ((hammer2_key_t)1 << chain1->bref.keybits) - 1;
104 	c2_beg = chain2->bref.key;
105 	c2_end = c2_beg + ((hammer2_key_t)1 << chain2->bref.keybits) - 1;
106 
107 	if (c1_end < c2_beg)	/* fully to the left */
108 		return(-1);
109 	if (c1_beg > c2_end)	/* fully to the right */
110 		return(1);
111 	return(0);		/* overlap (must not cross edge boundary) */
112 }
113 
114 static __inline
115 int
116 hammer2_isclusterable(hammer2_chain_t *chain)
117 {
118 	if (hammer2_cluster_enable) {
119 		if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
120 		    chain->bref.type == HAMMER2_BREF_TYPE_INODE ||
121 		    chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
122 			return(1);
123 		}
124 	}
125 	return(0);
126 }
127 
128 /*
129  * Make a chain visible to the flusher.  The flusher needs to be able to
130  * do flushes of a subdirectory chains or single files so it does a top-down
131  * recursion using the ONFLUSH flag for the recursion.  It locates MODIFIED
132  * or UPDATE chains and flushes back up the chain to the root.
133  */
void
hammer2_chain_setflush(hammer2_trans_t *trans, hammer2_chain_t *chain)
{
	hammer2_chain_t *parent;

	/*
	 * Walk upwards setting ONFLUSH, stopping at the first chain that
	 * already has the flag (its ancestors are then already marked) or
	 * at the topology root (parent == NULL).  Spinlocks are acquired
	 * child-to-parent, the allowed nesting order for core spinlocks.
	 *
	 * NOTE(review): (trans) is not referenced here.
	 */
	if ((chain->flags & HAMMER2_CHAIN_ONFLUSH) == 0) {
		hammer2_spin_sh(&chain->core.spin);
		while ((chain->flags & HAMMER2_CHAIN_ONFLUSH) == 0) {
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
			if ((parent = chain->parent) == NULL)
				break;
			/* take parent's lock before releasing child's */
			hammer2_spin_sh(&parent->core.spin);
			hammer2_spin_unsh(&chain->core.spin);
			chain = parent;
		}
		hammer2_spin_unsh(&chain->core.spin);
	}
}
152 
153 /*
154  * Allocate a new disconnected chain element representing the specified
155  * bref.  chain->refs is set to 1 and the passed bref is copied to
156  * chain->bref.  chain->bytes is derived from the bref.
157  *
158  * chain->pmp inherits pmp unless the chain is an inode (other than the
159  * super-root inode).
160  *
161  * NOTE: Returns a referenced but unlocked (because there is no core) chain.
162  */
hammer2_chain_t *
hammer2_chain_alloc(hammer2_dev_t *hmp, hammer2_pfs_t *pmp,
		    hammer2_trans_t *trans, hammer2_blockref_t *bref)
{
	hammer2_chain_t *chain;
	/* byte size is encoded as a radix in the low bits of data_off */
	u_int bytes = 1U << (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);

	/*
	 * Construct the appropriate system structure.
	 */
	switch(bref->type) {
	case HAMMER2_BREF_TYPE_INODE:
	case HAMMER2_BREF_TYPE_INDIRECT:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_DATA:
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
		/*
		 * Chain's are really only associated with the hmp but we
		 * maintain a pmp association for per-mount memory tracking
		 * purposes.  The pmp can be NULL.
		 */
		chain = kmalloc(sizeof(*chain), hmp->mchain, M_WAITOK | M_ZERO);
		break;
	case HAMMER2_BREF_TYPE_VOLUME:
	case HAMMER2_BREF_TYPE_FREEMAP:
		/* embedded elements are never allocated via this path */
		chain = NULL;
		panic("hammer2_chain_alloc volume type illegal for op");
	default:
		chain = NULL;
		panic("hammer2_chain_alloc: unrecognized blockref type: %d",
		      bref->type);
	}

	/*
	 * Initialize the new chain structure.  pmp must be set to NULL for
	 * chains belonging to the super-root topology of a device mount.
	 */
	if (pmp == hmp->spmp)
		chain->pmp = NULL;
	else
		chain->pmp = pmp;
	chain->hmp = hmp;
	chain->bref = *bref;
	chain->bytes = bytes;
	chain->refs = 1;			/* returned referenced */
	chain->flags = HAMMER2_CHAIN_ALLOCATED;	/* must be kfree'd on death */

	/*
	 * Set the PFS boundary flag if this chain represents a PFS root.
	 */
	if (bref->flags & HAMMER2_BREF_FLAG_PFSROOT)
		chain->flags |= HAMMER2_CHAIN_PFSBOUNDARY;
	hammer2_chain_core_init(chain);

	return (chain);
}
219 
220 /*
221  * Initialize a chain's core structure.  This structure used to be allocated
222  * but is now embedded.
223  *
224  * The core is not locked.  No additional refs on the chain are made.
225  * (trans) must not be NULL if (core) is not NULL.
226  */
227 void
228 hammer2_chain_core_init(hammer2_chain_t *chain)
229 {
230 	hammer2_chain_core_t *core = &chain->core;
231 
232 	/*
233 	 * Fresh core under nchain (no multi-homing of ochain's
234 	 * sub-tree).
235 	 */
236 	RB_INIT(&core->rbtree);	/* live chains */
237 	hammer2_mtx_init(&core->lock, "h2chain");
238 }
239 
240 /*
241  * Add a reference to a chain element, preventing its destruction.
242  *
243  * (can be called with spinlock held)
244  */
void
hammer2_chain_ref(hammer2_chain_t *chain)
{
	/* unconditional atomic bump; callers may hold a spinlock */
	atomic_add_int(&chain->refs, 1);
}
250 
251 /*
252  * Insert the chain in the core rbtree.
253  *
254  * Normal insertions are placed in the live rbtree.  Insertion of a deleted
255  * chain is a special case used by the flush code that is placed on the
256  * unstaged deleted list to avoid confusing the live view.
257  */
258 #define HAMMER2_CHAIN_INSERT_SPIN	0x0001
259 #define HAMMER2_CHAIN_INSERT_LIVE	0x0002
260 #define HAMMER2_CHAIN_INSERT_RACE	0x0004
261 
/*
 * Returns 0 on success or EAGAIN when INSERT_RACE is requested and the
 * parent's generation no longer matches (caller must retry the lookup).
 * The caller must already hold the parent core spinlock unless
 * INSERT_SPIN is specified.
 */
static
int
hammer2_chain_insert(hammer2_chain_t *parent, hammer2_chain_t *chain,
		     int flags, int generation)
{
	hammer2_chain_t *xchain;
	int error = 0;

	if (flags & HAMMER2_CHAIN_INSERT_SPIN)
		hammer2_spin_ex(&parent->core.spin);

	/*
	 * Interlocked by spinlock, check for race
	 */
	if ((flags & HAMMER2_CHAIN_INSERT_RACE) &&
	    parent->core.generation != generation) {
		error = EAGAIN;
		goto failed;
	}

	/*
	 * Insert chain (an overlapping insertion would be a bug and is
	 * caught by the KASSERT).
	 */
	xchain = RB_INSERT(hammer2_chain_tree, &parent->core.rbtree, chain);
	KASSERT(xchain == NULL,
		("hammer2_chain_insert: collision %p %p", chain, xchain));
	atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
	chain->parent = parent;
	++parent->core.chain_count;
	++parent->core.generation;	/* XXX incs for _get() too, XXX */

	/*
	 * We have to keep track of the effective live-view blockref count
	 * so the create code knows when to push an indirect block.
	 */
	if (flags & HAMMER2_CHAIN_INSERT_LIVE)
		atomic_add_int(&parent->core.live_count, 1);
failed:
	if (flags & HAMMER2_CHAIN_INSERT_SPIN)
		hammer2_spin_unex(&parent->core.spin);
	return error;
}
304 
305 /*
306  * Drop the caller's reference to the chain.  When the ref count drops to
307  * zero this function will try to disassociate the chain from its parent and
308  * deallocate it, then recursely drop the parent using the implied ref
309  * from the chain's chain->parent.
310  */
311 static hammer2_chain_t *hammer2_chain_lastdrop(hammer2_chain_t *chain);
312 
void
hammer2_chain_drop(hammer2_chain_t *chain)
{
	u_int refs;
	u_int need = 0;

	if (hammer2_debug & 0x200000)
		Debugger("drop");

	/*
	 * UPDATE and MODIFIED each account for a reference, so the
	 * caller's ref cannot legally be the last one while either flag
	 * is set (asserted below).
	 */
	if (chain->flags & HAMMER2_CHAIN_UPDATE)
		++need;
	if (chain->flags & HAMMER2_CHAIN_MODIFIED)
		++need;
	KKASSERT(chain->refs > need);

	/*
	 * Fast path: CAS-decrement refs.  The 1->0 transition is handed
	 * to hammer2_chain_lastdrop(), which returns NULL on success, the
	 * same chain to retry a failed atomic op, or a parent chain that
	 * must be recursively dropped in turn.
	 */
	while (chain) {
		refs = chain->refs;
		cpu_ccfence();
		KKASSERT(refs > 0);

		if (refs == 1) {
			chain = hammer2_chain_lastdrop(chain);
		} else {
			if (atomic_cmpset_int(&chain->refs, refs, refs - 1))
				break;
			/* retry the same chain */
		}
	}
}
342 
343 /*
344  * Safe handling of the 1->0 transition on chain.  Returns a chain for
345  * recursive drop or NULL, possibly returning the same chain if the atomic
346  * op fails.
347  *
348  * When two chains need to be recursively dropped we use the chain
349  * we would otherwise free to placehold the additional chain.  It's a bit
350  * convoluted but we can't just recurse without potentially blowing out
351  * the kernel stack.
352  *
353  * The chain cannot be freed if it has any children.
354  *
355  * The core spinlock is allowed nest child-to-parent (not parent-to-child).
356  */
static
hammer2_chain_t *
hammer2_chain_lastdrop(hammer2_chain_t *chain)
{
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	hammer2_chain_t *parent;
	hammer2_chain_t *rdrop;		/* parent needing a recursive drop */

	/*
	 * Spinlock the core and check to see if it is empty.  If it is
	 * not empty we leave chain intact with refs == 0.  The elements
	 * in core->rbtree are associated with other chains contemporary
	 * with ours but not with our chain directly.
	 */
	hammer2_spin_ex(&chain->core.spin);

	/*
	 * We can't free non-stale chains with children until we are
	 * able to free the children because there might be a flush
	 * dependency.  Flushes of stale children (which should also
	 * have their deleted flag set) short-cut recursive flush
	 * dependencies and can be freed here.  Any flushes which run
	 * through stale children due to the flush synchronization
	 * point should have a FLUSH_* bit set in the chain and not
	 * reach lastdrop at this time.
	 *
	 * NOTE: We return (chain) on failure to retry.
	 */
	if (chain->core.chain_count) {
		if (atomic_cmpset_int(&chain->refs, 1, 0)) {
			hammer2_spin_unex(&chain->core.spin);
			chain = NULL;	/* success */
		} else {
			hammer2_spin_unex(&chain->core.spin);
		}
		return(chain);
	}
	/* no chains left under us */

	/*
	 * chain->core has no children left so no accessors can get to our
	 * chain from there.  Now we have to lock the parent core to interlock
	 * remaining possible accessors that might bump chain's refs before
	 * we can safely drop chain's refs with intent to free the chain.
	 */
	hmp = chain->hmp;
	pmp = chain->pmp;	/* can be NULL */
	rdrop = NULL;

	/*
	 * Spinlock the parent and try to drop the last ref on chain.
	 * On success remove chain from its parent, otherwise return NULL.
	 *
	 * (normal core locks are top-down recursive but we define core
	 *  spinlocks as bottom-up recursive, so this is safe).
	 */
	if ((parent = chain->parent) != NULL) {
		hammer2_spin_ex(&parent->core.spin);
		if (atomic_cmpset_int(&chain->refs, 1, 0) == 0) {
			/* 1->0 transition failed, another accessor raced us */
			hammer2_spin_unex(&parent->core.spin);
			hammer2_spin_unex(&chain->core.spin);
			return(chain);	/* retry */
		}

		/*
		 * 1->0 transition successful, remove chain from its
		 * above core.
		 */
		if (chain->flags & HAMMER2_CHAIN_ONRBTREE) {
			RB_REMOVE(hammer2_chain_tree,
				  &parent->core.rbtree, chain);
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
			--parent->core.chain_count;
			chain->parent = NULL;
		}

		/*
		 * If our chain was the last chain in the parent's core the
		 * core is now empty and its parent might have to be
		 * re-dropped if it has 0 refs.  The 0->1 cmpset revives the
		 * parent for the recursive drop; if it fails someone else
		 * holds a ref and will handle it.
		 */
		if (parent->core.chain_count == 0) {
			rdrop = parent;
			if (atomic_cmpset_int(&rdrop->refs, 0, 1) == 0) {
				rdrop = NULL;
			}
		}
		hammer2_spin_unex(&parent->core.spin);
		parent = NULL;	/* safety */
	}

	/*
	 * Successful 1->0 transition and the chain can be destroyed now.
	 *
	 * We still have the core spinlock, and core's chain_count is 0.
	 * Any parent spinlock is gone.
	 */
	hammer2_spin_unex(&chain->core.spin);
	KKASSERT(RB_EMPTY(&chain->core.rbtree) &&
		 chain->core.chain_count == 0);

	/*
	 * All spin locks are gone, finish freeing stuff.
	 */
	KKASSERT((chain->flags & (HAMMER2_CHAIN_UPDATE |
				  HAMMER2_CHAIN_MODIFIED)) == 0);
	hammer2_chain_drop_data(chain, 1);

	KKASSERT(chain->dio == NULL);

	/*
	 * Once chain resources are gone we can use the now dead chain
	 * structure to placehold what might otherwise require a recursive
	 * drop, because we have potentially two things to drop and can only
	 * return one directly.
	 */
	if (chain->flags & HAMMER2_CHAIN_ALLOCATED) {
		chain->flags &= ~HAMMER2_CHAIN_ALLOCATED;
		chain->hmp = NULL;
		kfree(chain, hmp->mchain);
	}

	/*
	 * Possible chaining loop when parent re-drop needed.
	 */
	return(rdrop);
}
486 
487 /*
488  * On either last lock release or last drop
489  */
490 static void
491 hammer2_chain_drop_data(hammer2_chain_t *chain, int lastdrop)
492 {
493 	/*hammer2_dev_t *hmp = chain->hmp;*/
494 
495 	switch(chain->bref.type) {
496 	case HAMMER2_BREF_TYPE_VOLUME:
497 	case HAMMER2_BREF_TYPE_FREEMAP:
498 		if (lastdrop)
499 			chain->data = NULL;
500 		break;
501 	default:
502 		KKASSERT(chain->data == NULL);
503 		break;
504 	}
505 }
506 
507 /*
508  * Lock a referenced chain element, acquiring its data with I/O if necessary,
509  * and specify how you would like the data to be resolved.
510  *
511  * If an I/O or other fatal error occurs, chain->error will be set to non-zero.
512  *
513  * The lock is allowed to recurse, multiple locking ops will aggregate
514  * the requested resolve types.  Once data is assigned it will not be
515  * removed until the last unlock.
516  *
517  * HAMMER2_RESOLVE_NEVER - Do not resolve the data element.
518  *			   (typically used to avoid device/logical buffer
519  *			    aliasing for data)
520  *
521  * HAMMER2_RESOLVE_MAYBE - Do not resolve data elements for chains in
522  *			   the INITIAL-create state (indirect blocks only).
523  *
524  *			   Do not resolve data elements for DATA chains.
525  *			   (typically used to avoid device/logical buffer
526  *			    aliasing for data)
527  *
528  * HAMMER2_RESOLVE_ALWAYS- Always resolve the data element.
529  *
530  * HAMMER2_RESOLVE_SHARED- (flag) The chain is locked shared, otherwise
531  *			   it will be locked exclusive.
532  *
533  * NOTE: Embedded elements (volume header, inodes) are always resolved
534  *	 regardless.
535  *
536  * NOTE: Specifying HAMMER2_RESOLVE_ALWAYS on a newly-created non-embedded
537  *	 element will instantiate and zero its buffer, and flush it on
538  *	 release.
539  *
540  * NOTE: (data) elements are normally locked RESOLVE_NEVER or RESOLVE_MAYBE
541  *	 so as not to instantiate a device buffer, which could alias against
542  *	 a logical file buffer.  However, if ALWAYS is specified the
543  *	 device buffer will be instantiated anyway.
544  *
545  * WARNING! If data must be fetched a shared lock will temporarily be
546  *	    upgraded to exclusive.  However, a deadlock can occur if
547  *	    the caller owns more than one shared lock.
548  */
void
hammer2_chain_lock(hammer2_chain_t *chain, int how)
{
	hammer2_dev_t *hmp;
	hammer2_blockref_t *bref;
	hammer2_mtx_state_t ostate;
	char *bdata;
	int error;

	/*
	 * Ref and lock the element.  Recursive locks are allowed.
	 */
	KKASSERT(chain->refs > 0);
	atomic_add_int(&chain->lockcnt, 1);

	hmp = chain->hmp;
	KKASSERT(hmp != NULL);

	/*
	 * Get the appropriate lock.
	 */
	if (how & HAMMER2_RESOLVE_SHARED)
		hammer2_mtx_sh(&chain->core.lock);
	else
		hammer2_mtx_ex(&chain->core.lock);

	/*
	 * If we already have a valid data pointer no further action is
	 * necessary.
	 */
	if (chain->data)
		return;

	/*
	 * Do we have to resolve the data?  NEVER returns immediately;
	 * MAYBE skips resolution for INITIAL chains and DATA blocks,
	 * otherwise falls through to ALWAYS.
	 */
	switch(how & HAMMER2_RESOLVE_MASK) {
	case HAMMER2_RESOLVE_NEVER:
		return;
	case HAMMER2_RESOLVE_MAYBE:
		if (chain->flags & HAMMER2_CHAIN_INITIAL)
			return;
		if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
			return;
#if 0
		if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE)
			return;
		if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF)
			return;
#endif
		/* fall through */
	case HAMMER2_RESOLVE_ALWAYS:
		break;
	}

	/*
	 * Upgrade to an exclusive lock so we can safely manipulate the
	 * buffer cache.  If another thread got to it before us we
	 * can just return.
	 */
	ostate = hammer2_mtx_upgrade(&chain->core.lock);
	if (chain->data) {
		hammer2_mtx_downgrade(&chain->core.lock, ostate);
		return;
	}

	/*
	 * We must resolve to a device buffer, either by issuing I/O or
	 * by creating a zero-fill element.  We do not mark the buffer
	 * dirty when creating a zero-fill element (the hammer2_chain_modify()
	 * API must still be used to do that).
	 *
	 * The device buffer is variable-sized in powers of 2 down
	 * to HAMMER2_MIN_ALLOC (typically 1K).  A 64K physical storage
	 * chunk always contains buffers of the same size. (XXX)
	 *
	 * The minimum physical IO size may be larger than the variable
	 * block size.
	 */
	bref = &chain->bref;

	/*
	 * The getblk() optimization can only be used on newly created
	 * elements if the physical block size matches the request.
	 */
	if (chain->flags & HAMMER2_CHAIN_INITIAL) {
		/* INITIAL chain: fresh zero'd buffer, no read needed */
		error = hammer2_io_new(hmp, bref->data_off, chain->bytes,
					&chain->dio);
	} else {
		error = hammer2_io_bread(hmp, bref->data_off, chain->bytes,
					 &chain->dio);
		hammer2_adjreadcounter(&chain->bref, chain->bytes);
	}
	if (error) {
		chain->error = HAMMER2_ERROR_IO;
		kprintf("hammer2_chain_lock: I/O error %016jx: %d\n",
			(intmax_t)bref->data_off, error);
		hammer2_io_bqrelse(&chain->dio);
		hammer2_mtx_downgrade(&chain->core.lock, ostate);
		return;
	}
	chain->error = 0;

	/*
	 * NOTE: A locked chain's data cannot be modified without first
	 *	 calling hammer2_chain_modify().
	 */

	/*
	 * Clear INITIAL.  In this case we used io_new() and the buffer has
	 * been zero'd and marked dirty.
	 */
	bdata = hammer2_io_data(chain->dio, chain->bref.data_off);
	if (chain->flags & HAMMER2_CHAIN_INITIAL) {
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
		chain->bref.flags |= HAMMER2_BREF_FLAG_ZERO;
	} else if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
		/*
		 * check data not currently synchronized due to
		 * modification.  XXX assumes data stays in the buffer
		 * cache, which might not be true (need biodep on flush
		 * to calculate crc?  or simple crc?).
		 */
	} else {
		/* clean, unmodified data: verify its check code */
		if (hammer2_chain_testcheck(chain, bdata) == 0) {
			kprintf("chain %016jx.%02x meth=%02x "
				"CHECK FAIL %08x (flags=%08x)\n",
				chain->bref.data_off,
				chain->bref.type,
				chain->bref.methods,
				hammer2_icrc32(bdata, chain->bytes),
				chain->flags);
			chain->error = HAMMER2_ERROR_CHECK;
		}
	}

	/*
	 * Setup the data pointer, either pointing it to an embedded data
	 * structure and copying the data from the buffer, or pointing it
	 * into the buffer.
	 *
	 * The buffer is not retained when copying to an embedded data
	 * structure in order to avoid potential deadlocks or recursions
	 * on the same physical buffer.
	 */
	switch (bref->type) {
	case HAMMER2_BREF_TYPE_VOLUME:
	case HAMMER2_BREF_TYPE_FREEMAP:
		/*
		 * Copy data from bp to embedded buffer
		 */
		panic("hammer2_chain_lock: called on unresolved volume header");
		break;
	case HAMMER2_BREF_TYPE_INODE:
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
	case HAMMER2_BREF_TYPE_INDIRECT:
	case HAMMER2_BREF_TYPE_DATA:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	default:
		/*
		 * Point data at the device buffer and leave dio intact.
		 */
		chain->data = (void *)bdata;
		break;
	}
	hammer2_mtx_downgrade(&chain->core.lock, ostate);
}
716 
717 /*
718  * Unlock and deref a chain element.
719  *
720  * On the last lock release any non-embedded data (chain->dio) will be
721  * retired.
722  */
void
hammer2_chain_unlock(hammer2_chain_t *chain)
{
	hammer2_mtx_state_t ostate;
	long *counterp;
	u_int lockcnt;

	/*
	 * If multiple locks are present (or being attempted) on this
	 * particular chain we can just unlock, drop refs, and return.
	 *
	 * Otherwise fall-through on the 1->0 transition.
	 */
	for (;;) {
		lockcnt = chain->lockcnt;
		KKASSERT(lockcnt > 0);
		cpu_ccfence();
		if (lockcnt > 1) {
			if (atomic_cmpset_int(&chain->lockcnt,
					      lockcnt, lockcnt - 1)) {
				hammer2_mtx_unlock(&chain->core.lock);
				return;
			}
		} else {
			if (atomic_cmpset_int(&chain->lockcnt, 1, 0))
				break;
		}
		/* retry */
	}

	/*
	 * On the 1->0 transition we upgrade the core lock (if necessary)
	 * to exclusive for terminal processing.  If after upgrading we find
	 * that lockcnt is non-zero, another thread is racing us and will
	 * handle the unload for us later on, so just cleanup and return
	 * leaving the data/io intact
	 *
	 * Otherwise if lockcnt is still 0 it is possible for it to become
	 * non-zero and race, but since we hold the core->lock exclusively
	 * all that will happen is that the chain will be reloaded after we
	 * unload it.
	 */
	ostate = hammer2_mtx_upgrade(&chain->core.lock);
	if (chain->lockcnt) {
		hammer2_mtx_unlock(&chain->core.lock);
		return;
	}

	/*
	 * Shortcut the case if the data is embedded or not resolved.
	 *
	 * Do NOT NULL out chain->data (e.g. inode data), it might be
	 * dirty.
	 */
	if (chain->dio == NULL) {
		if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0)
			hammer2_chain_drop_data(chain, 0);
		hammer2_mtx_unlock(&chain->core.lock);
		return;
	}

	/*
	 * Statistics: accumulate bytes written into the appropriate
	 * global counter, split by async-flush (IOFLUSH) vs normal
	 * writes and by blockref type.
	 */
	if (hammer2_io_isdirty(chain->dio) == 0) {
		;	/* clean buffer, nothing to count */
	} else if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_DATA:
			counterp = &hammer2_ioa_file_write;
			break;
		case HAMMER2_BREF_TYPE_INODE:
			counterp = &hammer2_ioa_meta_write;
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
			counterp = &hammer2_ioa_indr_write;
			break;
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
			counterp = &hammer2_ioa_fmap_write;
			break;
		default:
			counterp = &hammer2_ioa_volu_write;
			break;
		}
		*counterp += chain->bytes;
	} else {
		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_DATA:
			counterp = &hammer2_iod_file_write;
			break;
		case HAMMER2_BREF_TYPE_INODE:
			counterp = &hammer2_iod_meta_write;
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
			counterp = &hammer2_iod_indr_write;
			break;
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
			counterp = &hammer2_iod_fmap_write;
			break;
		default:
			counterp = &hammer2_iod_volu_write;
			break;
		}
		*counterp += chain->bytes;
	}

	/*
	 * Clean out the dio.
	 *
	 * If a device buffer was used for data be sure to destroy the
	 * buffer when we are done to avoid aliases (XXX what about the
	 * underlying VM pages?).
	 *
	 * NOTE: Freemap leaf's use reserved blocks and thus no aliasing
	 *	 is possible.
	 *
	 * NOTE: The isdirty check tracks whether we have to bdwrite() the
	 *	 buffer or not.  The buffer might already be dirty.  The
	 *	 flag is re-set when chain_modify() is called, even if
	 *	 MODIFIED is already set, allowing the OS to retire the
	 *	 buffer independent of a hammer2 flush.
	 */
	chain->data = NULL;
	if ((chain->flags & HAMMER2_CHAIN_IOFLUSH) &&
	    hammer2_io_isdirty(chain->dio)) {
		hammer2_io_bawrite(&chain->dio);
	} else {
		hammer2_io_bqrelse(&chain->dio);
	}
	hammer2_mtx_unlock(&chain->core.lock);
}
856 
857 /*
858  * This counts the number of live blockrefs in a block array and
859  * also calculates the point at which all remaining blockrefs are empty.
860  * This routine can only be called on a live chain (DUPLICATED flag not set).
861  *
862  * NOTE: Flag is not set until after the count is complete, allowing
863  *	 callers to test the flag without holding the spinlock.
864  *
865  * NOTE: If base is NULL the related chain is still in the INITIAL
866  *	 state and there are no blockrefs to count.
867  *
868  * NOTE: live_count may already have some counts accumulated due to
869  *	 creation and deletion and could even be initially negative.
870  */
void
hammer2_chain_countbrefs(hammer2_chain_t *chain,
			 hammer2_blockref_t *base, int count)
{
	hammer2_spin_ex(&chain->core.spin);
        if ((chain->core.flags & HAMMER2_CORE_COUNTEDBREFS) == 0) {
		if (base) {
			/*
			 * Scan backwards for the last non-empty blockref;
			 * everything at index live_zero and above is empty.
			 */
			while (--count >= 0) {
				if (base[count].type)
					break;
			}
			chain->core.live_zero = count + 1;
			/* accumulate live blockrefs below live_zero */
			while (count >= 0) {
				if (base[count].type)
					atomic_add_int(&chain->core.live_count,
						       1);
				--count;
			}
		} else {
			/* INITIAL chain, no blockrefs to count */
			chain->core.live_zero = 0;
		}
		/* else do not modify live_count */
		atomic_set_int(&chain->core.flags, HAMMER2_CORE_COUNTEDBREFS);
	}
	hammer2_spin_unex(&chain->core.spin);
}
897 
898 /*
899  * Resize the chain's physical storage allocation in-place.  This function does
900  * not adjust the data pointer and must be followed by (typically) a
901  * hammer2_chain_modify() call to copy any old data over and adjust the
902  * data pointer.
903  *
904  * Chains can be resized smaller without reallocating the storage.  Resizing
905  * larger will reallocate the storage.  Excess or prior storage is reclaimed
906  * asynchronously at a later time.
907  *
908  * Must be passed an exclusively locked parent and chain.
909  *
910  * This function is mostly used with DATA blocks locked RESOLVE_NEVER in order
911  * to avoid instantiating a device buffer that conflicts with the vnode data
912  * buffer.  However, because H2 can compress or encrypt data, the chain may
913  * have a dio assigned to it in those situations, and they do not conflict.
914  *
915  * XXX return error if cannot resize.
916  */
void
hammer2_chain_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
		     hammer2_chain_t *parent, hammer2_chain_t *chain,
		     int nradix, int flags)
{
	hammer2_dev_t *hmp;
	size_t obytes;		/* old allocation size */
	size_t nbytes;		/* new allocation size, 1 << nradix */

	hmp = chain->hmp;

	/*
	 * Only data and indirect blocks can be resized for now.
	 * (The volu root, inodes, and freemap elements use a fixed size).
	 *
	 * NOTE(review): (parent) and (ip) are not referenced in the
	 * current body (the ip accounting line is commented out).
	 */
	KKASSERT(chain != &hmp->vchain);
	KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA ||
		 chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT);

	/*
	 * Nothing to do if the element is already the proper size
	 */
	obytes = chain->bytes;
	nbytes = 1U << nradix;
	if (obytes == nbytes)
		return;
	chain->data_count += (ssize_t)(nbytes - obytes);

	/*
	 * Make sure the old data is instantiated so we can copy it.  If this
	 * is a data block, the device data may be superfluous since the data
	 * might be in a logical block, but compressed or encrypted data is
	 * another matter.
	 *
	 * NOTE: The modify will set BMAPUPD for us if BMAPPED is set.
	 */
	hammer2_chain_modify(trans, chain, 0);

	/*
	 * Relocate the block, even if making it smaller (because different
	 * block sizes may be in different regions).
	 *
	 * (data blocks only, we aren't copying the storage here).
	 */
	hammer2_freemap_alloc(trans, chain, nbytes);
	chain->bytes = nbytes;
	/*ip->delta_dcount += (ssize_t)(nbytes - obytes);*/ /* XXX atomic */

	/*
	 * We don't want the followup chain_modify() to try to copy data
	 * from the old (wrong-sized) buffer.  It won't know how much to
	 * copy.  This case should only occur during writes when the
	 * originator already has the data to write in-hand.
	 */
	if (chain->dio) {
		KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA);
		hammer2_io_brelse(&chain->dio);
		chain->data = NULL;
	}
}
977 
/*
 * Mark a chain modified for the current transaction, allocating new
 * (copy-on-write) storage when necessary and instantiating/copying the
 * chain's data buffer so the caller may write to chain->data.
 *
 * Sets MODIFIED (with a ref and pfs memory accounting) and UPDATE (with a
 * ref) the first time each is needed, bumps mirror_tid/modify_tid, and
 * performs the COW bread/new + bcopy for buffer-backed bref types.
 * Embedded types (VOLUME, FREEMAP) need no buffer work.
 *
 * Must be called with the chain exclusively locked.  On I/O error the
 * old buffer is released, chain->data is cleared and chain->error is set
 * to HAMMER2_ERROR_IO so callers cannot scribble on stale data.
 */
void
hammer2_chain_modify(hammer2_trans_t *trans, hammer2_chain_t *chain, int flags)
{
	hammer2_blockref_t obref;	/* NOTE(review): snapshot of the
					 * pre-modify bref; appears unused in
					 * this function — confirm */
	hammer2_dev_t *hmp;
	hammer2_io_t *dio;
	int error;
	int wasinitial;
	int newmod;
	char *bdata;

	hmp = chain->hmp;
	obref = chain->bref;
	KKASSERT((chain->flags & HAMMER2_CHAIN_FICTITIOUS) == 0);

	/*
	 * Data is not optional for freemap chains (we must always be sure
	 * to copy the data on COW storage allocations).
	 */
	if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
	    chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
		KKASSERT((chain->flags & HAMMER2_CHAIN_INITIAL) ||
			 (flags & HAMMER2_MODIFY_OPTDATA) == 0);
	}

	/*
	 * Data must be resolved if already assigned unless explicitly
	 * flagged otherwise.  The lock/unlock cycle forces the resolve;
	 * the data pointer remains valid because we hold our own lock.
	 */
	if (chain->data == NULL && (flags & HAMMER2_MODIFY_OPTDATA) == 0 &&
	    (chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX)) {
		hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_unlock(chain);
	}

	/*
	 * Otherwise do initial-chain handling.  Set MODIFIED to indicate
	 * that the chain has been modified.  Set UPDATE to ensure that
	 * the blockref is updated in the parent.
	 */
	if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_chain_ref(chain);
		hammer2_pfs_memory_inc(chain->pmp);	/* can be NULL */
		newmod = 1;
	} else {
		newmod = 0;
	}
	if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0) {
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
		hammer2_chain_ref(chain);
	}

	/*
	 * The modification or re-modification requires an allocation and
	 * possible COW.
	 *
	 * We normally always allocate new storage here.  If storage exists
	 * and MODIFY_NOREALLOC is passed in, we do not allocate new storage.
	 * The volume root and freemap root chains are never reallocated.
	 */
	if (chain != &hmp->vchain && chain != &hmp->fchain) {
		if ((chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX) == 0 ||
		     ((flags & HAMMER2_MODIFY_NOREALLOC) == 0 && newmod)
		) {
			hammer2_freemap_alloc(trans, chain, chain->bytes);
			/* XXX failed allocation */
		}
	}

	/*
	 * Update mirror_tid and modify_tid.
	 *
	 * NOTE: modify_tid updates can be suppressed with a flag.  This is
	 *	 used by the slave synchronization code to delay updating
	 *	 modify_tid in higher-level objects until lower-level objects
	 *	 have been synchronized.
	 *
	 * NOTE: chain->pmp could be the device spmp.
	 */
	chain->bref.mirror_tid = hmp->voldata.mirror_tid + 1;
	if (chain->pmp && (trans->flags & HAMMER2_TRANS_KEEPMODIFY) == 0)
		chain->bref.modify_tid = chain->pmp->modify_tid + 1;

	/*
	 * Set BMAPUPD to tell the flush code that an existing blockmap entry
	 * requires updating as well as to tell the delete code that the
	 * chain's blockref might not exactly match (in terms of physical size
	 * or block offset) the one in the parent's blocktable.  The base key
	 * of course will still match.
	 */
	if (chain->flags & HAMMER2_CHAIN_BMAPPED)
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_BMAPUPD);

	/*
	 * Short-cut data blocks which the caller does not need an actual
	 * data reference to (aka OPTDATA), as long as the chain does not
	 * already have a data pointer to the data.  This generally means
	 * that the modifications are being done via the logical buffer cache.
	 * The INITIAL flag relates only to the device data buffer and thus
	 * remains unchanged in this situation.
	 */
	if (chain->bref.type == HAMMER2_BREF_TYPE_DATA &&
	    (flags & HAMMER2_MODIFY_OPTDATA) &&
	    chain->data == NULL) {
		goto skip2;
	}

	/*
	 * Clearing the INITIAL flag (for indirect blocks) indicates that
	 * we've processed the uninitialized storage allocation.
	 *
	 * If this flag is already clear we are likely in a copy-on-write
	 * situation but we have to be sure NOT to bzero the storage if
	 * no data is present.
	 */
	if (chain->flags & HAMMER2_CHAIN_INITIAL) {
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
		wasinitial = 1;
	} else {
		wasinitial = 0;
	}

	/*
	 * Instantiate data buffer and possibly execute COW operation
	 */
	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_VOLUME:
	case HAMMER2_BREF_TYPE_FREEMAP:
		/*
		 * The data is embedded, no copy-on-write operation is
		 * needed.
		 */
		KKASSERT(chain->dio == NULL);
		break;
	case HAMMER2_BREF_TYPE_INODE:
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
	case HAMMER2_BREF_TYPE_DATA:
	case HAMMER2_BREF_TYPE_INDIRECT:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		/*
		 * Perform the copy-on-write operation
		 *
		 * zero-fill or copy-on-write depending on whether
		 * chain->data exists or not and set the dirty state for
		 * the new buffer.  hammer2_io_new() will handle the
		 * zero-fill.
		 */
		KKASSERT(chain != &hmp->vchain && chain != &hmp->fchain);

		if (wasinitial) {
			error = hammer2_io_new(hmp, chain->bref.data_off,
					       chain->bytes, &dio);
		} else {
			error = hammer2_io_bread(hmp, chain->bref.data_off,
						 chain->bytes, &dio);
		}
		hammer2_adjreadcounter(&chain->bref, chain->bytes);

		/*
		 * If an I/O error occurs make sure callers cannot accidentally
		 * modify the old buffer's contents and corrupt the filesystem.
		 */
		if (error) {
			kprintf("hammer2_chain_modify: hmp=%p I/O error\n",
				hmp);
			chain->error = HAMMER2_ERROR_IO;
			hammer2_io_brelse(&dio);
			hammer2_io_brelse(&chain->dio);
			chain->data = NULL;
			break;
		}
		chain->error = 0;
		bdata = hammer2_io_data(dio, chain->bref.data_off);

		if (chain->data) {
			/* COW: copy the old contents into the new buffer */
			KKASSERT(chain->dio != NULL);
			if (chain->data != (void *)bdata) {
				bcopy(chain->data, bdata, chain->bytes);
			}
		} else if (wasinitial == 0) {
			/*
			 * We have a problem.  We were asked to COW but
			 * we don't have any data to COW with!
			 */
			panic("hammer2_chain_modify: having a COW %p\n",
			      chain);
		}

		/*
		 * Retire the old buffer, replace with the new.  Dirty or
		 * redirty the new buffer.
		 *
		 * WARNING! The system buffer cache may have already flushed
		 *	    the buffer, so we must be sure to [re]dirty it
		 *	    for further modification.
		 */
		if (chain->dio)
			hammer2_io_brelse(&chain->dio);
		chain->data = (void *)bdata;
		chain->dio = dio;
		hammer2_io_setdirty(dio);	/* modified by bcopy above */
		break;
	default:
		panic("hammer2_chain_modify: illegal non-embedded type %d",
		      chain->bref.type);
		break;

	}
skip2:
	/*
	 * setflush on parent indicating that the parent must recurse down
	 * to us.  Do not call on chain itself which might already have it
	 * set.
	 */
	if (chain->parent)
		hammer2_chain_setflush(trans, chain->parent);
}
1195 
1196 /*
1197  * Volume header data locks
1198  */
/*
 * Acquire the volume header data lock exclusively.
 */
void
hammer2_voldata_lock(hammer2_dev_t *hmp)
{
	lockmgr(&hmp->vollk, LK_EXCLUSIVE);
}
1204 
/*
 * Release the volume header data lock.
 */
void
hammer2_voldata_unlock(hammer2_dev_t *hmp)
{
	lockmgr(&hmp->vollk, LK_RELEASE);
}
1210 
/*
 * Flag the volume header's chain (vchain) as modified, taking a ref and
 * charging pfs memory accounting exactly once per MODIFIED transition
 * (mirrors the MODIFIED handling in hammer2_chain_modify()).
 */
void
hammer2_voldata_modify(hammer2_dev_t *hmp)
{
	if ((hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) == 0) {
		atomic_set_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_chain_ref(&hmp->vchain);
		hammer2_pfs_memory_inc(hmp->vchain.pmp);
	}
}
1220 
1221 /*
1222  * This function returns the chain at the nearest key within the specified
1223  * range.  The returned chain will be referenced but not locked.
1224  *
1225  * This function will recurse through chain->rbtree as necessary and will
1226  * return a *key_nextp suitable for iteration.  *key_nextp is only set if
1227  * the iteration value is less than the current value of *key_nextp.
1228  *
1229  * The caller should use (*key_nextp) to calculate the actual range of
1230  * the returned element, which will be (key_beg to *key_nextp - 1), because
1231  * there might be another element which is superior to the returned element
1232  * and overlaps it.
1233  *
1234  * (*key_nextp) can be passed as key_beg in an iteration only while non-NULL
1235  * chains continue to be returned.  On EOF (*key_nextp) may overflow since
1236  * it will wind up being (key_end + 1).
1237  *
1238  * WARNING!  Must be called with child's spinlock held.  Spinlock remains
1239  *	     held through the operation.
1240  */
/*
 * Shared state for the RB_SCAN performed by hammer2_chain_find().
 */
struct hammer2_chain_find_info {
	hammer2_chain_t		*best;		/* best candidate so far */
	hammer2_key_t		key_beg;	/* inclusive search start */
	hammer2_key_t		key_end;	/* inclusive search end */
	hammer2_key_t		key_next;	/* iteration value (out) */
};

static int hammer2_chain_find_cmp(hammer2_chain_t *child, void *data);
static int hammer2_chain_find_callback(hammer2_chain_t *child, void *data);
1250 
1251 static
1252 hammer2_chain_t *
1253 hammer2_chain_find(hammer2_chain_t *parent, hammer2_key_t *key_nextp,
1254 			  hammer2_key_t key_beg, hammer2_key_t key_end)
1255 {
1256 	struct hammer2_chain_find_info info;
1257 
1258 	info.best = NULL;
1259 	info.key_beg = key_beg;
1260 	info.key_end = key_end;
1261 	info.key_next = *key_nextp;
1262 
1263 	RB_SCAN(hammer2_chain_tree, &parent->core.rbtree,
1264 		hammer2_chain_find_cmp, hammer2_chain_find_callback,
1265 		&info);
1266 	*key_nextp = info.key_next;
1267 #if 0
1268 	kprintf("chain_find %p %016jx:%016jx next=%016jx\n",
1269 		parent, key_beg, key_end, *key_nextp);
1270 #endif
1271 
1272 	return (info.best);
1273 }
1274 
1275 static
1276 int
1277 hammer2_chain_find_cmp(hammer2_chain_t *child, void *data)
1278 {
1279 	struct hammer2_chain_find_info *info = data;
1280 	hammer2_key_t child_beg;
1281 	hammer2_key_t child_end;
1282 
1283 	child_beg = child->bref.key;
1284 	child_end = child_beg + ((hammer2_key_t)1 << child->bref.keybits) - 1;
1285 
1286 	if (child_end < info->key_beg)
1287 		return(-1);
1288 	if (child_beg > info->key_end)
1289 		return(1);
1290 	return(0);
1291 }
1292 
static
int
hammer2_chain_find_callback(hammer2_chain_t *child, void *data)
{
	struct hammer2_chain_find_info *info = data;
	hammer2_chain_t *best;
	hammer2_key_t child_end;

	/*
	 * WARNING! Do not discard DUPLICATED chains, it is possible that
	 *	    we are catching an insertion half-way done.  If a
	 *	    duplicated chain turns out to be the best choice the
	 *	    caller will re-check its flags after locking it.
	 *
	 * WARNING! Layerq is scanned forwards, exact matches should keep
	 *	    the existing info->best.
	 */
	if ((best = info->best) == NULL) {
		/*
		 * No previous best.  Assign best
		 */
		info->best = child;
	} else if (best->bref.key <= info->key_beg &&
		   child->bref.key <= info->key_beg) {
		/*
		 * Illegal overlap.  Two chains cannot both cover key_beg.
		 */
		KKASSERT(0);
		/*info->best = child;*/
	} else if (child->bref.key < best->bref.key) {
		/*
		 * Child has a nearer key and best is not flush with key_beg.
		 * Set best to child.  Truncate key_next to the old best key.
		 * (key_next == 0 means "unlimited/overflowed").
		 */
		info->best = child;
		if (info->key_next > best->bref.key || info->key_next == 0)
			info->key_next = best->bref.key;
	} else if (child->bref.key == best->bref.key) {
		/*
		 * If our current best is flush with the child then this
		 * is an illegal overlap.
		 *
		 * key_next will automatically be limited to the smaller of
		 * the two end-points.
		 */
		KKASSERT(0);
		info->best = child;
	} else {
		/*
		 * Keep the current best but truncate key_next to the child's
		 * base.
		 *
		 * key_next will also automatically be limited to the smaller
		 * of the two end-points (probably not necessary for this case
		 * but we do it anyway).
		 */
		if (info->key_next > child->bref.key || info->key_next == 0)
			info->key_next = child->bref.key;
	}

	/*
	 * Always truncate key_next based on child's end-of-range.
	 * child_end == 0 indicates the range wrapped past the top of the
	 * key space, in which case no truncation applies.
	 */
	child_end = child->bref.key + ((hammer2_key_t)1 << child->bref.keybits);
	if (child_end && (info->key_next > child_end || info->key_next == 0))
		info->key_next = child_end;

	return(0);
}
1362 
1363 /*
1364  * Retrieve the specified chain from a media blockref, creating the
1365  * in-memory chain structure which reflects it.
1366  *
1367  * To handle insertion races pass the INSERT_RACE flag along with the
1368  * generation number of the core.  NULL will be returned if the generation
1369  * number changes before we have a chance to insert the chain.  Insert
1370  * races can occur because the parent might be held shared.
1371  *
1372  * Caller must hold the parent locked shared or exclusive since we may
1373  * need the parent's bref array to find our block.
1374  *
1375  * WARNING! chain->pmp is always set to NULL for any chain representing
1376  *	    part of the super-root topology.
1377  */
hammer2_chain_t *
hammer2_chain_get(hammer2_chain_t *parent, int generation,
		  hammer2_blockref_t *bref)
{
	hammer2_dev_t *hmp = parent->hmp;
	hammer2_chain_t *chain;
	int error;

	/*
	 * Allocate a chain structure representing the existing media
	 * entry.  Resulting chain has one ref and is not locked.
	 *
	 * A PFSROOT bref starts a new pmp association (passed as NULL
	 * here); otherwise the chain inherits the parent's pmp.
	 */
	if (bref->flags & HAMMER2_BREF_FLAG_PFSROOT)
		chain = hammer2_chain_alloc(hmp, NULL, NULL, bref);
	else
		chain = hammer2_chain_alloc(hmp, parent->pmp, NULL, bref);
	/* ref'd chain returned */

	/*
	 * Flag that the chain is in the parent's blockmap so delete/flush
	 * knows what to do with it.
	 */
	atomic_set_int(&chain->flags, HAMMER2_CHAIN_BMAPPED);

	/*
	 * Link the chain into its parent.  A spinlock is required to safely
	 * access the RBTREE, and it is possible to collide with another
	 * hammer2_chain_get() operation because the caller might only hold
	 * a shared lock on the parent.  INSERT_RACE + generation detects
	 * a concurrent insertion of the same blockref.
	 */
	KKASSERT(parent->refs > 0);
	error = hammer2_chain_insert(parent, chain,
				     HAMMER2_CHAIN_INSERT_SPIN |
				     HAMMER2_CHAIN_INSERT_RACE,
				     generation);
	if (error) {
		/* lost the race; drop our ref and let the caller retry */
		KKASSERT((chain->flags & HAMMER2_CHAIN_ONRBTREE) == 0);
		kprintf("chain %p get race\n", chain);
		hammer2_chain_drop(chain);
		chain = NULL;
	} else {
		KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
	}

	/*
	 * Return our new chain referenced but not locked, or NULL if
	 * a race occurred.
	 */
	return (chain);
}
1428 
1429 /*
1430  * Lookup initialization/completion API
1431  */
1432 hammer2_chain_t *
1433 hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags)
1434 {
1435 	hammer2_chain_ref(parent);
1436 	if (flags & HAMMER2_LOOKUP_SHARED) {
1437 		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
1438 					   HAMMER2_RESOLVE_SHARED);
1439 	} else {
1440 		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
1441 	}
1442 	return (parent);
1443 }
1444 
1445 void
1446 hammer2_chain_lookup_done(hammer2_chain_t *parent)
1447 {
1448 	if (parent) {
1449 		hammer2_chain_unlock(parent);
1450 		hammer2_chain_drop(parent);
1451 	}
1452 }
1453 
1454 static
1455 hammer2_chain_t *
1456 hammer2_chain_getparent(hammer2_chain_t **parentp, int how)
1457 {
1458 	hammer2_chain_t *oparent;
1459 	hammer2_chain_t *nparent;
1460 
1461 	/*
1462 	 * Be careful of order, oparent must be unlocked before nparent
1463 	 * is locked below to avoid a deadlock.
1464 	 */
1465 	oparent = *parentp;
1466 	hammer2_spin_ex(&oparent->core.spin);
1467 	nparent = oparent->parent;
1468 	hammer2_chain_ref(nparent);
1469 	hammer2_spin_unex(&oparent->core.spin);
1470 	if (oparent) {
1471 		hammer2_chain_unlock(oparent);
1472 		hammer2_chain_drop(oparent);
1473 		oparent = NULL;
1474 	}
1475 
1476 	hammer2_chain_lock(nparent, how);
1477 	*parentp = nparent;
1478 
1479 	return (nparent);
1480 }
1481 
1482 /*
 * Locate the first chain whose key range overlaps (key_beg, key_end) inclusive.
1484  * (*parentp) typically points to an inode but can also point to a related
1485  * indirect block and this function will recurse upwards and find the inode
1486  * again.
1487  *
1488  * (*parentp) must be exclusively locked and referenced and can be an inode
1489  * or an existing indirect block within the inode.
1490  *
1491  * On return (*parentp) will be modified to point at the deepest parent chain
1492  * element encountered during the search, as a helper for an insertion or
1493  * deletion.   The new (*parentp) will be locked and referenced and the old
1494  * will be unlocked and dereferenced (no change if they are both the same).
1495  *
1496  * The matching chain will be returned exclusively locked.  If NOLOCK is
1497  * requested the chain will be returned only referenced.  Note that the
1498  * parent chain must always be locked shared or exclusive, matching the
1499  * HAMMER2_LOOKUP_SHARED flag.  We can conceivably lock it SHARED temporarily
1500  * when NOLOCK is specified but that complicates matters if *parentp must
1501  * inherit the chain.
1502  *
1503  * NOLOCK also implies NODATA, since an unlocked chain usually has a NULL
1504  * data pointer or can otherwise be in flux.
1505  *
1506  * NULL is returned if no match was found, but (*parentp) will still
1507  * potentially be adjusted.
1508  *
1509  * If a fatal error occurs (typically an I/O error), a dummy chain is
1510  * returned with chain->error and error-identifying information set.  This
1511  * chain will assert if you try to do anything fancy with it.
1512  *
1513  * XXX Depending on where the error occurs we should allow continued iteration.
1514  *
1515  * On return (*key_nextp) will point to an iterative value for key_beg.
1516  * (If NULL is returned (*key_nextp) is set to (key_end + 1)).
1517  *
1518  * This function will also recurse up the chain if the key is not within the
1519  * current parent's range.  (*parentp) can never be set to NULL.  An iteration
1520  * can simply allow (*parentp) to float inside the loop.
1521  *
1522  * NOTE!  chain->data is not always resolved.  By default it will not be
1523  *	  resolved for BREF_TYPE_DATA, FREEMAP_NODE, or FREEMAP_LEAF.  Use
1524  *	  HAMMER2_LOOKUP_ALWAYS to force resolution (but be careful w/
1525  *	  BREF_TYPE_DATA as the device buffer can alias the logical file
1526  *	  buffer).
1527  */
hammer2_chain_t *
hammer2_chain_lookup(hammer2_chain_t **parentp, hammer2_key_t *key_nextp,
		     hammer2_key_t key_beg, hammer2_key_t key_end,
		     int *cache_indexp, int flags)
{
	hammer2_dev_t *hmp;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_blockref_t *base;
	hammer2_blockref_t *bref;
	hammer2_blockref_t bcopy;	/* stable copy of *bref for race check */
	hammer2_key_t scan_beg;
	hammer2_key_t scan_end;
	int count = 0;
	int how_always = HAMMER2_RESOLVE_ALWAYS;
	int how_maybe = HAMMER2_RESOLVE_MAYBE;
	int how;
	int generation;
	int maxloops = 300000;	/* sanity brake against infinite iteration */

	/*
	 * Translate caller LOOKUP flags into the three lock/resolve modes
	 * used below (how = leaf chains, how_maybe = structural chains,
	 * how_always = forced data resolution).
	 */
	if (flags & HAMMER2_LOOKUP_ALWAYS) {
		how_maybe = how_always;
		how = HAMMER2_RESOLVE_ALWAYS;
	} else if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK)) {
		how = HAMMER2_RESOLVE_NEVER;
	} else {
		how = HAMMER2_RESOLVE_MAYBE;
	}
	if (flags & HAMMER2_LOOKUP_SHARED) {
		how_maybe |= HAMMER2_RESOLVE_SHARED;
		how_always |= HAMMER2_RESOLVE_SHARED;
		how |= HAMMER2_RESOLVE_SHARED;
	}

	/*
	 * Recurse (*parentp) upward if necessary until the parent completely
	 * encloses the key range or we hit the inode.
	 *
	 * This function handles races against the flusher doing a delete-
	 * duplicate above us and re-homes the parent to the duplicate in
	 * that case, otherwise we'd wind up recursing down a stale chain.
	 */
	parent = *parentp;
	hmp = parent->hmp;

	while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
	       parent->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
		scan_beg = parent->bref.key;
		scan_end = scan_beg +
			   ((hammer2_key_t)1 << parent->bref.keybits) - 1;
		if (key_beg >= scan_beg && key_end <= scan_end)
			break;
		parent = hammer2_chain_getparent(parentp, how_maybe);
	}

again:
	if (--maxloops == 0)
		panic("hammer2_chain_lookup: maxloops");
	/*
	 * Locate the blockref array.  Currently we do a fully associative
	 * search through the array.
	 */
	switch(parent->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * Special shortcut for embedded data returns the inode
		 * itself.  Callers must detect this condition and access
		 * the embedded data (the strategy code does this for us).
		 *
		 * This is only applicable to regular files and softlinks.
		 */
		if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
			if (flags & HAMMER2_LOOKUP_NODIRECT) {
				chain = NULL;
				*key_nextp = key_end + 1;
				goto done;
			}
			hammer2_chain_ref(parent);
			if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0)
				hammer2_chain_lock(parent, how_always);
			*key_nextp = key_end + 1;
			return (parent);
		}
		base = &parent->data->ipdata.u.blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_INDIRECT:
		/*
		 * Handle MATCHIND on the parent
		 */
		if (flags & HAMMER2_LOOKUP_MATCHIND) {
			scan_beg = parent->bref.key;
			scan_end = scan_beg +
			       ((hammer2_key_t)1 << parent->bref.keybits) - 1;
			if (key_beg == scan_beg && key_end == scan_end) {
				chain = parent;
				hammer2_chain_ref(chain);
				hammer2_chain_lock(chain, how_maybe);
				*key_nextp = scan_end + 1;
				goto done;
			}
		}
		/*
		 * Optimize indirect blocks in the INITIAL state to avoid
		 * I/O.  An INITIAL indirect block has no media blockrefs;
		 * only the in-memory rbtree applies.
		 */
		if (parent->flags & HAMMER2_CHAIN_INITIAL) {
			base = NULL;
		} else {
			if (parent->data == NULL)
				panic("parent->data is NULL");
			base = &parent->data->npdata[0];
		}
		count = parent->bytes / sizeof(hammer2_blockref_t);
		break;
	case HAMMER2_BREF_TYPE_VOLUME:
		base = &hmp->voldata.sroot_blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	case HAMMER2_BREF_TYPE_FREEMAP:
		base = &hmp->voldata.freemap_blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	default:
		panic("hammer2_chain_lookup: unrecognized blockref type: %d",
		      parent->bref.type);
		base = NULL;	/* safety */
		count = 0;	/* safety */
	}

	/*
	 * Merged scan to find next candidate.
	 *
	 * hammer2_base_*() functions require the parent->core.live_* fields
	 * to be synchronized.
	 *
	 * We need to hold the spinlock to access the block array and RB tree
	 * and to interlock chain creation.
	 */
	if ((parent->core.flags & HAMMER2_CORE_COUNTEDBREFS) == 0)
		hammer2_chain_countbrefs(parent, base, count);

	/*
	 * Combined search of the media blockref array and the in-memory
	 * rbtree.  generation is sampled under the spinlock so a later
	 * hammer2_chain_get() can detect a racing insertion.
	 */
	hammer2_spin_ex(&parent->core.spin);
	chain = hammer2_combined_find(parent, base, count,
				      cache_indexp, key_nextp,
				      key_beg, key_end,
				      &bref);
	generation = parent->core.generation;

	/*
	 * Exhausted parent chain, iterate.
	 */
	if (bref == NULL) {
		hammer2_spin_unex(&parent->core.spin);
		if (key_beg == key_end)	/* short cut single-key case */
			return (NULL);

		/*
		 * Stop if we reached the end of the iteration.
		 */
		if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT &&
		    parent->bref.type != HAMMER2_BREF_TYPE_FREEMAP_NODE) {
			return (NULL);
		}

		/*
		 * Calculate next key, stop if we reached the end of the
		 * iteration (key_beg wrapping to 0 means we passed the top
		 * of the key space), otherwise go up one level and loop.
		 */
		key_beg = parent->bref.key +
			  ((hammer2_key_t)1 << parent->bref.keybits);
		if (key_beg == 0 || key_beg > key_end)
			return (NULL);
		parent = hammer2_chain_getparent(parentp, how_maybe);
		goto again;
	}

	/*
	 * Selected from blockref or in-memory chain.
	 */
	if (chain == NULL) {
		/*
		 * Media blockref only: copy it before dropping the spinlock,
		 * then instantiate a chain.  If the blockref changed under
		 * us (bcmp mismatch) or the insert raced, retry.
		 */
		bcopy = *bref;
		hammer2_spin_unex(&parent->core.spin);
		chain = hammer2_chain_get(parent, generation,
					  &bcopy);
		if (chain == NULL) {
			kprintf("retry lookup parent %p keys %016jx:%016jx\n",
				parent, key_beg, key_end);
			goto again;
		}
		if (bcmp(&bcopy, bref, sizeof(bcopy))) {
			hammer2_chain_drop(chain);
			goto again;
		}
	} else {
		hammer2_chain_ref(chain);
		hammer2_spin_unex(&parent->core.spin);
	}

	/*
	 * chain is referenced but not locked.  We must lock the chain
	 * to obtain definitive DUPLICATED/DELETED state
	 */
	if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
	    chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
		hammer2_chain_lock(chain, how_maybe);
	} else {
		hammer2_chain_lock(chain, how);
	}

	/*
	 * Skip deleted chains (XXX cache 'i' end-of-block-array? XXX)
	 *
	 * NOTE: Chain's key range is not relevant as there might be
	 *	 one-offs within the range that are not deleted.
	 *
	 * NOTE: Lookups can race delete-duplicate because
	 *	 delete-duplicate does not lock the parent's core
	 *	 (they just use the spinlock on the core).  We must
	 *	 check for races by comparing the DUPLICATED flag before
	 *	 releasing the spinlock with the flag after locking the
	 *	 chain.
	 */
	if (chain->flags & HAMMER2_CHAIN_DELETED) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		key_beg = *key_nextp;
		if (key_beg == 0 || key_beg > key_end)
			return(NULL);
		goto again;
	}

	/*
	 * If the chain element is an indirect block it becomes the new
	 * parent and we loop on it.  We must maintain our top-down locks
	 * to prevent the flusher from interfering (i.e. doing a
	 * delete-duplicate and leaving us recursing down a deleted chain).
	 *
	 * The parent always has to be locked with at least RESOLVE_MAYBE
	 * so we can access its data.  It might need a fixup if the caller
	 * passed incompatible flags.  Be careful not to cause a deadlock
	 * as a data-load requires an exclusive lock.
	 *
	 * If HAMMER2_LOOKUP_MATCHIND is set and the indirect block's key
	 * range is within the requested key range we return the indirect
	 * block and do NOT loop.  This is usually only used to acquire
	 * freemap nodes.
	 */
	if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
	    chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
		*parentp = parent = chain;
		goto again;
	}
done:
	/*
	 * All done, return the chain.
	 *
	 * If the caller does not want a locked chain, replace the lock with
	 * a ref.  Perhaps this can eventually be optimized to not obtain the
	 * lock in the first place for situations where the data does not
	 * need to be resolved.
	 */
	if (chain) {
		if (flags & HAMMER2_LOOKUP_NOLOCK)
			hammer2_chain_unlock(chain);
	}

	return (chain);
}
1803 
1804 /*
1805  * After having issued a lookup we can iterate all matching keys.
1806  *
1807  * If chain is non-NULL we continue the iteration from just after it's index.
1808  *
1809  * If chain is NULL we assume the parent was exhausted and continue the
1810  * iteration at the next parent.
1811  *
1812  * If a fatal error occurs (typically an I/O error), a dummy chain is
1813  * returned with chain->error and error-identifying information set.  This
1814  * chain will assert if you try to do anything fancy with it.
1815  *
1816  * XXX Depending on where the error occurs we should allow continued iteration.
1817  *
1818  * parent must be locked on entry and remains locked throughout.  chain's
1819  * lock status must match flags.  Chain is always at least referenced.
1820  *
1821  * WARNING!  The MATCHIND flag does not apply to this function.
1822  */
hammer2_chain_t *
hammer2_chain_next(hammer2_chain_t **parentp, hammer2_chain_t *chain,
		   hammer2_key_t *key_nextp,
		   hammer2_key_t key_beg, hammer2_key_t key_end,
		   int *cache_indexp, int flags)
{
	hammer2_chain_t *parent;
	int how_maybe;

	/*
	 * Calculate locking flags for upward recursion.
	 */
	how_maybe = HAMMER2_RESOLVE_MAYBE;
	if (flags & HAMMER2_LOOKUP_SHARED)
		how_maybe |= HAMMER2_RESOLVE_SHARED;

	parent = *parentp;

	/*
	 * Calculate the next index and recalculate the parent if necessary.
	 */
	if (chain) {
		/*
		 * Resume just past the previous chain's key range.
		 * key_beg wrapping to 0 means we passed the top of the
		 * key space and the iteration is over.
		 */
		key_beg = chain->bref.key +
			  ((hammer2_key_t)1 << chain->bref.keybits);
		if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0)
			hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);

		/*
		 * chain invalid past this point, but we can still do a
		 * pointer comparison w/parent.
		 *
		 * Any scan where the lookup returned degenerate data embedded
		 * in the inode has an invalid index and must terminate.
		 */
		if (chain == parent)
			return(NULL);
		if (key_beg == 0 || key_beg > key_end)
			return(NULL);
		chain = NULL;
	} else if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT &&
		   parent->bref.type != HAMMER2_BREF_TYPE_FREEMAP_NODE) {
		/*
		 * We reached the end of the iteration.
		 */
		return (NULL);
	} else {
		/*
		 * Continue iteration with next parent unless the current
		 * parent covers the range.
		 */
		key_beg = parent->bref.key +
			  ((hammer2_key_t)1 << parent->bref.keybits);
		if (key_beg == 0 || key_beg > key_end)
			return (NULL);
		parent = hammer2_chain_getparent(parentp, how_maybe);
	}

	/*
	 * And execute
	 */
	return (hammer2_chain_lookup(parentp, key_nextp,
				     key_beg, key_end,
				     cache_indexp, flags));
}
1888 
1889 /*
1890  * The raw scan function is similar to lookup/next but does not seek to a key.
1891  * Blockrefs are iterated via first_chain = (parent, NULL) and
1892  * next_chain = (parent, chain).
1893  *
1894  * The passed-in parent must be locked and its data resolved.  The returned
1895  * chain will be locked.  Pass chain == NULL to acquire the first sub-chain
1896  * under parent and then iterate with the passed-in chain (which this
1897  * function will unlock).
1898  */
1899 hammer2_chain_t *
1900 hammer2_chain_scan(hammer2_chain_t *parent, hammer2_chain_t *chain,
1901 		   int *cache_indexp, int flags)
1902 {
1903 	hammer2_dev_t *hmp;
1904 	hammer2_blockref_t *base;
1905 	hammer2_blockref_t *bref;
1906 	hammer2_blockref_t bcopy;
1907 	hammer2_key_t key;
1908 	hammer2_key_t next_key;
1909 	int count = 0;
1910 	int how_always = HAMMER2_RESOLVE_ALWAYS;
1911 	int how_maybe = HAMMER2_RESOLVE_MAYBE;
1912 	int how;
1913 	int generation;
1914 	int maxloops = 300000;
1915 
1916 	hmp = parent->hmp;
1917 
1918 	/*
1919 	 * Scan flags borrowed from lookup.
1920 	 */
1921 	if (flags & HAMMER2_LOOKUP_ALWAYS) {
1922 		how_maybe = how_always;
1923 		how = HAMMER2_RESOLVE_ALWAYS;
1924 	} else if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK)) {
1925 		how = HAMMER2_RESOLVE_NEVER;
1926 	} else {
1927 		how = HAMMER2_RESOLVE_MAYBE;
1928 	}
1929 	if (flags & HAMMER2_LOOKUP_SHARED) {
1930 		how_maybe |= HAMMER2_RESOLVE_SHARED;
1931 		how_always |= HAMMER2_RESOLVE_SHARED;
1932 		how |= HAMMER2_RESOLVE_SHARED;
1933 	}
1934 
1935 	/*
1936 	 * Calculate key to locate first/next element, unlocking the previous
1937 	 * element as we go.  Be careful, the key calculation can overflow.
1938 	 */
1939 	if (chain) {
1940 		key = chain->bref.key +
1941 		      ((hammer2_key_t)1 << chain->bref.keybits);
1942 		hammer2_chain_unlock(chain);
1943 		hammer2_chain_drop(chain);
1944 		chain = NULL;
1945 		if (key == 0)
1946 			goto done;
1947 	} else {
1948 		key = 0;
1949 	}
1950 
1951 again:
1952 	KKASSERT(parent->error == 0);	/* XXX case not handled yet */
1953 	if (--maxloops == 0)
1954 		panic("hammer2_chain_scan: maxloops");
1955 	/*
1956 	 * Locate the blockref array.  Currently we do a fully associative
1957 	 * search through the array.
1958 	 */
1959 	switch(parent->bref.type) {
1960 	case HAMMER2_BREF_TYPE_INODE:
1961 		/*
1962 		 * An inode with embedded data has no sub-chains.
1963 		 */
1964 		if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA)
1965 			goto done;
1966 		base = &parent->data->ipdata.u.blockset.blockref[0];
1967 		count = HAMMER2_SET_COUNT;
1968 		break;
1969 	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1970 	case HAMMER2_BREF_TYPE_INDIRECT:
1971 		/*
1972 		 * Optimize indirect blocks in the INITIAL state to avoid
1973 		 * I/O.
1974 		 */
1975 		if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1976 			base = NULL;
1977 		} else {
1978 			if (parent->data == NULL)
1979 				panic("parent->data is NULL");
1980 			base = &parent->data->npdata[0];
1981 		}
1982 		count = parent->bytes / sizeof(hammer2_blockref_t);
1983 		break;
1984 	case HAMMER2_BREF_TYPE_VOLUME:
1985 		base = &hmp->voldata.sroot_blockset.blockref[0];
1986 		count = HAMMER2_SET_COUNT;
1987 		break;
1988 	case HAMMER2_BREF_TYPE_FREEMAP:
1989 		base = &hmp->voldata.freemap_blockset.blockref[0];
1990 		count = HAMMER2_SET_COUNT;
1991 		break;
1992 	default:
1993 		panic("hammer2_chain_lookup: unrecognized blockref type: %d",
1994 		      parent->bref.type);
1995 		base = NULL;	/* safety */
1996 		count = 0;	/* safety */
1997 	}
1998 
1999 	/*
2000 	 * Merged scan to find next candidate.
2001 	 *
2002 	 * hammer2_base_*() functions require the parent->core.live_* fields
2003 	 * to be synchronized.
2004 	 *
2005 	 * We need to hold the spinlock to access the block array and RB tree
2006 	 * and to interlock chain creation.
2007 	 */
2008 	if ((parent->core.flags & HAMMER2_CORE_COUNTEDBREFS) == 0)
2009 		hammer2_chain_countbrefs(parent, base, count);
2010 
2011 	next_key = 0;
2012 	hammer2_spin_ex(&parent->core.spin);
2013 	chain = hammer2_combined_find(parent, base, count,
2014 				      cache_indexp, &next_key,
2015 				      key, HAMMER2_KEY_MAX,
2016 				      &bref);
2017 	generation = parent->core.generation;
2018 
2019 	/*
2020 	 * Exhausted parent chain, we're done.
2021 	 */
2022 	if (bref == NULL) {
2023 		hammer2_spin_unex(&parent->core.spin);
2024 		KKASSERT(chain == NULL);
2025 		goto done;
2026 	}
2027 
2028 	/*
2029 	 * Selected from blockref or in-memory chain.
2030 	 */
2031 	if (chain == NULL) {
2032 		bcopy = *bref;
2033 		hammer2_spin_unex(&parent->core.spin);
2034 		chain = hammer2_chain_get(parent, generation, &bcopy);
2035 		if (chain == NULL) {
2036 			kprintf("retry scan parent %p keys %016jx\n",
2037 				parent, key);
2038 			goto again;
2039 		}
2040 		if (bcmp(&bcopy, bref, sizeof(bcopy))) {
2041 			hammer2_chain_drop(chain);
2042 			chain = NULL;
2043 			goto again;
2044 		}
2045 	} else {
2046 		hammer2_chain_ref(chain);
2047 		hammer2_spin_unex(&parent->core.spin);
2048 	}
2049 
2050 	/*
2051 	 * chain is referenced but not locked.  We must lock the chain
2052 	 * to obtain definitive DUPLICATED/DELETED state
2053 	 */
2054 	hammer2_chain_lock(chain, how);
2055 
2056 	/*
2057 	 * Skip deleted chains (XXX cache 'i' end-of-block-array? XXX)
2058 	 *
2059 	 * NOTE: chain's key range is not relevant as there might be
2060 	 *	 one-offs within the range that are not deleted.
2061 	 *
2062 	 * NOTE: XXX this could create problems with scans used in
2063 	 *	 situations other than mount-time recovery.
2064 	 *
2065 	 * NOTE: Lookups can race delete-duplicate because
2066 	 *	 delete-duplicate does not lock the parent's core
2067 	 *	 (they just use the spinlock on the core).  We must
2068 	 *	 check for races by comparing the DUPLICATED flag before
2069 	 *	 releasing the spinlock with the flag after locking the
2070 	 *	 chain.
2071 	 */
2072 	if (chain->flags & HAMMER2_CHAIN_DELETED) {
2073 		hammer2_chain_unlock(chain);
2074 		hammer2_chain_drop(chain);
2075 		chain = NULL;
2076 
2077 		key = next_key;
2078 		if (key == 0)
2079 			goto done;
2080 		goto again;
2081 	}
2082 
2083 done:
2084 	/*
2085 	 * All done, return the chain or NULL
2086 	 */
2087 	return (chain);
2088 }
2089 
2090 /*
2091  * Create and return a new hammer2 system memory structure of the specified
2092  * key, type and size and insert it under (*parentp).  This is a full
2093  * insertion, based on the supplied key/keybits, and may involve creating
2094  * indirect blocks and moving other chains around via delete/duplicate.
2095  *
2096  * THE CALLER MUST HAVE ALREADY PROPERLY SEEKED (*parentp) TO THE INSERTION
2097  * POINT SANS ANY REQUIRED INDIRECT BLOCK CREATIONS DUE TO THE ARRAY BEING
2098  * FULL.  This typically means that the caller is creating the chain after
2099  * doing a hammer2_chain_lookup().
2100  *
2101  * (*parentp) must be exclusive locked and may be replaced on return
2102  * depending on how much work the function had to do.
2103  *
2104  * (*parentp) must not be errored or this function will assert.
2105  *
2106  * (*chainp) usually starts out NULL and returns the newly created chain,
2107  * but if the caller desires the caller may allocate a disconnected chain
2108  * and pass it in instead.
2109  *
2110  * This function should NOT be used to insert INDIRECT blocks.  It is
2111  * typically used to create/insert inodes and data blocks.
2112  *
2113  * Caller must pass-in an exclusively locked parent the new chain is to
2114  * be inserted under, and optionally pass-in a disconnected, exclusively
2115  * locked chain to insert (else we create a new chain).  The function will
2116  * adjust (*parentp) as necessary, create or connect the chain, and
2117  * return an exclusively locked chain in *chainp.
2118  *
2119  * When creating a PFSROOT inode under the super-root, pmp is typically NULL
2120  * and will be reassigned.
2121  */
2122 int
2123 hammer2_chain_create(hammer2_trans_t *trans, hammer2_chain_t **parentp,
2124 		     hammer2_chain_t **chainp, hammer2_pfs_t *pmp,
2125 		     hammer2_key_t key, int keybits, int type, size_t bytes,
2126 		     int flags)
2127 {
2128 	hammer2_dev_t *hmp;
2129 	hammer2_chain_t *chain;
2130 	hammer2_chain_t *parent;
2131 	hammer2_blockref_t *base;
2132 	hammer2_blockref_t dummy;
2133 	int allocated = 0;
2134 	int error = 0;
2135 	int count;
2136 	int maxloops = 300000;
2137 
2138 	/*
2139 	 * Topology may be crossing a PFS boundary.
2140 	 */
2141 	parent = *parentp;
2142 	KKASSERT(hammer2_mtx_owned(&parent->core.lock));
2143 	KKASSERT(parent->error == 0);
2144 	hmp = parent->hmp;
2145 	chain = *chainp;
2146 
2147 	if (chain == NULL) {
2148 		/*
2149 		 * First allocate media space and construct the dummy bref,
2150 		 * then allocate the in-memory chain structure.  Set the
2151 		 * INITIAL flag for fresh chains which do not have embedded
2152 		 * data.
2153 		 */
2154 		bzero(&dummy, sizeof(dummy));
2155 		dummy.type = type;
2156 		dummy.key = key;
2157 		dummy.keybits = keybits;
2158 		dummy.data_off = hammer2_getradix(bytes);
2159 		dummy.methods = parent->bref.methods;
2160 		chain = hammer2_chain_alloc(hmp, pmp, trans, &dummy);
2161 
2162 		/*
2163 		 * Lock the chain manually, chain_lock will load the chain
2164 		 * which we do NOT want to do.  (note: chain->refs is set
2165 		 * to 1 by chain_alloc() for us, but lockcnt is not).
2166 		 */
2167 		chain->lockcnt = 1;
2168 		hammer2_mtx_ex(&chain->core.lock);
2169 		allocated = 1;
2170 
2171 		/*
2172 		 * We do NOT set INITIAL here (yet).  INITIAL is only
2173 		 * used for indirect blocks.
2174 		 *
2175 		 * Recalculate bytes to reflect the actual media block
2176 		 * allocation.
2177 		 */
2178 		bytes = (hammer2_off_t)1 <<
2179 			(int)(chain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
2180 		chain->bytes = bytes;
2181 
2182 		switch(type) {
2183 		case HAMMER2_BREF_TYPE_VOLUME:
2184 		case HAMMER2_BREF_TYPE_FREEMAP:
2185 			panic("hammer2_chain_create: called with volume type");
2186 			break;
2187 		case HAMMER2_BREF_TYPE_INDIRECT:
2188 			panic("hammer2_chain_create: cannot be used to"
2189 			      "create indirect block");
2190 			break;
2191 		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2192 			panic("hammer2_chain_create: cannot be used to"
2193 			      "create freemap root or node");
2194 			break;
2195 		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2196 			KKASSERT(bytes == sizeof(chain->data->bmdata));
2197 			/* fall through */
2198 		case HAMMER2_BREF_TYPE_INODE:
2199 		case HAMMER2_BREF_TYPE_DATA:
2200 		default:
2201 			/*
2202 			 * leave chain->data NULL, set INITIAL
2203 			 */
2204 			KKASSERT(chain->data == NULL);
2205 			atomic_set_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
2206 			break;
2207 		}
2208 
2209 		/*
2210 		 * Set statistics for pending updates.  These will be
2211 		 * synchronized by the flush code.
2212 		 */
2213 		switch(type) {
2214 		case HAMMER2_BREF_TYPE_INODE:
2215 			chain->inode_count = 1;
2216 			break;
2217 		case HAMMER2_BREF_TYPE_DATA:
2218 		case HAMMER2_BREF_TYPE_INDIRECT:
2219 			chain->data_count = chain->bytes;
2220 			break;
2221 		}
2222 	} else {
2223 		/*
2224 		 * We are reattaching a previously deleted chain, possibly
2225 		 * under a new parent and possibly with a new key/keybits.
2226 		 * The chain does not have to be in a modified state.  The
2227 		 * UPDATE flag will be set later on in this routine.
2228 		 *
2229 		 * Do NOT mess with the current state of the INITIAL flag.
2230 		 */
2231 		chain->bref.key = key;
2232 		chain->bref.keybits = keybits;
2233 		if (chain->flags & HAMMER2_CHAIN_DELETED)
2234 			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DELETED);
2235 		KKASSERT(chain->parent == NULL);
2236 	}
2237 	if (flags & HAMMER2_INSERT_PFSROOT)
2238 		chain->bref.flags |= HAMMER2_BREF_FLAG_PFSROOT;
2239 	else
2240 		chain->bref.flags &= ~HAMMER2_BREF_FLAG_PFSROOT;
2241 
2242 	/*
2243 	 * Calculate how many entries we have in the blockref array and
2244 	 * determine if an indirect block is required.
2245 	 */
2246 again:
2247 	if (--maxloops == 0)
2248 		panic("hammer2_chain_create: maxloops");
2249 
2250 	switch(parent->bref.type) {
2251 	case HAMMER2_BREF_TYPE_INODE:
2252 		KKASSERT((parent->data->ipdata.op_flags &
2253 			  HAMMER2_OPFLAG_DIRECTDATA) == 0);
2254 		KKASSERT(parent->data != NULL);
2255 		base = &parent->data->ipdata.u.blockset.blockref[0];
2256 		count = HAMMER2_SET_COUNT;
2257 		break;
2258 	case HAMMER2_BREF_TYPE_INDIRECT:
2259 	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2260 		if (parent->flags & HAMMER2_CHAIN_INITIAL)
2261 			base = NULL;
2262 		else
2263 			base = &parent->data->npdata[0];
2264 		count = parent->bytes / sizeof(hammer2_blockref_t);
2265 		break;
2266 	case HAMMER2_BREF_TYPE_VOLUME:
2267 		KKASSERT(parent->data != NULL);
2268 		base = &hmp->voldata.sroot_blockset.blockref[0];
2269 		count = HAMMER2_SET_COUNT;
2270 		break;
2271 	case HAMMER2_BREF_TYPE_FREEMAP:
2272 		KKASSERT(parent->data != NULL);
2273 		base = &hmp->voldata.freemap_blockset.blockref[0];
2274 		count = HAMMER2_SET_COUNT;
2275 		break;
2276 	default:
2277 		panic("hammer2_chain_create: unrecognized blockref type: %d",
2278 		      parent->bref.type);
2279 		base = NULL;
2280 		count = 0;
2281 		break;
2282 	}
2283 
2284 	/*
2285 	 * Make sure we've counted the brefs
2286 	 */
2287 	if ((parent->core.flags & HAMMER2_CORE_COUNTEDBREFS) == 0)
2288 		hammer2_chain_countbrefs(parent, base, count);
2289 
2290 	KKASSERT(parent->core.live_count >= 0 &&
2291 		 parent->core.live_count <= count);
2292 
2293 	/*
2294 	 * If no free blockref could be found we must create an indirect
2295 	 * block and move a number of blockrefs into it.  With the parent
2296 	 * locked we can safely lock each child in order to delete+duplicate
2297 	 * it without causing a deadlock.
2298 	 *
2299 	 * This may return the new indirect block or the old parent depending
2300 	 * on where the key falls.  NULL is returned on error.
2301 	 */
2302 	if (parent->core.live_count == count) {
2303 		hammer2_chain_t *nparent;
2304 
2305 		nparent = hammer2_chain_create_indirect(trans, parent,
2306 							key, keybits,
2307 							type, &error);
2308 		if (nparent == NULL) {
2309 			if (allocated)
2310 				hammer2_chain_drop(chain);
2311 			chain = NULL;
2312 			goto done;
2313 		}
2314 		if (parent != nparent) {
2315 			hammer2_chain_unlock(parent);
2316 			hammer2_chain_drop(parent);
2317 			parent = *parentp = nparent;
2318 		}
2319 		goto again;
2320 	}
2321 
2322 	/*
2323 	 * Link the chain into its parent.
2324 	 */
2325 	if (chain->parent != NULL)
2326 		panic("hammer2: hammer2_chain_create: chain already connected");
2327 	KKASSERT(chain->parent == NULL);
2328 	hammer2_chain_insert(parent, chain,
2329 			     HAMMER2_CHAIN_INSERT_SPIN |
2330 			     HAMMER2_CHAIN_INSERT_LIVE,
2331 			     0);
2332 
2333 	if (allocated) {
2334 		/*
2335 		 * Mark the newly created chain modified.  This will cause
2336 		 * UPDATE to be set.
2337 		 *
2338 		 * Device buffers are not instantiated for DATA elements
2339 		 * as these are handled by logical buffers.
2340 		 *
2341 		 * Indirect and freemap node indirect blocks are handled
2342 		 * by hammer2_chain_create_indirect() and not by this
2343 		 * function.
2344 		 *
2345 		 * Data for all other bref types is expected to be
2346 		 * instantiated (INODE, LEAF).
2347 		 */
2348 		switch(chain->bref.type) {
2349 		case HAMMER2_BREF_TYPE_DATA:
2350 		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2351 		case HAMMER2_BREF_TYPE_INODE:
2352 			hammer2_chain_modify(trans, chain,
2353 					     HAMMER2_MODIFY_OPTDATA);
2354 			break;
2355 		default:
2356 			/*
2357 			 * Remaining types are not supported by this function.
2358 			 * In particular, INDIRECT and LEAF_NODE types are
2359 			 * handled by create_indirect().
2360 			 */
2361 			panic("hammer2_chain_create: bad type: %d",
2362 			      chain->bref.type);
2363 			/* NOT REACHED */
2364 			break;
2365 		}
2366 	} else {
2367 		/*
2368 		 * When reconnecting a chain we must set UPDATE and
2369 		 * setflush so the flush recognizes that it must update
2370 		 * the bref in the parent.
2371 		 */
2372 		if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0) {
2373 			hammer2_chain_ref(chain);
2374 			atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
2375 		}
2376 		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
2377 		    (flags & HAMMER2_INSERT_NOSTATS) == 0) {
2378 			KKASSERT(chain->data);
2379 			chain->inode_count_up +=
2380 				chain->data->ipdata.inode_count;
2381 			chain->data_count_up +=
2382 				chain->data->ipdata.data_count;
2383 		}
2384 	}
2385 
2386 	/*
2387 	 * We must setflush(parent) to ensure that it recurses through to
2388 	 * chain.  setflush(chain) might not work because ONFLUSH is possibly
2389 	 * already set in the chain (so it won't recurse up to set it in the
2390 	 * parent).
2391 	 */
2392 	hammer2_chain_setflush(trans, parent);
2393 
2394 done:
2395 	*chainp = chain;
2396 
2397 	return (error);
2398 }
2399 
2400 /*
2401  * Move the chain from its old parent to a new parent.  The chain must have
2402  * already been deleted or already disconnected (or never associated) with
2403  * a parent.  The chain is reassociated with the new parent and the deleted
2404  * flag will be cleared (no longer deleted).  The chain's modification state
2405  * is not altered.
2406  *
2407  * THE CALLER MUST HAVE ALREADY PROPERLY SEEKED (parent) TO THE INSERTION
2408  * POINT SANS ANY REQUIRED INDIRECT BLOCK CREATIONS DUE TO THE ARRAY BEING
2409  * FULL.  This typically means that the caller is creating the chain after
2410  * doing a hammer2_chain_lookup().
2411  *
2412  * A non-NULL bref is typically passed when key and keybits must be overridden.
2413  * Note that hammer2_cluster_duplicate() *ONLY* uses the key and keybits fields
2414  * from a passed-in bref and uses the old chain's bref for everything else.
2415  *
2416  * Neither (parent) or (chain) can be errored.
2417  *
2418  * If (parent) is non-NULL then the new duplicated chain is inserted under
2419  * the parent.
2420  *
2421  * If (parent) is NULL then the newly duplicated chain is not inserted
2422  * anywhere, similar to if it had just been chain_alloc()'d (suitable for
2423  * passing into hammer2_chain_create() after this function returns).
2424  *
2425  * WARNING! This function calls create which means it can insert indirect
2426  *	    blocks.  This can cause other unrelated chains in the parent to
2427  *	    be moved to a newly inserted indirect block in addition to the
2428  *	    specific chain.
2429  */
2430 void
2431 hammer2_chain_rename(hammer2_trans_t *trans, hammer2_blockref_t *bref,
2432 		     hammer2_chain_t **parentp, hammer2_chain_t *chain,
2433 		     int flags)
2434 {
2435 	hammer2_dev_t *hmp;
2436 	hammer2_chain_t *parent;
2437 	size_t bytes;
2438 
2439 	/*
2440 	 * WARNING!  We should never resolve DATA to device buffers
2441 	 *	     (XXX allow it if the caller did?), and since
2442 	 *	     we currently do not have the logical buffer cache
2443 	 *	     buffer in-hand to fix its cached physical offset
2444 	 *	     we also force the modify code to not COW it. XXX
2445 	 */
2446 	hmp = chain->hmp;
2447 	KKASSERT(chain->parent == NULL);
2448 	KKASSERT(chain->error == 0);
2449 
2450 	/*
2451 	 * Now create a duplicate of the chain structure, associating
2452 	 * it with the same core, making it the same size, pointing it
2453 	 * to the same bref (the same media block).
2454 	 */
2455 	if (bref == NULL)
2456 		bref = &chain->bref;
2457 	bytes = (hammer2_off_t)1 <<
2458 		(int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
2459 
2460 	/*
2461 	 * If parent is not NULL the duplicated chain will be entered under
2462 	 * the parent and the UPDATE bit set to tell flush to update
2463 	 * the blockref.
2464 	 *
2465 	 * We must setflush(parent) to ensure that it recurses through to
2466 	 * chain.  setflush(chain) might not work because ONFLUSH is possibly
2467 	 * already set in the chain (so it won't recurse up to set it in the
2468 	 * parent).
2469 	 *
2470 	 * Having both chains locked is extremely important for atomicy.
2471 	 */
2472 	if (parentp && (parent = *parentp) != NULL) {
2473 		KKASSERT(hammer2_mtx_owned(&parent->core.lock));
2474 		KKASSERT(parent->refs > 0);
2475 		KKASSERT(parent->error == 0);
2476 
2477 		hammer2_chain_create(trans, parentp, &chain, chain->pmp,
2478 				     bref->key, bref->keybits, bref->type,
2479 				     chain->bytes, flags);
2480 		KKASSERT(chain->flags & HAMMER2_CHAIN_UPDATE);
2481 		hammer2_chain_setflush(trans, *parentp);
2482 	}
2483 }
2484 
2485 /*
2486  * Helper function for deleting chains.
2487  *
2488  * The chain is removed from the live view (the RBTREE) as well as the parent's
2489  * blockmap.  Both chain and its parent must be locked.
2490  *
2491  * parent may not be errored.  chain can be errored.
2492  */
2493 static void
2494 _hammer2_chain_delete_helper(hammer2_trans_t *trans,
2495 			     hammer2_chain_t *parent, hammer2_chain_t *chain,
2496 			     int flags)
2497 {
2498 	hammer2_dev_t *hmp;
2499 
2500 	KKASSERT((chain->flags & (HAMMER2_CHAIN_DELETED |
2501 				  HAMMER2_CHAIN_FICTITIOUS)) == 0);
2502 	hmp = chain->hmp;
2503 
2504 	if (chain->flags & HAMMER2_CHAIN_BMAPPED) {
2505 		/*
2506 		 * Chain is blockmapped, so there must be a parent.
2507 		 * Atomically remove the chain from the parent and remove
2508 		 * the blockmap entry.
2509 		 */
2510 		hammer2_blockref_t *base;
2511 		int count;
2512 
2513 		KKASSERT(parent != NULL);
2514 		KKASSERT(parent->error == 0);
2515 		KKASSERT((parent->flags & HAMMER2_CHAIN_INITIAL) == 0);
2516 		hammer2_chain_modify(trans, parent,
2517 				     HAMMER2_MODIFY_OPTDATA);
2518 
2519 		/*
2520 		 * Calculate blockmap pointer
2521 		 */
2522 		KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
2523 		hammer2_spin_ex(&parent->core.spin);
2524 
2525 		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
2526 		atomic_add_int(&parent->core.live_count, -1);
2527 		++parent->core.generation;
2528 		RB_REMOVE(hammer2_chain_tree, &parent->core.rbtree, chain);
2529 		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
2530 		--parent->core.chain_count;
2531 		chain->parent = NULL;
2532 
2533 		switch(parent->bref.type) {
2534 		case HAMMER2_BREF_TYPE_INODE:
2535 			/*
2536 			 * Access the inode's block array.  However, there
2537 			 * is no block array if the inode is flagged
2538 			 * DIRECTDATA.  The DIRECTDATA case typicaly only
2539 			 * occurs when a hardlink has been shifted up the
2540 			 * tree and the original inode gets replaced with
2541 			 * an OBJTYPE_HARDLINK placeholding inode.
2542 			 */
2543 			if (parent->data &&
2544 			    (parent->data->ipdata.op_flags &
2545 			     HAMMER2_OPFLAG_DIRECTDATA) == 0) {
2546 				base =
2547 				   &parent->data->ipdata.u.blockset.blockref[0];
2548 			} else {
2549 				base = NULL;
2550 			}
2551 			count = HAMMER2_SET_COUNT;
2552 			break;
2553 		case HAMMER2_BREF_TYPE_INDIRECT:
2554 		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2555 			if (parent->data)
2556 				base = &parent->data->npdata[0];
2557 			else
2558 				base = NULL;
2559 			count = parent->bytes / sizeof(hammer2_blockref_t);
2560 			break;
2561 		case HAMMER2_BREF_TYPE_VOLUME:
2562 			base = &hmp->voldata.sroot_blockset.blockref[0];
2563 			count = HAMMER2_SET_COUNT;
2564 			break;
2565 		case HAMMER2_BREF_TYPE_FREEMAP:
2566 			base = &parent->data->npdata[0];
2567 			count = HAMMER2_SET_COUNT;
2568 			break;
2569 		default:
2570 			base = NULL;
2571 			count = 0;
2572 			panic("hammer2_flush_pass2: "
2573 			      "unrecognized blockref type: %d",
2574 			      parent->bref.type);
2575 		}
2576 
2577 		/*
2578 		 * delete blockmapped chain from its parent.
2579 		 *
2580 		 * The parent is not affected by any statistics in chain
2581 		 * which are pending synchronization.  That is, there is
2582 		 * nothing to undo in the parent since they have not yet
2583 		 * been incorporated into the parent.
2584 		 *
2585 		 * The parent is affected by statistics stored in inodes.
2586 		 * Those have already been synchronized, so they must be
2587 		 * undone.  XXX split update possible w/delete in middle?
2588 		 */
2589 		if (base) {
2590 			if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
2591 			    (flags & HAMMER2_DELETE_NOSTATS) == 0) {
2592 				KKASSERT(chain->data != NULL);
2593 				parent->data_count -=
2594 					chain->data->ipdata.data_count;
2595 				parent->inode_count -=
2596 					chain->data->ipdata.inode_count;
2597 			}
2598 
2599 			int cache_index = -1;
2600 			hammer2_base_delete(trans, parent, base, count,
2601 					    &cache_index, chain);
2602 		}
2603 		hammer2_spin_unex(&parent->core.spin);
2604 	} else if (chain->flags & HAMMER2_CHAIN_ONRBTREE) {
2605 		/*
2606 		 * Chain is not blockmapped but a parent is present.
2607 		 * Atomically remove the chain from the parent.  There is
2608 		 * no blockmap entry to remove.
2609 		 *
2610 		 * Because chain was associated with a parent but not
2611 		 * synchronized, the chain's *_count_up fields contain
2612 		 * inode adjustment statistics which must be undone.
2613 		 */
2614 		hammer2_spin_ex(&parent->core.spin);
2615 		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
2616 		    (flags & HAMMER2_DELETE_NOSTATS) == 0) {
2617 			KKASSERT(chain->data != NULL);
2618 			chain->data_count_up -=
2619 				chain->data->ipdata.data_count;
2620 			chain->inode_count_up -=
2621 				chain->data->ipdata.inode_count;
2622 		}
2623 		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
2624 		atomic_add_int(&parent->core.live_count, -1);
2625 		++parent->core.generation;
2626 		RB_REMOVE(hammer2_chain_tree, &parent->core.rbtree, chain);
2627 		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
2628 		--parent->core.chain_count;
2629 		chain->parent = NULL;
2630 		hammer2_spin_unex(&parent->core.spin);
2631 	} else {
2632 		/*
2633 		 * Chain is not blockmapped and has no parent.  This
2634 		 * is a degenerate case.
2635 		 */
2636 		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
2637 	}
2638 
2639 #if 0
2640 	/*
2641 	 * If the deletion is permanent (i.e. the chain is not simply being
2642 	 * moved within the topology), adjust the freemap to indicate that
2643 	 * the block *might* be freeable.  bulkfree must still determine
2644 	 * that it is actually freeable.
2645 	 *
2646 	 * We no longer do this in the normal filesystem operations path
2647 	 * as it interferes with the bulkfree algorithm.
2648 	 */
2649 	if ((flags & HAMMER2_DELETE_PERMANENT) &&
2650 	    chain->bref.type != HAMMER2_BREF_TYPE_FREEMAP_NODE &&
2651 	    chain->bref.type != HAMMER2_BREF_TYPE_FREEMAP_LEAF &&
2652 	    (chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX)) {
2653 		hammer2_freemap_adjust(trans, hmp, &chain->bref,
2654 				       HAMMER2_FREEMAP_DOMAYFREE);
2655 	}
2656 #endif
2657 }
2658 
2659 /*
2660  * Create an indirect block that covers one or more of the elements in the
2661  * current parent.  Either returns the existing parent with no locking or
2662  * ref changes or returns the new indirect block locked and referenced
2663  * and leaving the original parent lock/ref intact as well.
2664  *
2665  * If an error occurs, NULL is returned and *errorp is set to the error.
2666  *
2667  * The returned chain depends on where the specified key falls.
2668  *
2669  * The key/keybits for the indirect mode only needs to follow four rules:
2670  *
2671  * (1) That all elements underneath it fit within its key space and
2672  *
2673  * (2) That all elements outside it are outside its key space.
2674  *
2675  * (3) When creating the new indirect block any elements in the current
2676  *     parent that fit within the new indirect block's keyspace must be
2677  *     moved into the new indirect block.
2678  *
2679  * (4) The keyspace chosen for the inserted indirect block CAN cover a wider
2680  *     keyspace than the current parent, but lookup/iteration rules will
2681  *     ensure (and must ensure) that rule (2) for all parents leading up
2682  *     to the nearest inode or the root volume header is adhered to.  This
2683  *     is accomplished by always recursing through matching keyspaces in
2684  *     the hammer2_chain_lookup() and hammer2_chain_next() API.
2685  *
2686  * The current implementation calculates the current worst-case keyspace by
2687  * iterating the current parent and then divides it into two halves, choosing
2688  * whichever half has the most elements (not necessarily the half containing
2689  * the requested key).
2690  *
2691  * We can also opt to use the half with the least number of elements.  This
2692  * causes lower-numbered keys (aka logical file offsets) to recurse through
2693  * fewer indirect blocks and higher-numbered keys to recurse through more.
2694  * This also has the risk of not moving enough elements to the new indirect
2695  * block and being forced to create several indirect blocks before the element
2696  * can be inserted.
2697  *
2698  * Must be called with an exclusively locked parent.
2699  */
2700 static int hammer2_chain_indkey_freemap(hammer2_chain_t *parent,
2701 				hammer2_key_t *keyp, int keybits,
2702 				hammer2_blockref_t *base, int count);
2703 static int hammer2_chain_indkey_normal(hammer2_chain_t *parent,
2704 				hammer2_key_t *keyp, int keybits,
2705 				hammer2_blockref_t *base, int count);
2706 static
hammer2_chain_t *
hammer2_chain_create_indirect(hammer2_trans_t *trans, hammer2_chain_t *parent,
			      hammer2_key_t create_key, int create_bits,
			      int for_type, int *errorp)
{
	hammer2_dev_t *hmp;
	hammer2_blockref_t *base;	/* parent's media blockref array */
	hammer2_blockref_t *bref;	/* current scan element */
	hammer2_blockref_t bcopy;	/* stable copy taken under spinlock */
	hammer2_chain_t *chain;
	hammer2_chain_t *ichain;	/* the new indirect block */
	hammer2_chain_t dummy;		/* template for chain allocation */
	hammer2_key_t key = create_key;
	hammer2_key_t key_beg;
	hammer2_key_t key_end;
	hammer2_key_t key_next;
	int keybits = create_bits;
	int count;
	int nbytes;
	int cache_index;
	int loops;
	int reason;			/* retry-reason code for the panic msg */
	int generation;
	int maxloops = 300000;

	/*
	 * Calculate the base blockref pointer or NULL if the chain
	 * is known to be empty.  We need to calculate the array count
	 * for RB lookups either way.
	 */
	hmp = parent->hmp;
	*errorp = 0;
	KKASSERT(hammer2_mtx_owned(&parent->core.lock));

	/*hammer2_chain_modify(trans, &parent, HAMMER2_MODIFY_OPTDATA);*/
	if (parent->flags & HAMMER2_CHAIN_INITIAL) {
		/*
		 * INITIAL parent has no media data yet, so there is no
		 * blockref array to scan.  We still need (count) for the
		 * combined find.
		 */
		base = NULL;

		switch(parent->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
			count = parent->bytes / sizeof(hammer2_blockref_t);
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_FREEMAP:
			count = HAMMER2_SET_COUNT;
			break;
		default:
			panic("hammer2_chain_create_indirect: "
			      "unrecognized blockref type: %d",
			      parent->bref.type);
			count = 0;
			break;
		}
	} else {
		switch(parent->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			base = &parent->data->ipdata.u.blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
			base = &parent->data->npdata[0];
			count = parent->bytes / sizeof(hammer2_blockref_t);
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			base = &hmp->voldata.sroot_blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_FREEMAP:
			base = &hmp->voldata.freemap_blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		default:
			panic("hammer2_chain_create_indirect: "
			      "unrecognized blockref type: %d",
			      parent->bref.type);
			count = 0;
			break;
		}
	}

	/*
	 * dummy used in later chain allocation (no longer used for lookups).
	 */
	bzero(&dummy, sizeof(dummy));

	/*
	 * When creating an indirect block for a freemap node or leaf
	 * the key/keybits must be fitted to static radix levels because
	 * particular radix levels use particular reserved blocks in the
	 * related zone.
	 *
	 * This routine calculates the key/radix of the indirect block
	 * we need to create, and whether it is on the high-side or the
	 * low-side.
	 */
	if (for_type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
	    for_type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
		keybits = hammer2_chain_indkey_freemap(parent, &key, keybits,
						       base, count);
	} else {
		keybits = hammer2_chain_indkey_normal(parent, &key, keybits,
						      base, count);
	}

	/*
	 * Normalize the key for the radix being represented, keeping the
	 * high bits and throwing away the low bits.
	 */
	key &= ~(((hammer2_key_t)1 << keybits) - 1);

	/*
	 * How big should our new indirect block be?  It has to be at least
	 * as large as its parent.
	 */
	if (parent->bref.type == HAMMER2_BREF_TYPE_INODE)
		nbytes = HAMMER2_IND_BYTES_MIN;
	else
		nbytes = HAMMER2_IND_BYTES_MAX;
	if (nbytes < count * sizeof(hammer2_blockref_t))
		nbytes = count * sizeof(hammer2_blockref_t);

	/*
	 * Ok, create our new indirect block
	 */
	if (for_type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
	    for_type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
		dummy.bref.type = HAMMER2_BREF_TYPE_FREEMAP_NODE;
	} else {
		dummy.bref.type = HAMMER2_BREF_TYPE_INDIRECT;
	}
	dummy.bref.key = key;
	dummy.bref.keybits = keybits;
	dummy.bref.data_off = hammer2_getradix(nbytes);
	dummy.bref.methods = parent->bref.methods;

	ichain = hammer2_chain_alloc(hmp, parent->pmp, trans, &dummy.bref);
	atomic_set_int(&ichain->flags, HAMMER2_CHAIN_INITIAL);
	hammer2_chain_lock(ichain, HAMMER2_RESOLVE_MAYBE);
	/* ichain has one ref at this point */

	/*
	 * We have to mark it modified to allocate its block, but use
	 * OPTDATA to allow it to remain in the INITIAL state.  Otherwise
	 * it won't be acted upon by the flush code.
	 */
	hammer2_chain_modify(trans, ichain, HAMMER2_MODIFY_OPTDATA);

	/*
	 * Iterate the original parent and move the matching brefs into
	 * the new indirect block.
	 *
	 * XXX handle flushes.
	 */
	key_beg = 0;
	key_end = HAMMER2_KEY_MAX;
	cache_index = 0;
	hammer2_spin_ex(&parent->core.spin);
	loops = 0;
	reason = 0;

	for (;;) {
		/*
		 * Sanity brake: bail out loudly if we appear to be stuck
		 * retrying races forever.
		 */
		if (++loops > 100000) {
		    hammer2_spin_unex(&parent->core.spin);
		    panic("excessive loops r=%d p=%p base/count %p:%d %016jx\n",
			  reason, parent, base, count, key_next);
		}

		/*
		 * NOTE: spinlock stays intact, returned chain (if not NULL)
		 *	 is not referenced or locked which means that we
		 *	 cannot safely check its flagged / deletion status
		 *	 until we lock it.
		 */
		chain = hammer2_combined_find(parent, base, count,
					      &cache_index, &key_next,
					      key_beg, key_end,
					      &bref);
		generation = parent->core.generation;
		if (bref == NULL)
			break;
		key_next = bref->key + ((hammer2_key_t)1 << bref->keybits);

		/*
		 * Skip keys that are not within the key/radix of the new
		 * indirect block.  They stay in the parent.
		 */
		if ((~(((hammer2_key_t)1 << keybits) - 1) &
		    (key ^ bref->key)) != 0) {
			goto next_key_spinlocked;
		}

		/*
		 * Load the new indirect block by acquiring the related
		 * chains (potentially from media as it might not be
		 * in-memory).  Then move it to the new parent (ichain)
		 * via DELETE-DUPLICATE.
		 *
		 * chain is referenced but not locked.  We must lock the
		 * chain to obtain definitive DUPLICATED/DELETED state
		 */
		if (chain) {
			/*
			 * Use chain already present in the RBTREE
			 */
			hammer2_chain_ref(chain);
			hammer2_spin_unex(&parent->core.spin);
			hammer2_chain_lock(chain, HAMMER2_RESOLVE_NEVER);
		} else {
			/*
			 * Get chain for blockref element.  _get returns NULL
			 * on insertion race.
			 *
			 * Copy the bref before dropping the spinlock so we
			 * can detect a concurrent modification afterwards.
			 */
			bcopy = *bref;
			hammer2_spin_unex(&parent->core.spin);
			chain = hammer2_chain_get(parent, generation, &bcopy);
			if (chain == NULL) {
				reason = 1;
				hammer2_spin_ex(&parent->core.spin);
				continue;
			}
			if (bcmp(&bcopy, bref, sizeof(bcopy))) {
				/* bref changed under us, retry */
				kprintf("REASON 2\n");
				reason = 2;
				hammer2_chain_drop(chain);
				hammer2_spin_ex(&parent->core.spin);
				continue;
			}
			hammer2_chain_lock(chain, HAMMER2_RESOLVE_NEVER);
		}

		/*
		 * This is always live so if the chain has been deleted
		 * we raced someone and we have to retry.
		 *
		 * NOTE: Lookups can race delete-duplicate because
		 *	 delete-duplicate does not lock the parent's core
		 *	 (they just use the spinlock on the core).  We must
		 *	 check for races by comparing the DUPLICATED flag before
		 *	 releasing the spinlock with the flag after locking the
		 *	 chain.
		 *
		 *	 (note reversed logic for this one)
		 */
		if (chain->flags & HAMMER2_CHAIN_DELETED) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
			goto next_key;
		}

		/*
		 * Shift the chain to the indirect block.
		 *
		 * WARNING! No reason for us to load chain data, pass NOSTATS
		 *	    to prevent delete/insert from trying to access
		 *	    inode stats (and thus asserting if there is no
		 *	    chain->data loaded).
		 */
		hammer2_chain_delete(trans, parent, chain,
				     HAMMER2_DELETE_NOSTATS);
		hammer2_chain_rename(trans, NULL, &ichain, chain,
				     HAMMER2_INSERT_NOSTATS);
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		KKASSERT(parent->refs > 0);
		chain = NULL;
next_key:
		hammer2_spin_ex(&parent->core.spin);
next_key_spinlocked:
		if (--maxloops == 0)
			panic("hammer2_chain_create_indirect: maxloops");
		reason = 4;
		if (key_next == 0 || key_next > key_end)
			break;
		key_beg = key_next;
		/* loop */
	}
	hammer2_spin_unex(&parent->core.spin);

	/*
	 * Insert the new indirect block into the parent now that we've
	 * cleared out some entries in the parent.  We calculated a good
	 * insertion index in the loop above (ichain->index).
	 *
	 * We don't have to set UPDATE here because we mark ichain
	 * modified down below (so the normal modified -> flush -> set-moved
	 * sequence applies).
	 *
	 * The insertion shouldn't race as this is a completely new block
	 * and the parent is locked.
	 */
	KKASSERT((ichain->flags & HAMMER2_CHAIN_ONRBTREE) == 0);
	hammer2_chain_insert(parent, ichain,
			     HAMMER2_CHAIN_INSERT_SPIN |
			     HAMMER2_CHAIN_INSERT_LIVE,
			     0);

	/*
	 * Make sure flushes propagate after our manual insertion.
	 */
	hammer2_chain_setflush(trans, ichain);
	hammer2_chain_setflush(trans, parent);

	/*
	 * Figure out what to return.
	 */
	if (~(((hammer2_key_t)1 << keybits) - 1) &
		   (create_key ^ key)) {
		/*
		 * Key being created is outside the key range,
		 * return the original parent.
		 */
		hammer2_chain_unlock(ichain);
		hammer2_chain_drop(ichain);
	} else {
		/*
		 * Otherwise its in the range, return the new parent.
		 * (leave both the new and old parent locked).
		 */
		parent = ichain;
	}

	return(parent);
}
3037 
3038 /*
3039  * Calculate the keybits and highside/lowside of the freemap node the
3040  * caller is creating.
3041  *
3042  * This routine will specify the next higher-level freemap key/radix
3043  * representing the lowest-ordered set.  By doing so, eventually all
3044  * low-ordered sets will be moved one level down.
3045  *
3046  * We have to be careful here because the freemap reserves a limited
3047  * number of blocks for a limited number of levels.  So we can't just
3048  * push indiscriminately.
3049  */
int
hammer2_chain_indkey_freemap(hammer2_chain_t *parent, hammer2_key_t *keyp,
			     int keybits, hammer2_blockref_t *base, int count)
{
	hammer2_chain_t *chain;
	hammer2_blockref_t *bref;
	hammer2_key_t key;
	hammer2_key_t key_beg;
	hammer2_key_t key_end;
	hammer2_key_t key_next;
	int cache_index;
	int locount;	/* set but not used for the freemap decision */
	int hicount;	/* set but not used for the freemap decision */
	int maxloops = 300000;

	key = *keyp;
	locount = 0;
	hicount = 0;
	keybits = 64;	/* start maximal, shrink to the smallest radix seen */

	/*
	 * Calculate the range of keys in the array being careful to skip
	 * slots which are overridden with a deletion.
	 */
	key_beg = 0;
	key_end = HAMMER2_KEY_MAX;
	cache_index = 0;
	hammer2_spin_ex(&parent->core.spin);

	for (;;) {
		if (--maxloops == 0) {
			panic("indkey_freemap shit %p %p:%d\n",
			      parent, base, count);
		}
		chain = hammer2_combined_find(parent, base, count,
					      &cache_index, &key_next,
					      key_beg, key_end,
					      &bref);

		/*
		 * Exhausted search
		 */
		if (bref == NULL)
			break;

		/*
		 * Skip deleted chains.
		 */
		if (chain && (chain->flags & HAMMER2_CHAIN_DELETED)) {
			if (key_next == 0 || key_next > key_end)
				break;
			key_beg = key_next;
			continue;
		}

		/*
		 * Use the full live (not deleted) element for the scan
		 * iteration.  HAMMER2 does not allow partial replacements.
		 *
		 * XXX should be built into hammer2_combined_find().
		 */
		key_next = bref->key + ((hammer2_key_t)1 << bref->keybits);

		/*
		 * Track the lowest-radix, lowest-key element seen so far.
		 */
		if (keybits > bref->keybits) {
			key = bref->key;
			keybits = bref->keybits;
		} else if (keybits == bref->keybits && bref->key < key) {
			key = bref->key;
		}
		if (key_next == 0)
			break;	/* key space wrapped, scan complete */
		key_beg = key_next;
	}
	hammer2_spin_unex(&parent->core.spin);

	/*
	 * Return the keybits for a higher-level FREEMAP_NODE covering
	 * this node.  The freemap uses fixed radix levels, so promote
	 * the smallest level found to the next level up.
	 */
	switch(keybits) {
	case HAMMER2_FREEMAP_LEVEL0_RADIX:
		keybits = HAMMER2_FREEMAP_LEVEL1_RADIX;
		break;
	case HAMMER2_FREEMAP_LEVEL1_RADIX:
		keybits = HAMMER2_FREEMAP_LEVEL2_RADIX;
		break;
	case HAMMER2_FREEMAP_LEVEL2_RADIX:
		keybits = HAMMER2_FREEMAP_LEVEL3_RADIX;
		break;
	case HAMMER2_FREEMAP_LEVEL3_RADIX:
		keybits = HAMMER2_FREEMAP_LEVEL4_RADIX;
		break;
	case HAMMER2_FREEMAP_LEVEL4_RADIX:
		panic("hammer2_chain_indkey_freemap: level too high");
		break;
	default:
		panic("hammer2_chain_indkey_freemap: bad radix");
		break;
	}
	*keyp = key;

	return (keybits);
}
3153 
3154 /*
3155  * Calculate the keybits and highside/lowside of the indirect block the
3156  * caller is creating.
3157  */
3158 static int
3159 hammer2_chain_indkey_normal(hammer2_chain_t *parent, hammer2_key_t *keyp,
3160 			    int keybits, hammer2_blockref_t *base, int count)
3161 {
3162 	hammer2_blockref_t *bref;
3163 	hammer2_chain_t	*chain;
3164 	hammer2_key_t key_beg;
3165 	hammer2_key_t key_end;
3166 	hammer2_key_t key_next;
3167 	hammer2_key_t key;
3168 	int nkeybits;
3169 	int locount;
3170 	int hicount;
3171 	int cache_index;
3172 	int maxloops = 300000;
3173 
3174 	key = *keyp;
3175 	locount = 0;
3176 	hicount = 0;
3177 
3178 	/*
3179 	 * Calculate the range of keys in the array being careful to skip
3180 	 * slots which are overridden with a deletion.  Once the scan
3181 	 * completes we will cut the key range in half and shift half the
3182 	 * range into the new indirect block.
3183 	 */
3184 	key_beg = 0;
3185 	key_end = HAMMER2_KEY_MAX;
3186 	cache_index = 0;
3187 	hammer2_spin_ex(&parent->core.spin);
3188 
3189 	for (;;) {
3190 		if (--maxloops == 0) {
3191 			panic("indkey_freemap shit %p %p:%d\n",
3192 			      parent, base, count);
3193 		}
3194 		chain = hammer2_combined_find(parent, base, count,
3195 					      &cache_index, &key_next,
3196 					      key_beg, key_end,
3197 					      &bref);
3198 
3199 		/*
3200 		 * Exhausted search
3201 		 */
3202 		if (bref == NULL)
3203 			break;
3204 
3205 		/*
3206 		 * NOTE: No need to check DUPLICATED here because we do
3207 		 *	 not release the spinlock.
3208 		 */
3209 		if (chain && (chain->flags & HAMMER2_CHAIN_DELETED)) {
3210 			if (key_next == 0 || key_next > key_end)
3211 				break;
3212 			key_beg = key_next;
3213 			continue;
3214 		}
3215 
3216 		/*
3217 		 * Use the full live (not deleted) element for the scan
3218 		 * iteration.  HAMMER2 does not allow partial replacements.
3219 		 *
3220 		 * XXX should be built into hammer2_combined_find().
3221 		 */
3222 		key_next = bref->key + ((hammer2_key_t)1 << bref->keybits);
3223 
3224 		/*
3225 		 * Expand our calculated key range (key, keybits) to fit
3226 		 * the scanned key.  nkeybits represents the full range
3227 		 * that we will later cut in half (two halves @ nkeybits - 1).
3228 		 */
3229 		nkeybits = keybits;
3230 		if (nkeybits < bref->keybits) {
3231 			if (bref->keybits > 64) {
3232 				kprintf("bad bref chain %p bref %p\n",
3233 					chain, bref);
3234 				Debugger("fubar");
3235 			}
3236 			nkeybits = bref->keybits;
3237 		}
3238 		while (nkeybits < 64 &&
3239 		       (~(((hammer2_key_t)1 << nkeybits) - 1) &
3240 		        (key ^ bref->key)) != 0) {
3241 			++nkeybits;
3242 		}
3243 
3244 		/*
3245 		 * If the new key range is larger we have to determine
3246 		 * which side of the new key range the existing keys fall
3247 		 * under by checking the high bit, then collapsing the
3248 		 * locount into the hicount or vise-versa.
3249 		 */
3250 		if (keybits != nkeybits) {
3251 			if (((hammer2_key_t)1 << (nkeybits - 1)) & key) {
3252 				hicount += locount;
3253 				locount = 0;
3254 			} else {
3255 				locount += hicount;
3256 				hicount = 0;
3257 			}
3258 			keybits = nkeybits;
3259 		}
3260 
3261 		/*
3262 		 * The newly scanned key will be in the lower half or the
3263 		 * upper half of the (new) key range.
3264 		 */
3265 		if (((hammer2_key_t)1 << (nkeybits - 1)) & bref->key)
3266 			++hicount;
3267 		else
3268 			++locount;
3269 
3270 		if (key_next == 0)
3271 			break;
3272 		key_beg = key_next;
3273 	}
3274 	hammer2_spin_unex(&parent->core.spin);
3275 	bref = NULL;	/* now invalid (safety) */
3276 
3277 	/*
3278 	 * Adjust keybits to represent half of the full range calculated
3279 	 * above (radix 63 max)
3280 	 */
3281 	--keybits;
3282 
3283 	/*
3284 	 * Select whichever half contains the most elements.  Theoretically
3285 	 * we can select either side as long as it contains at least one
3286 	 * element (in order to ensure that a free slot is present to hold
3287 	 * the indirect block).
3288 	 */
3289 	if (hammer2_indirect_optimize) {
3290 		/*
3291 		 * Insert node for least number of keys, this will arrange
3292 		 * the first few blocks of a large file or the first few
3293 		 * inodes in a directory with fewer indirect blocks when
3294 		 * created linearly.
3295 		 */
3296 		if (hicount < locount && hicount != 0)
3297 			key |= (hammer2_key_t)1 << keybits;
3298 		else
3299 			key &= ~(hammer2_key_t)1 << keybits;
3300 	} else {
3301 		/*
3302 		 * Insert node for most number of keys, best for heavily
3303 		 * fragmented files.
3304 		 */
3305 		if (hicount > locount)
3306 			key |= (hammer2_key_t)1 << keybits;
3307 		else
3308 			key &= ~(hammer2_key_t)1 << keybits;
3309 	}
3310 	*keyp = key;
3311 
3312 	return (keybits);
3313 }
3314 
3315 /*
3316  * Sets CHAIN_DELETED and remove the chain's blockref from the parent if
3317  * it exists.
3318  *
3319  * Both parent and chain must be locked exclusively.
3320  *
3321  * This function will modify the parent if the blockref requires removal
3322  * from the parent's block table.
3323  *
3324  * This function is NOT recursive.  Any entity already pushed into the
3325  * chain (such as an inode) may still need visibility into its contents,
3326  * as well as the ability to read and modify the contents.  For example,
3327  * for an unlinked file which is still open.
3328  */
void
hammer2_chain_delete(hammer2_trans_t *trans, hammer2_chain_t *parent,
		     hammer2_chain_t *chain, int flags)
{
	/* Caller must hold the chain's lock exclusively. */
	KKASSERT(hammer2_mtx_owned(&chain->core.lock));

	/*
	 * Nothing to do if already marked.
	 *
	 * We need the spinlock on the core whos RBTREE contains chain
	 * to protect against races.
	 */
	if ((chain->flags & HAMMER2_CHAIN_DELETED) == 0) {
		KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0 &&
			 chain->parent == parent);
		_hammer2_chain_delete_helper(trans, parent, chain, flags);
	}

	/*
	 * NOTE: Special case call to hammer2_flush().  We are not in a FLUSH
	 *	 transaction, so we can't pass a mirror_tid for the volume.
	 *	 But since we are destroying the chain we can just pass 0
	 *	 and use the flush call to clean out the subtopology.
	 *
	 *	 XXX not the best way to destroy the sub-topology.
	 */
	if (flags & HAMMER2_DELETE_PERMANENT) {
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);
		hammer2_flush(trans, chain);
	} else {
		/* XXX might not be needed */
		hammer2_chain_setflush(trans, chain);
	}
}
3363 
3364 /*
3365  * Returns the index of the nearest element in the blockref array >= elm.
3366  * Returns (count) if no element could be found.
3367  *
3368  * Sets *key_nextp to the next key for loop purposes but does not modify
3369  * it if the next key would be higher than the current value of *key_nextp.
 * Note that *key_nextp can overflow to 0, which should be tested by the
3371  * caller.
3372  *
3373  * (*cache_indexp) is a heuristic and can be any value without effecting
3374  * the result.
3375  *
3376  * WARNING!  Must be called with parent's spinlock held.  Spinlock remains
3377  *	     held through the operation.
3378  */
static int
hammer2_base_find(hammer2_chain_t *parent,
		  hammer2_blockref_t *base, int count,
		  int *cache_indexp, hammer2_key_t *key_nextp,
		  hammer2_key_t key_beg, hammer2_key_t key_end)
{
	hammer2_blockref_t *scan;
	hammer2_key_t scan_end;
	int i;
	int limit;	/* first-known-empty index (core.live_zero) */

	/*
	 * Require that the live chains already have their cores counted
	 * so we can optimize operations.
	 */
        KKASSERT(parent->core.flags & HAMMER2_CORE_COUNTEDBREFS);

	/*
	 * Degenerate case
	 */
	if (count == 0 || base == NULL)
		return(count);

	/*
	 * Sequential optimization using *cache_indexp.  This is the most
	 * likely scenario.
	 *
	 * We can avoid trailing empty entries on live chains, otherwise
	 * we might have to check the whole block array.
	 */
	i = *cache_indexp;
	cpu_ccfence();
	limit = parent->core.live_zero;
	if (i >= limit)
		i = limit - 1;
	if (i < 0)
		i = 0;
	KKASSERT(i < count);

	/*
	 * Search backwards until we reach an element at or below key_beg
	 * (empty slots, type == 0, also force further backup).
	 */
	scan = &base[i];
	while (i > 0 && (scan->type == 0 || scan->key > key_beg)) {
		--scan;
		--i;
	}
	*cache_indexp = i;

	/*
	 * Search forwards, stop when we find a scan element which
	 * encloses the key or until we know that there are no further
	 * elements.
	 */
	while (i < count) {
		if (scan->type != 0) {
			scan_end = scan->key +
				   ((hammer2_key_t)1 << scan->keybits) - 1;
			if (scan->key > key_beg || scan_end >= key_beg)
				break;
		}
		if (i >= limit)
			return (count);	/* nothing live past live_zero */
		++scan;
		++i;
	}
	if (i != count) {
		*cache_indexp = i;
		if (i >= limit) {
			i = count;
		} else {
			/*
			 * Propagate the end of the matched element's range
			 * into *key_nextp for the caller's iteration, but
			 * only if it tightens the existing value.
			 */
			scan_end = scan->key +
				   ((hammer2_key_t)1 << scan->keybits);
			if (scan_end && (*key_nextp > scan_end ||
					 *key_nextp == 0)) {
				*key_nextp = scan_end;
			}
		}
	}
	return (i);
}
3460 
3461 /*
3462  * Do a combined search and return the next match either from the blockref
3463  * array or from the in-memory chain.  Sets *bresp to the returned bref in
3464  * both cases, or sets it to NULL if the search exhausted.  Only returns
3465  * a non-NULL chain if the search matched from the in-memory chain.
3466  *
 * When no in-memory chain is found, NULL is returned for the chain and
 * the non-NULL bref returned in *bresp points into the on-media blockref
 * array (base[]).
 *
3471  * The returned chain is not locked or referenced.  Use the returned bref
3472  * to determine if the search exhausted or not.  Iterate if the base find
3473  * is chosen but matches a deleted chain.
3474  *
3475  * WARNING!  Must be called with parent's spinlock held.  Spinlock remains
3476  *	     held through the operation.
3477  */
static hammer2_chain_t *
hammer2_combined_find(hammer2_chain_t *parent,
		      hammer2_blockref_t *base, int count,
		      int *cache_indexp, hammer2_key_t *key_nextp,
		      hammer2_key_t key_beg, hammer2_key_t key_end,
		      hammer2_blockref_t **bresp)
{
	hammer2_blockref_t *bref;
	hammer2_chain_t *chain;
	int i;

	/*
	 * Lookup in block array and in rbtree.  *key_nextp is primed to
	 * one past the search range; both finders may only tighten it.
	 */
	*key_nextp = key_end + 1;
	i = hammer2_base_find(parent, base, count, cache_indexp,
			      key_nextp, key_beg, key_end);
	chain = hammer2_chain_find(parent, key_nextp, key_beg, key_end);

	/*
	 * Neither matched
	 */
	if (i == count && chain == NULL) {
		*bresp = NULL;
		return(NULL);
	}

	/*
	 * Only chain matched.
	 */
	if (i == count) {
		bref = &chain->bref;
		goto found;
	}

	/*
	 * Only blockref matched.
	 */
	if (chain == NULL) {
		bref = &base[i];
		goto found;
	}

	/*
	 * Both in-memory and blockref matched, select the nearer element.
	 *
	 * If both are flush with the left-hand side or both are the
	 * same distance away, select the chain.  In this situation the
	 * chain must have been loaded from the matching blockmap.
	 */
	if ((chain->bref.key <= key_beg && base[i].key <= key_beg) ||
	    chain->bref.key == base[i].key) {
		KKASSERT(chain->bref.key == base[i].key);
		bref = &chain->bref;
		goto found;
	}

	/*
	 * Select the nearer key
	 */
	if (chain->bref.key < base[i].key) {
		bref = &chain->bref;
	} else {
		bref = &base[i];
		chain = NULL;	/* blockref won, no in-memory chain */
	}

	/*
	 * If the bref is out of bounds we've exhausted our search.
	 */
found:
	if (bref->key > key_end) {
		*bresp = NULL;
		chain = NULL;
	} else {
		*bresp = bref;
	}
	return(chain);
}
3557 
3558 /*
3559  * Locate the specified block array element and delete it.  The element
3560  * must exist.
3561  *
3562  * The spin lock on the related chain must be held.
3563  *
3564  * NOTE: live_count was adjusted when the chain was deleted, so it does not
3565  *	 need to be adjusted when we commit the media change.
3566  */
void
hammer2_base_delete(hammer2_trans_t *trans, hammer2_chain_t *parent,
		    hammer2_blockref_t *base, int count,
		    int *cache_indexp, hammer2_chain_t *chain)
{
	hammer2_blockref_t *elm = &chain->bref;
	hammer2_key_t key_next;
	int i;

	/*
	 * Delete element.  Expect the element to exist.
	 *
	 * XXX see caller, flush code not yet sophisticated enough to prevent
	 *     re-flushed in some cases.
	 */
	key_next = 0; /* max range */
	i = hammer2_base_find(parent, base, count, cache_indexp,
			      &key_next, elm->key, elm->key);
	/*
	 * A mismatched keybits is tolerated only when BMAPUPD is set
	 * (the blockmap entry is known to be stale pending update).
	 */
	if (i == count || base[i].type == 0 ||
	    base[i].key != elm->key ||
	    ((chain->flags & HAMMER2_CHAIN_BMAPUPD) == 0 &&
	     base[i].keybits != elm->keybits)) {
		hammer2_spin_unex(&parent->core.spin);
		panic("delete base %p element not found at %d/%d elm %p\n",
		      base, i, count, elm);
		return;
	}
	bzero(&base[i], sizeof(*base));

	/*
	 * We can only optimize parent->core.live_zero for live chains.
	 * Back it up over any trailing empty slots we just exposed.
	 */
	if (parent->core.live_zero == i + 1) {
		while (--i >= 0 && base[i].type == 0)
			;
		parent->core.live_zero = i + 1;
	}

	/*
	 * Clear appropriate blockmap flags in chain.
	 */
	atomic_clear_int(&chain->flags, HAMMER2_CHAIN_BMAPPED |
					HAMMER2_CHAIN_BMAPUPD);
}
3611 
3612 /*
3613  * Insert the specified element.  The block array must not already have the
3614  * element and must have space available for the insertion.
3615  *
3616  * The spin lock on the related chain must be held.
3617  *
3618  * NOTE: live_count was adjusted when the chain was deleted, so it does not
3619  *	 need to be adjusted when we commit the media change.
3620  */
void
hammer2_base_insert(hammer2_trans_t *trans __unused, hammer2_chain_t *parent,
		    hammer2_blockref_t *base, int count,
		    int *cache_indexp, hammer2_chain_t *chain)
{
	hammer2_blockref_t *elm = &chain->bref;
	hammer2_key_t key_next;
	hammer2_key_t xkey;	/* inclusive end of elm's key range */
	int i;
	int j;			/* backward-scan index for an empty slot */
	int k;			/* forward-scan index for an empty slot */
	int l;			/* validation scan index */
	int u = 1;		/* which insertion path ran (debug panic) */

	/*
	 * Insert new element.  Expect the element to not already exist
	 * unless we are replacing it.
	 *
	 * XXX see caller, flush code not yet sophisticated enough to prevent
	 *     re-flushed in some cases.
	 */
	key_next = 0; /* max range */
	i = hammer2_base_find(parent, base, count, cache_indexp,
			      &key_next, elm->key, elm->key);

	/*
	 * Shortcut fill optimization, typical ordered insertion(s) may not
	 * require a search.
	 */
	KKASSERT(i >= 0 && i <= count);

	/*
	 * Set appropriate blockmap flags in chain.
	 */
	atomic_set_int(&chain->flags, HAMMER2_CHAIN_BMAPPED);

	/*
	 * We can only optimize parent->core.live_zero for live chains.
	 * Appending past the last live slot needs no shifting at all.
	 */
	if (i == count && parent->core.live_zero < count) {
		i = parent->core.live_zero++;
		base[i] = *elm;
		return;
	}

	xkey = elm->key + ((hammer2_key_t)1 << elm->keybits) - 1;
	if (i != count && (base[i].key < elm->key || xkey >= base[i].key)) {
		hammer2_spin_unex(&parent->core.spin);
		panic("insert base %p overlapping elements at %d elm %p\n",
		      base, i, elm);
	}

	/*
	 * Try to find an empty slot before or after, expanding the
	 * search outwards one slot at a time in both directions, and
	 * shift the intervening elements to make room at index i.
	 */
	j = i;
	k = i;
	while (j > 0 || k < count) {
		--j;
		if (j >= 0 && base[j].type == 0) {
			if (j == i - 1) {
				/* empty slot directly below, no shift */
				base[j] = *elm;
			} else {
				/* shift [j+1, i-1] down one, insert at i-1 */
				bcopy(&base[j+1], &base[j],
				      (i - j - 1) * sizeof(*base));
				base[i - 1] = *elm;
			}
			goto validate;
		}
		++k;
		if (k < count && base[k].type == 0) {
			/* shift [i, k-1] up one, insert at i */
			bcopy(&base[i], &base[i+1],
			      (k - i) * sizeof(hammer2_blockref_t));
			base[i] = *elm;

			/*
			 * We can only update parent->core.live_zero for live
			 * chains.
			 */
			if (parent->core.live_zero <= k)
				parent->core.live_zero = k + 1;
			u = 2;
			goto validate;
		}
	}
	panic("hammer2_base_insert: no room!");

	/*
	 * Debugging: verify the array is still sorted with no
	 * overlapping key ranges after the insertion.
	 */
validate:
	key_next = 0;
	for (l = 0; l < count; ++l) {
		if (base[l].type) {
			key_next = base[l].key +
				   ((hammer2_key_t)1 << base[l].keybits) - 1;
			break;
		}
	}
	while (++l < count) {
		if (base[l].type) {
			if (base[l].key <= key_next)
				panic("base_insert %d %d,%d,%d fail %p:%d", u, i, j, k, base, l);
			key_next = base[l].key +
				   ((hammer2_key_t)1 << base[l].keybits) - 1;

		}
	}

}
3731 
3732 #if 0
3733 
3734 /*
3735  * Sort the blockref array for the chain.  Used by the flush code to
3736  * sort the blockref[] array.
3737  *
3738  * The chain must be exclusively locked AND spin-locked.
3739  */
3740 typedef hammer2_blockref_t *hammer2_blockref_p;
3741 
3742 static
3743 int
3744 hammer2_base_sort_callback(const void *v1, const void *v2)
3745 {
3746 	hammer2_blockref_p bref1 = *(const hammer2_blockref_p *)v1;
3747 	hammer2_blockref_p bref2 = *(const hammer2_blockref_p *)v2;
3748 
3749 	/*
3750 	 * Make sure empty elements are placed at the end of the array
3751 	 */
3752 	if (bref1->type == 0) {
3753 		if (bref2->type == 0)
3754 			return(0);
3755 		return(1);
3756 	} else if (bref2->type == 0) {
3757 		return(-1);
3758 	}
3759 
3760 	/*
3761 	 * Sort by key
3762 	 */
3763 	if (bref1->key < bref2->key)
3764 		return(-1);
3765 	if (bref1->key > bref2->key)
3766 		return(1);
3767 	return(0);
3768 }
3769 
void
hammer2_base_sort(hammer2_chain_t *chain)
{
	hammer2_blockref_t *base;	/* blockref array to sort */
	int count;			/* number of array elements */

	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * Special shortcut for embedded data returns the inode
		 * itself.  Callers must detect this condition and access
		 * the embedded data (the strategy code does this for us).
		 *
		 * This is only applicable to regular files and softlinks.
		 */
		if (chain->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA)
			return;
		base = &chain->data->ipdata.u.blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_INDIRECT:
		/*
		 * Optimize indirect blocks in the INITIAL state to avoid
		 * I/O.
		 */
		KKASSERT((chain->flags & HAMMER2_CHAIN_INITIAL) == 0);
		base = &chain->data->npdata[0];
		count = chain->bytes / sizeof(hammer2_blockref_t);
		break;
	case HAMMER2_BREF_TYPE_VOLUME:
		base = &chain->hmp->voldata.sroot_blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	case HAMMER2_BREF_TYPE_FREEMAP:
		base = &chain->hmp->voldata.freemap_blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	default:
		panic("hammer2_chain_lookup: unrecognized blockref type: %d",
		      chain->bref.type);
		base = NULL;	/* safety */
		count = 0;	/* safety */
	}
	kqsort(base, count, sizeof(*base), hammer2_base_sort_callback);
}
3816 
3817 #endif
3818 
3819 /*
3820  * Chain memory management
3821  */
/*
 * Passively wait for an operation on a chain to progress by sleeping
 * on the chain address for one tick (wmesg "chnflw").  No explicit
 * wakeup is required; the one-tick timeout alone bounds the wait.
 */
void
hammer2_chain_wait(hammer2_chain_t *chain)
{
	tsleep(chain, 0, "chnflw", 1);
}
3827 
3828 const hammer2_media_data_t *
3829 hammer2_chain_rdata(hammer2_chain_t *chain)
3830 {
3831 	KKASSERT(chain->data != NULL);
3832 	return (chain->data);
3833 }
3834 
3835 hammer2_media_data_t *
3836 hammer2_chain_wdata(hammer2_chain_t *chain)
3837 {
3838 	KKASSERT(chain->data != NULL);
3839 	return (chain->data);
3840 }
3841 
3842 /*
3843  * Set the check data for a chain.  This can be a heavy-weight operation
3844  * and typically only runs on-flush.  For file data check data is calculated
3845  * when the logical buffers are flushed.
3846  */
3847 void
3848 hammer2_chain_setcheck(hammer2_chain_t *chain, void *bdata)
3849 {
3850 	chain->bref.flags &= ~HAMMER2_BREF_FLAG_ZERO;
3851 
3852 	switch(HAMMER2_DEC_CHECK(chain->bref.methods)) {
3853 	case HAMMER2_CHECK_NONE:
3854 		break;
3855 	case HAMMER2_CHECK_DISABLED:
3856 		break;
3857 	case HAMMER2_CHECK_ISCSI32:
3858 		chain->bref.check.iscsi32.value =
3859 			hammer2_icrc32(bdata, chain->bytes);
3860 		break;
3861 	case HAMMER2_CHECK_CRC64:
3862 		chain->bref.check.crc64.value = 0;
3863 		/* XXX */
3864 		break;
3865 	case HAMMER2_CHECK_SHA192:
3866 		{
3867 			SHA256_CTX hash_ctx;
3868 			union {
3869 				uint8_t digest[SHA256_DIGEST_LENGTH];
3870 				uint64_t digest64[SHA256_DIGEST_LENGTH/8];
3871 			} u;
3872 
3873 			SHA256_Init(&hash_ctx);
3874 			SHA256_Update(&hash_ctx, bdata, chain->bytes);
3875 			SHA256_Final(u.digest, &hash_ctx);
3876 			u.digest64[2] ^= u.digest64[3];
3877 			bcopy(u.digest,
3878 			      chain->bref.check.sha192.data,
3879 			      sizeof(chain->bref.check.sha192.data));
3880 		}
3881 		break;
3882 	case HAMMER2_CHECK_FREEMAP:
3883 		chain->bref.check.freemap.icrc32 =
3884 			hammer2_icrc32(bdata, chain->bytes);
3885 		break;
3886 	default:
3887 		kprintf("hammer2_chain_setcheck: unknown check type %02x\n",
3888 			chain->bref.methods);
3889 		break;
3890 	}
3891 }
3892 
3893 int
3894 hammer2_chain_testcheck(hammer2_chain_t *chain, void *bdata)
3895 {
3896 	int r;
3897 
3898 	if (chain->bref.flags & HAMMER2_BREF_FLAG_ZERO)
3899 		return 1;
3900 
3901 	switch(HAMMER2_DEC_CHECK(chain->bref.methods)) {
3902 	case HAMMER2_CHECK_NONE:
3903 		r = 1;
3904 		break;
3905 	case HAMMER2_CHECK_DISABLED:
3906 		r = 1;
3907 		break;
3908 	case HAMMER2_CHECK_ISCSI32:
3909 		r = (chain->bref.check.iscsi32.value ==
3910 		     hammer2_icrc32(bdata, chain->bytes));
3911 		break;
3912 	case HAMMER2_CHECK_CRC64:
3913 		r = (chain->bref.check.crc64.value == 0);
3914 		/* XXX */
3915 		break;
3916 	case HAMMER2_CHECK_SHA192:
3917 		{
3918 			SHA256_CTX hash_ctx;
3919 			union {
3920 				uint8_t digest[SHA256_DIGEST_LENGTH];
3921 				uint64_t digest64[SHA256_DIGEST_LENGTH/8];
3922 			} u;
3923 
3924 			SHA256_Init(&hash_ctx);
3925 			SHA256_Update(&hash_ctx, bdata, chain->bytes);
3926 			SHA256_Final(u.digest, &hash_ctx);
3927 			u.digest64[2] ^= u.digest64[3];
3928 			if (bcmp(u.digest,
3929 				 chain->bref.check.sha192.data,
3930 			         sizeof(chain->bref.check.sha192.data)) == 0) {
3931 				r = 1;
3932 			} else {
3933 				r = 0;
3934 			}
3935 		}
3936 		break;
3937 	case HAMMER2_CHECK_FREEMAP:
3938 		r = (chain->bref.check.freemap.icrc32 ==
3939 		     hammer2_icrc32(bdata, chain->bytes));
3940 		if (r == 0) {
3941 			kprintf("freemap.icrc %08x icrc32 %08x (%d)\n",
3942 				chain->bref.check.freemap.icrc32,
3943 				hammer2_icrc32(bdata, chain->bytes), chain->bytes);
3944 			if (chain->dio)
3945 				kprintf("dio %p buf %016jx,%d bdata %p/%p\n",
3946 					chain->dio, chain->dio->bp->b_loffset, chain->dio->bp->b_bufsize, bdata, chain->dio->bp->b_data);
3947 		}
3948 
3949 		break;
3950 	default:
3951 		kprintf("hammer2_chain_setcheck: unknown check type %02x\n",
3952 			chain->bref.methods);
3953 		r = 1;
3954 		break;
3955 	}
3956 	return r;
3957 }
3958