1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org>
5  * Copyright (c) 2011-2022 The DragonFly Project.  All rights reserved.
6  *
7  * This code is derived from software contributed to The DragonFly Project
8  * by Matthew Dillon <dillon@dragonflybsd.org>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 /*
38 #include <sys/cdefs.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/types.h>
42 #include <sys/lock.h>
43 #include <sys/uuid.h>
44 #include <sys/vnode.h>
45 */
46 
47 #include "hammer2.h"
48 
49 #define INODE_DEBUG	0
50 
51 RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
52 	     hammer2_tid_t, meta.inum);
53 
54 int
55 hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
56 {
57 	if (ip1->meta.inum < ip2->meta.inum)
58 		return(-1);
59 	if (ip1->meta.inum > ip2->meta.inum)
60 		return(1);
61 	return(0);
62 }
63 
64 /*
65  * Caller holds pmp->list_spin and the inode should be locked.  Merge ip
66  * with the specified depend.
67  *
68  * If the ip is on SYNCQ it stays there and (void *)-1 is returned, indicating
69  * that successive calls must ensure the ip is on a pass2 depend (or they are
70  * all SYNCQ).  If the passed-in depend is not NULL and not (void *)-1 then
71  * we can set pass2 on it and return.
72  *
73  * If the ip is not on SYNCQ it is merged with the passed-in depend, creating
74  * a self-depend if necessary, and depend->pass2 is set according
75  * to the PASS2 flag.  SIDEQ is set.
76  */
77 static __noinline
78 hammer2_depend_t *
79 hammer2_inode_setdepend_locked(hammer2_inode_t *ip, hammer2_depend_t *depend)
80 {
81 	hammer2_pfs_t *pmp = ip->pmp;
82 	hammer2_depend_t *dtmp;
83 	hammer2_inode_t *iptmp;
84 
85 	/*
86 	 * If ip is SYNCQ its entry is used for the syncq list and it will
87 	 * no longer be associated with a dependency.  Merging this status
88 	 * with a passed-in depend implies PASS2.
89 	 */
90 	if (ip->flags & HAMMER2_INODE_SYNCQ) {
91 		if (depend == (void *)-1 ||
92 		    depend == NULL) {
93 			return ((void *)-1);
94 		}
95 		depend->pass2 = 1;
96 		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);
97 
98 		return depend;
99 	}
100 
101 	/*
102 	 * If ip is already SIDEQ, merge ip->depend into the passed-in depend.
103 	 * If it is not, associate the ip with the passed-in depend, creating
104 	 * a single-entry dependency using depend_static if necessary.
105 	 *
106 	 * NOTE: The use of ip->depend_static always requires that the
107 	 *	 specific ip containing the structure is part of that
108 	 *	 particular depend_static's dependency group.
109 	 */
110 	if (ip->flags & HAMMER2_INODE_SIDEQ) {
111 		/*
112 		 * Merge ip->depend with the passed-in depend.  If the
113 		 * passed-in depend is not a special case, all ips associated
114 		 * with ip->depend (including the original ip) must be moved
115 		 * to the passed-in depend.
116 		 */
117 		if (depend == NULL) {
118 			depend = ip->depend;
119 		} else if (depend == (void *)-1) {
120 			depend = ip->depend;
121 			depend->pass2 = 1;
122 		} else if (depend != ip->depend) {
123 #ifdef INVARIANTS
124 			int sanitychk = 0;
125 #endif
126 			dtmp = ip->depend;
127 			while ((iptmp = TAILQ_FIRST(&dtmp->sideq)) != NULL) {
128 #ifdef INVARIANTS
129 				if (iptmp == ip)
130 					sanitychk = 1;
131 #endif
132 				TAILQ_REMOVE(&dtmp->sideq, iptmp, entry);
133 				TAILQ_INSERT_TAIL(&depend->sideq, iptmp, entry);
134 				iptmp->depend = depend;
135 			}
136 			KKASSERT(sanitychk == 1);
137 			depend->count += dtmp->count;
138 			depend->pass2 |= dtmp->pass2;
139 			TAILQ_REMOVE(&pmp->depq, dtmp, entry);
140 			dtmp->count = 0;
141 			dtmp->pass2 = 0;
142 		}
143 	} else {
144 		/*
145 		 * Add ip to the sideq, creating a self-dependency if
146 		 * necessary.
147 		 */
148 		hammer2_inode_ref(ip); /* extra ref usually via hammer2_inode_modify() */
149 		atomic_set_int(&ip->flags, HAMMER2_INODE_SIDEQ);
150 		if (depend == NULL) {
151 			depend = &ip->depend_static;
152 			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
153 		} else if (depend == (void *)-1) {
154 			depend = &ip->depend_static;
155 			depend->pass2 = 1;
156 			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
157 		} /* else add ip to passed-in depend */
158 		TAILQ_INSERT_TAIL(&depend->sideq, ip, entry);
159 		ip->depend = depend;
160 		++depend->count;
161 		++pmp->sideq_count;
162 	}
163 
164 	if (ip->flags & HAMMER2_INODE_SYNCQ_PASS2)
165 		depend->pass2 = 1;
166 	if (depend->pass2)
167 		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);
168 
169 	return depend;
170 }
171 
172 /*
173  * Put a solo inode on the SIDEQ (meaning that it's dirty).  This can also
174  * occur from inode_lock4() and inode_depend().
175  *
176  * Caller must pass-in a locked inode.
177  */
178 void
179 hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
180 {
181 	hammer2_pfs_t *pmp = ip->pmp;
182 
183 	/*
184 	 * Optimize case to avoid pmp spinlock.
185 	 */
186 	if ((ip->flags & (HAMMER2_INODE_SYNCQ | HAMMER2_INODE_SIDEQ)) == 0) {
187 		hammer2_spin_ex(&pmp->list_spin);
188 		hammer2_inode_setdepend_locked(ip, NULL);
189 		hammer2_spin_unex(&pmp->list_spin);
190 	}
191 }
192 
193 /*
194  * Lock an inode, with SYNCQ semantics.
195  *
196  * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
197  * flags for options:
198  *
199  *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.
200  *	  Shared locks are not subject to SYNCQ semantics; exclusive locks
201  *	  are.
202  *
203  *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
204  *	  Most front-end inode locks do.
205  *
206  *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
207  *	  the inode data be resolved.  This is used by the syncthr because
208  *	  it can run on an unresolved/out-of-sync cluster, and also by the
209  *	  vnode reclamation code to avoid unnecessary I/O (particularly when
210  *	  disposing of hundreds of thousands of cached vnodes).
211  *
212  * This function, along with lock4, has SYNCQ semantics.  If the inode being
213  * locked is on the SYNCQ (that is, it has been staged by the syncer), we must
214  * block until the operation is complete (even if we can lock the inode).  In
215  * order to reduce the stall time, we re-order the inode to the front of the
216  * pmp->syncq prior to blocking.  This reordering VERY significantly improves
217  * performance.
218  *
219  * The inode locking function locks the inode itself, resolves any stale
220  * chains in the inode's cluster, and allocates a fresh copy of the
221  * cluster with 1 ref and all the underlying chains locked.
222  *
223  * ip->cluster will be stable while the inode is locked.
224  *
225  * NOTE: We don't combine the inode/chain lock because putting away an
226  *       inode would otherwise confuse multiple lock holders of the inode.
227  */
228 void
229 hammer2_inode_lock(hammer2_inode_t *ip, int how)
230 {
231 	hammer2_pfs_t *pmp;
232 
233 	hammer2_inode_ref(ip);
234 	pmp = ip->pmp;
235 
236 	/*
237 	 * Inode structure mutex - Shared lock
238 	 */
239 	if (how & HAMMER2_RESOLVE_SHARED) {
240 		hammer2_mtx_sh(&ip->lock);
241 		return;
242 	}
243 
244 	/*
245 	 * Inode structure mutex - Exclusive lock
246 	 *
247 	 * An exclusive lock (if not recursive) must wait for inodes on
248 	 * SYNCQ to flush first, to ensure that meta-data dependencies such
249 	 * as the nlink count and related directory entries are not split
250 	 * across flushes.
251 	 *
252 	 * If the vnode is locked by the current thread it must be unlocked
253 	 * across the tsleep() to avoid a deadlock.
254 	 */
255 	hammer2_mtx_ex(&ip->lock);
256 	if (hammer2_mtx_refs(&ip->lock) > 1)
257 		return;
258 	while ((ip->flags & HAMMER2_INODE_SYNCQ) && pmp) {
259 		hammer2_spin_ex(&pmp->list_spin);
260 		if (ip->flags & HAMMER2_INODE_SYNCQ) {
261 			tsleep_interlock(&ip->flags, 0);
262 			atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
263 			TAILQ_REMOVE(&pmp->syncq, ip, entry);
264 			TAILQ_INSERT_HEAD(&pmp->syncq, ip, entry);
265 			hammer2_spin_unex(&pmp->list_spin);
266 			hammer2_mtx_unlock(&ip->lock);
267 			tsleep(&ip->flags, PINTERLOCKED, "h2sync", 0);
268 			hammer2_mtx_ex(&ip->lock);
269 			continue;
270 		}
271 		hammer2_spin_unex(&pmp->list_spin);
272 		break;
273 	}
274 }
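
/*
 * Illustrative sketch (not part of the original code): typical front-end
 * usage of the inode lock.  example_inode_lock_usage() is a hypothetical
 * caller, compiled out via #if 0.
 */
#if 0
static void
example_inode_lock_usage(hammer2_inode_t *ip)
{
	/* Exclusive lock with meta-data resolved (most front-end VOPs) */
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
	/* ... access or modify ip->meta ... */
	hammer2_inode_unlock(ip);

	/* Shared lock, not subject to SYNCQ semantics */
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED | HAMMER2_RESOLVE_ALWAYS);
	/* ... read-only access to ip->meta ... */
	hammer2_inode_unlock(ip);
}
#endif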
275 
276 /*
277  * Exclusively lock up to four inodes, in order, with SYNCQ semantics.
278  * ip1 and ip2 must not be NULL.  ip3 and ip4 may be NULL, but if ip3 is
279  * NULL then ip4 must also be NULL.
280  *
281  * This creates a dependency between up to four inodes.
282  */
283 void
284 hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
285 		    hammer2_inode_t *ip3, hammer2_inode_t *ip4)
286 {
287 	hammer2_inode_t *ips[4];
288 	hammer2_inode_t *iptmp;
289 	hammer2_inode_t *ipslp;
290 	hammer2_depend_t *depend;
291 	hammer2_pfs_t *pmp;
292 	size_t count;
293 	size_t i;
294 
295 	pmp = ip1->pmp;			/* may be NULL */
296 	KKASSERT(pmp == ip2->pmp);
297 
298 	ips[0] = ip1;
299 	ips[1] = ip2;
300 	if (ip3 == NULL) {
301 		count = 2;
302 	} else if (ip4 == NULL) {
303 		count = 3;
304 		ips[2] = ip3;
305 		KKASSERT(pmp == ip3->pmp);
306 	} else {
307 		count = 4;
308 		ips[2] = ip3;
309 		ips[3] = ip4;
310 		KKASSERT(pmp == ip3->pmp);
311 		KKASSERT(pmp == ip4->pmp);
312 	}
313 
314 	for (i = 0; i < count; ++i)
315 		hammer2_inode_ref(ips[i]);
316 
317 restart:
318 	/*
319 	 * Lock the inodes in order
320 	 */
321 	for (i = 0; i < count; ++i) {
322 		hammer2_mtx_ex(&ips[i]->lock);
323 	}
324 
325 	/*
326 	 * Associate dependencies and record the first inode found on SYNCQ
327 	 * (the operation is allowed to proceed for inodes on PASS2) for our
328 	 * sleep operation; this inode is theoretically the last one sync'd
329 	 * in the sequence.
330 	 *
331 	 * All inodes found on SYNCQ are moved to the head of the syncq
332 	 * to reduce stalls.
333 	 */
334 	hammer2_spin_ex(&pmp->list_spin);
335 	depend = NULL;
336 	ipslp = NULL;
337 	for (i = 0; i < count; ++i) {
338 		iptmp = ips[i];
339 		depend = hammer2_inode_setdepend_locked(iptmp, depend);
340 		if (iptmp->flags & HAMMER2_INODE_SYNCQ) {
341 			TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
342 			TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
343 			if (ipslp == NULL)
344 				ipslp = iptmp;
345 		}
346 	}
347 	hammer2_spin_unex(&pmp->list_spin);
348 
349 	/*
350 	 * Block and retry if any of the inodes are on SYNCQ.  It is
351 	 * important that we allow the operation to proceed in the
352 	 * PASS2 case, to avoid deadlocking against the vnode.
353 	 */
354 	if (ipslp) {
355 		for (i = 0; i < count; ++i)
356 			hammer2_mtx_unlock(&ips[i]->lock);
357 		tsleep(&ipslp->flags, 0, "h2sync", 2);
358 		goto restart;
359 	}
360 }
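
/*
 * Illustrative sketch (hypothetical caller): a rename-style operation
 * exclusively locking two directories and a target inode together, which
 * also merges them into a single dependency group.
 */
#if 0
static void
example_inode_lock4_usage(hammer2_inode_t *fdip, hammer2_inode_t *tdip,
			  hammer2_inode_t *ip)
{
	/* ip3 may be NULL, but then ip4 must also be NULL */
	hammer2_inode_lock4(fdip, tdip, ip, NULL);
	/* ... perform the multi-inode operation ... */
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);
	hammer2_inode_unlock(fdip);
}
#endif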
361 
362 /*
363  * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
364  * we wake it up.
365  */
366 void
367 hammer2_inode_unlock(hammer2_inode_t *ip)
368 {
369 	if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) {
370 		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
371 		hammer2_mtx_unlock(&ip->lock);
372 		wakeup(&ip->flags);
373 	} else {
374 		hammer2_mtx_unlock(&ip->lock);
375 	}
376 	hammer2_inode_drop(ip);
377 }
378 
379 /*
380  * If either ip1 or ip2 has been tapped by the syncer, make sure that both
381  * are.  This ensures that dependencies (e.g. dirent-v-inode) are synced
382  * together.  For dirent-v-inode depends, pass the dirent as ip1.
383  *
384  * If neither ip1 nor ip2 has been tapped by the syncer, merge them into a
385  * single dependency.  Dependencies are entered into pmp->depq.  This
386  * effectively flags the inodes SIDEQ.
387  *
388  * Both ip1 and ip2 must be locked by the caller.  This also ensures
389  * that we can't race the end of the syncer's queue run.
390  */
391 void
392 hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
393 {
394 	hammer2_pfs_t *pmp;
395 	hammer2_depend_t *depend;
396 
397 	pmp = ip1->pmp;
398 	hammer2_spin_ex(&pmp->list_spin);
399 	depend = hammer2_inode_setdepend_locked(ip1, NULL);
400 	depend = hammer2_inode_setdepend_locked(ip2, depend);
401 	hammer2_spin_unex(&pmp->list_spin);
402 }
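
/*
 * Illustrative sketch (hypothetical caller): flagging a dirent-v-inode
 * dependency so the directory and the new inode are flushed in the same
 * sync pass.  Both inodes must already be locked by the caller.
 */
#if 0
static void
example_inode_depend_usage(hammer2_inode_t *dip, hammer2_inode_t *nip)
{
	/* dip carries the directory entry, so it is passed as ip1 */
	hammer2_inode_depend(dip, nip);
}
#endif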
403 
404 /*
405  * Select a chain out of an inode's cluster and lock it.
406  *
407  * The inode does not have to be locked.
408  */
409 hammer2_chain_t *
410 hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
411 {
412 	hammer2_chain_t *chain;
413 	hammer2_cluster_t *cluster;
414 
415 	hammer2_spin_sh(&ip->cluster_spin);
416 	cluster = &ip->cluster;
417 	if (clindex >= cluster->nchains)
418 		chain = NULL;
419 	else
420 		chain = cluster->array[clindex].chain;
421 	if (chain) {
422 		hammer2_chain_ref(chain);
423 		hammer2_spin_unsh(&ip->cluster_spin);
424 		hammer2_chain_lock(chain, how);
425 	} else {
426 		hammer2_spin_unsh(&ip->cluster_spin);
427 	}
428 	return chain;
429 }
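
/*
 * Illustrative sketch (hypothetical caller): selecting and locking one
 * chain out of an inode's cluster, then releasing it.
 */
#if 0
static void
example_inode_chain_usage(hammer2_inode_t *ip, int clindex)
{
	hammer2_chain_t *chain;

	chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
	if (chain) {
		/* ... inspect chain->bref, chain->data, etc ... */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}
#endif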
430 
431 hammer2_chain_t *
432 hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
433 			       hammer2_chain_t **parentp, int how)
434 {
435 	hammer2_chain_t *chain;
436 	hammer2_chain_t *parent;
437 
438 	for (;;) {
439 		hammer2_spin_sh(&ip->cluster_spin);
440 		if (clindex >= ip->cluster.nchains)
441 			chain = NULL;
442 		else
443 			chain = ip->cluster.array[clindex].chain;
444 		if (chain) {
445 			hammer2_chain_ref(chain);
446 			hammer2_spin_unsh(&ip->cluster_spin);
447 			hammer2_chain_lock(chain, how);
448 		} else {
449 			hammer2_spin_unsh(&ip->cluster_spin);
450 		}
451 
452 		/*
453 		 * Get parent, lock order must be (parent, chain).
454 		 */
455 		parent = chain->parent;
456 		if (parent) {
457 			hammer2_chain_ref(parent);
458 			hammer2_chain_unlock(chain);
459 			hammer2_chain_lock(parent, how);
460 			hammer2_chain_lock(chain, how);
461 		}
462 		if (ip->cluster.array[clindex].chain == chain &&
463 		    chain->parent == parent) {
464 			break;
465 		}
466 
467 		/*
468 		 * Retry
469 		 */
470 		hammer2_chain_unlock(chain);
471 		hammer2_chain_drop(chain);
472 		if (parent) {
473 			hammer2_chain_unlock(parent);
474 			hammer2_chain_drop(parent);
475 		}
476 	}
477 	*parentp = parent;
478 
479 	return chain;
480 }
481 
482 /*
483  * Temporarily release a lock held shared or exclusive.  Caller must
484  * hold the lock shared or exclusive on call and lock will be released
485  * on return.
486  *
487  * Restore a lock that was temporarily released.
488  */
489 hammer2_mtx_state_t
490 hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
491 {
492 	return hammer2_mtx_temp_release(&ip->lock);
493 }
494 
495 void
496 hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
497 {
498 	hammer2_mtx_temp_restore(&ip->lock, ostate);
499 }
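
/*
 * Illustrative sketch (hypothetical caller): temporarily releasing the
 * inode lock around a blocking operation, mirroring the vget() handling
 * in hammer2_igetv() below.
 */
#if 0
static void
example_inode_lock_temp_usage(hammer2_inode_t *ip)
{
	hammer2_mtx_state_t ostate;

	ostate = hammer2_inode_lock_temp_release(ip);
	/* ... blocking operation that must not hold ip->lock ... */
	hammer2_inode_lock_temp_restore(ip, ostate);
}
#endif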
500 
501 /*
502  * Upgrade a shared inode lock to exclusive and return.  If the inode lock
503  * is already held exclusively this is a NOP.
504  *
505  * The caller MUST hold the inode lock either shared or exclusive on call
506  * and will own the lock exclusively on return.
507  *
508  * Returns non-zero if the lock was already exclusive prior to the upgrade.
509  */
510 int
511 hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
512 {
513 	int wasexclusive;
514 
515 	/* XXX pretends it wasn't exclusive, but shouldn't matter */
516 	//if (mtx_islocked_ex(&ip->lock)) {
517 	if (0) {
518 		wasexclusive = 1;
519 	} else {
520 		hammer2_mtx_unlock(&ip->lock);
521 		hammer2_mtx_ex(&ip->lock);
522 		wasexclusive = 0;
523 	}
524 	return wasexclusive;
525 }
526 
527 /*
528  * Downgrade an inode lock from exclusive to shared only if the inode
529  * lock was previously shared.  If the inode lock was previously exclusive,
530  * this is a NOP.
531  */
532 void
533 hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
534 {
535 	if (wasexclusive == 0)
536 		hammer2_mtx_downgrade(&ip->lock);
537 }
538 
539 /*
540  * Lookup an inode by inode number
541  */
542 hammer2_inode_t *
543 hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
544 {
545 	hammer2_inode_t *ip;
546 
547 	KKASSERT(pmp);
548 	if (pmp->spmp_hmp) {
549 		ip = NULL;
550 	} else {
551 		hammer2_spin_ex(&pmp->inum_spin);
552 		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
553 		if (ip)
554 			hammer2_inode_ref(ip);
555 		hammer2_spin_unex(&pmp->inum_spin);
556 	}
557 	return(ip);
558 }
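
/*
 * Illustrative sketch (hypothetical caller): looking up an inode by inode
 * number.  The lookup returns a referenced inode which must be dropped,
 * independent of the lock/unlock pair.
 */
#if 0
static void
example_inode_lookup_usage(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	ip = hammer2_inode_lookup(pmp, inum);	/* referenced, not locked */
	if (ip) {
		hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
		/* ... read ip->meta ... */
		hammer2_inode_unlock(ip);	/* releases the lock's ref */
		hammer2_inode_drop(ip);		/* releases the lookup's ref */
	}
}
#endif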
559 
560 /*
561  * Adding a ref to an inode is only legal if the inode already has at least
562  * one ref.
563  *
564  * (can be called with spinlock held)
565  */
566 void
567 hammer2_inode_ref(hammer2_inode_t *ip)
568 {
569 	atomic_add_int(&ip->refs, 1);
570 	if (hammer2_debug & 0x80000) {
571 		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
572 		print_backtrace(8);
573 	}
574 }
575 
576 /*
577  * Drop an inode reference, freeing the inode when the last reference goes
578  * away.
579  */
580 void
581 hammer2_inode_drop(hammer2_inode_t *ip)
582 {
583 	hammer2_pfs_t *pmp;
584 	u_int refs;
585 
586 	while (ip) {
587 		if (hammer2_debug & 0x80000) {
588 			kprintf("INODE-1 %p (%d->%d)\n",
589 				ip, ip->refs, ip->refs - 1);
590 			print_backtrace(8);
591 		}
592 		refs = ip->refs;
593 		cpu_ccfence();
594 		if (refs == 1) {
595 			/*
596 			 * Transition to zero, must interlock with
597 			 * the inode inumber lookup tree (if applicable).
598 			 * It should not be possible for anyone to race
599 			 * the transition to 0.
600 			 */
601 			pmp = ip->pmp;
602 			KKASSERT(pmp);
603 			hammer2_spin_ex(&pmp->inum_spin);
604 
605 			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
606 				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
607 				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
608 					atomic_clear_int(&ip->flags,
609 						     HAMMER2_INODE_ONRBTREE);
610 					RB_REMOVE(hammer2_inode_tree,
611 						  &pmp->inum_tree, ip);
612 					--pmp->inum_count;
613 				}
614 				hammer2_spin_unex(&pmp->inum_spin);
615 
616 				ip->pmp = NULL;
617 
618 				/*
619 				 * Cleaning out ip->cluster isn't entirely
620 				 * trivial.
621 				 */
622 				hammer2_inode_repoint(ip, NULL);
623 
624 				/*
625 				 * VOP_RECLAIM is currently unused,
626 				 * so directly free vnode before inode.
627 				 */
628 				if (ip->vp) {
629 					if (ip->vp->v_malloced)
630 						freevnode(ip->vp);
631 				} else {
632 					/* PFS inode ? */
633 				}
634 
635 				kfree_obj(ip, pmp->minode);
636 				atomic_add_long(&pmp->inmem_inodes, -1);
637 				ip = NULL;	/* will terminate loop */
638 			} else {
639 				hammer2_spin_unex(&ip->pmp->inum_spin);
640 			}
641 		} else {
642 			/*
643 			 * Non-zero transition
644 			 */
645 			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
646 				break;
647 		}
648 	}
649 }
650 
651 /*
652  * Get the vnode associated with the given inode, allocating the vnode if
653  * necessary.  The vnode will be returned exclusively locked.
654  *
655  * *errorp is set to a UNIX error, not a HAMMER2 error.
656  *
657  * The caller must lock the inode (shared or exclusive).
658  *
659  * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
660  * races.
661  */
662 struct m_vnode *
663 hammer2_igetv(hammer2_inode_t *ip, int *errorp)
664 {
665 	hammer2_pfs_t *pmp;
666 	struct m_vnode *vp;
667 
668 	pmp = ip->pmp;
669 	KKASSERT(pmp != NULL);
670 	*errorp = 0;
671 
672 	for (;;) {
673 		/*
674 		 * Attempt to reuse an existing vnode assignment.  It is
675 		 * possible to race a reclaim so the vget() may fail.  The
676 		 * inode must be unlocked during the vget() to avoid a
677 		 * deadlock against a reclaim.
678 		 */
679 		int wasexclusive;
680 
681 		vp = ip->vp;
682 		if (vp) {
683 			/*
684 			 * Inode must be unlocked during the vget() to avoid
685 			 * possible deadlocks, but leave the ip ref intact.
686 			 *
687 			 * vnode is held to prevent destruction during the
688 			 * vget().  The vget() can still fail if we lost
689 			 * a reclaim race on the vnode.
690 			 */
691 			hammer2_mtx_state_t ostate;
692 
693 			vhold(vp);
694 			ostate = hammer2_inode_lock_temp_release(ip);
695 			if (vget(vp, LK_EXCLUSIVE)) {
696 				vdrop(vp);
697 				hammer2_inode_lock_temp_restore(ip, ostate);
698 				continue;
699 			}
700 			hammer2_inode_lock_temp_restore(ip, ostate);
701 			vdrop(vp);
702 			/* vp still locked and ref from vget */
703 			if (ip->vp != vp) {
704 				kprintf("hammer2: igetv race %p/%p\n",
705 					ip->vp, vp);
706 				vput(vp);
707 				continue;
708 			}
709 			*errorp = 0;
710 			break;
711 		}
712 
713 		/*
714 		 * No vnode exists, allocate a new vnode.  Beware of
715 		 * allocation races.  This function will return an
716 		 * exclusively locked and referenced vnode.
717 		 */
718 		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
719 		if (*errorp) {
720 			kprintf("hammer2: igetv getnewvnode failed %d\n",
721 				*errorp);
722 			vp = NULL;
723 			break;
724 		}
725 
726 		/*
727 		 * Lock the inode and check for an allocation race.
728 		 */
729 		wasexclusive = hammer2_inode_lock_upgrade(ip);
730 		if (ip->vp != NULL) {
731 			vp->v_type = VBAD;
732 			vx_put(vp);
733 			hammer2_inode_lock_downgrade(ip, wasexclusive);
734 			continue;
735 		}
736 
737 		switch (ip->meta.type) {
738 		case HAMMER2_OBJTYPE_DIRECTORY:
739 			vp->v_type = VDIR;
740 			break;
741 		case HAMMER2_OBJTYPE_REGFILE:
742 			/*
743 			 * Regular file must use buffer cache I/O
744 			 * (VKVABIO cpu sync semantics supported)
745 			 */
746 			vp->v_type = VREG;
747 			vsetflags(vp, VKVABIO);
748 			vinitvmio(vp, ip->meta.size,
749 				  HAMMER2_LBUFSIZE,
750 				  (int)ip->meta.size & HAMMER2_LBUFMASK);
751 			break;
752 		case HAMMER2_OBJTYPE_SOFTLINK:
753 			/*
754 			 * XXX for now we are using the generic file_read
755 			 * and file_write code so we need a buffer cache
756 			 * association.
757 			 *
758 			 * (VKVABIO cpu sync semantics supported)
759 			 */
760 			vp->v_type = VLNK;
761 			vsetflags(vp, VKVABIO);
762 			vinitvmio(vp, ip->meta.size,
763 				  HAMMER2_LBUFSIZE,
764 				  (int)ip->meta.size & HAMMER2_LBUFMASK);
765 			break;
766 		case HAMMER2_OBJTYPE_CDEV:
767 			vp->v_type = VCHR;
768 			/* fall through */
769 		case HAMMER2_OBJTYPE_BDEV:
770 			//vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
771 			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
772 				vp->v_type = VBLK;
773 			addaliasu(vp,
774 				  ip->meta.rmajor,
775 				  ip->meta.rminor);
776 			break;
777 		case HAMMER2_OBJTYPE_FIFO:
778 			vp->v_type = VFIFO;
779 			//vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
780 			break;
781 		case HAMMER2_OBJTYPE_SOCKET:
782 			vp->v_type = VSOCK;
783 			break;
784 		default:
785 			panic("hammer2: unhandled objtype %d",
786 			      ip->meta.type);
787 			break;
788 		}
789 
790 		if (ip == pmp->iroot)
791 			vsetflags(vp, VROOT);
792 
793 		vp->v_data = ip;
794 		ip->vp = vp;
795 		hammer2_inode_ref(ip);		/* vp association */
796 		hammer2_inode_lock_downgrade(ip, wasexclusive);
797 		vx_downgrade(vp);
798 		break;
799 	}
800 
801 	/*
802 	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
803 	 */
804 	if (hammer2_debug & 0x0002) {
805 		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
806 			vp, -1, -1);
807 	}
808 	return (vp);
809 }
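
/*
 * Illustrative sketch (hypothetical caller): obtaining the vnode for a
 * locked inode.  On success the returned vnode is locked and referenced
 * and is eventually disposed of with vput().
 */
#if 0
static struct m_vnode *
example_igetv_usage(hammer2_inode_t *ip, int *errorp)
{
	struct m_vnode *vp;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	vp = hammer2_igetv(ip, errorp);	/* NULL and *errorp set on failure */
	hammer2_inode_unlock(ip);
	return vp;
}
#endif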
810 
811 /*
812  * XXX this API needs a rewrite.  It needs to be split into a
813  * hammer2_inode_alloc() and hammer2_inode_build() to allow us to get
814  * rid of the inode/chain lock reversal fudge.
815  *
816  * Returns the inode associated with the passed-in cluster, allocating a new
817  * hammer2_inode structure if necessary, then synchronizing it to the passed
818  * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
819  * is synchronized.  Otherwise the whole cluster is synchronized.  inum will
820  * be extracted from the passed-in xop and the inum argument will be ignored.
821  *
822  * If xop is passed as NULL then a new hammer2_inode is allocated with the
823  * specified inum, and returned.   For normal inodes, the inode will be
824  * indexed in memory and if it already exists the existing ip will be
825  * returned instead of allocating a new one.  The superroot and PFS inodes
826  * are not indexed in memory.
827  *
828  * The passed-in cluster must be locked and will remain locked on return.
829  * The returned inode will be locked and the caller may dispose of both
830  * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
831  * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
832  *
833  * The hammer2_inode structure regulates the interface between the high level
834  * kernel VNOPS API and the filesystem backend (the chains).
835  *
836  * On return the inode is locked with the supplied cluster.
837  */
838 hammer2_inode_t *
839 hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
840 		  hammer2_tid_t inum, int idx)
841 {
842 	hammer2_inode_t *nip;
843 	const hammer2_inode_data_t *iptmp;
844 	const hammer2_inode_data_t *nipdata;
845 
846 	KKASSERT(xop == NULL ||
847 		 hammer2_cluster_type(&xop->cluster) ==
848 		 HAMMER2_BREF_TYPE_INODE);
849 	KKASSERT(pmp);
850 
851 	/*
852 	 * Interlocked lookup/ref of the inode.  This code is only needed
853 	 * when looking up inodes with nlinks != 0 (TODO: optimize out
854 	 * otherwise and test for duplicates).
855 	 *
856 	 * Cluster can be NULL during the initial pfs allocation.
857 	 */
858 	if (xop) {
859 		iptmp = &hammer2_xop_gdata(xop)->ipdata;
860 		inum = iptmp->meta.inum;
861 		hammer2_xop_pdata(xop);
862 	}
863 again:
864 	nip = hammer2_inode_lookup(pmp, inum);
865 	if (nip) {
866 		/*
867 		 * We may have to unhold the cluster to avoid a deadlock
868 		 * against vnlru (and possibly other XOPs).
869 		 */
870 		if (xop) {
871 			if (hammer2_mtx_ex_try(&nip->lock) != 0) {
872 				hammer2_cluster_unhold(&xop->cluster);
873 				hammer2_mtx_ex(&nip->lock);
874 				hammer2_cluster_rehold(&xop->cluster);
875 			}
876 		} else {
877 			hammer2_mtx_ex(&nip->lock);
878 		}
879 
880 		/*
881 		 * Handle SMP race (not applicable to the super-root spmp
882 		 * which can't index inodes due to duplicative inode numbers).
883 		 */
884 		if (pmp->spmp_hmp == NULL &&
885 		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
886 			hammer2_mtx_unlock(&nip->lock);
887 			hammer2_inode_drop(nip);
888 			goto again;
889 		}
890 		if (xop) {
891 			if (idx >= 0)
892 				hammer2_inode_repoint_one(nip, &xop->cluster,
893 							  idx);
894 			else
895 				hammer2_inode_repoint(nip, &xop->cluster);
896 		}
897 		return nip;
898 	}
899 
900 	/*
901 	 * We couldn't find the inode number, create a new inode and try to
902 	 * insert it, handle insertion races.
903 	 */
904 	nip = kmalloc_obj(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
905 	hammer2_spin_init(&nip->cluster_spin, "h2clspin");
906 	atomic_add_long(&pmp->inmem_inodes, 1);
907 
908 	/*
909 	 * Initialize nip's cluster.  A cluster is provided for normal
910 	 * inodes but typically not for the super-root or PFS inodes.
911 	 */
912 	{
913 		hammer2_inode_t *nnip = nip;
914 		nip->ihash = (int)hammer2_icrc32(&nnip, sizeof(nnip));
915 	}
916 
917 	nip->cluster.refs = 1;
918 	nip->cluster.pmp = pmp;
919 	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
920 	if (xop) {
921 		nipdata = &hammer2_xop_gdata(xop)->ipdata;
922 		nip->meta = nipdata->meta;
923 		hammer2_xop_pdata(xop);
924 		hammer2_inode_repoint(nip, &xop->cluster);
925 	} else {
926 		nip->meta.inum = inum;		/* PFS inum is always 1 XXX */
927 		/* mtime will be updated when a cluster is available */
928 	}
929 
930 	nip->pmp = pmp;
931 
932 	/*
933 	 * The ref and lock on nip give it a state compatible with the state
934 	 * after a hammer2_inode_lock() call.
935 	 */
936 	nip->refs = 1;
937 	hammer2_mtx_init(&nip->lock, "h2inode");
938 	hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
939 	hammer2_mtx_ex(&nip->lock);
940 	TAILQ_INIT(&nip->depend_static.sideq);
941 	/* combination of thread lock and chain lock == inode lock */
942 
943 	/*
944 	 * Attempt to add the inode.  If it fails we raced another inode
945 	 * get.  Undo all the work and try again.
946 	 */
947 	if (pmp->spmp_hmp == NULL) {
948 		hammer2_spin_ex(&pmp->inum_spin);
949 		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
950 			hammer2_spin_unex(&pmp->inum_spin);
951 			hammer2_mtx_unlock(&nip->lock);
952 			hammer2_inode_drop(nip);
953 			goto again;
954 		}
955 		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
956 		++pmp->inum_count;
957 		hammer2_spin_unex(&pmp->inum_spin);
958 	}
959 	return (nip);
960 }
961 
962 /*
963  * Create a PFS inode under the superroot.  This function will create the
964  * inode, its media chains, and also insert it into the media.
965  *
966  * Caller must be in a flush transaction because we are inserting the inode
967  * onto the media.
968  */
969 hammer2_inode_t *
970 hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
971 		     const char *name, size_t name_len,
972 		     int *errorp)
973 {
974 	hammer2_xop_create_t *xop;
975 	hammer2_inode_t *pip;
976 	hammer2_inode_t *nip;
977 	int error;
978 	uint8_t pip_comp_algo;
979 	uint8_t pip_check_algo;
980 	hammer2_tid_t pip_inum;
981 	hammer2_key_t lhc;
982 
983 	pip = spmp->iroot;
984 	nip = NULL;
985 
986 	lhc = hammer2_dirhash(name, name_len);
987 	*errorp = 0;
988 
989 	/*
990 	 * Locate the inode or indirect block to create the new
991 	 * entry in.  At the same time check for key collisions
992 	 * and iterate until we don't get one.
993 	 *
994 	 * Lock the directory exclusively for now to guarantee that
995 	 * we can find an unused lhc for the name.  Due to collisions,
996 	 * two different creates can end up with the same lhc so we
997 	 * cannot depend on the OS to prevent the collision.
998 	 */
999 	hammer2_inode_lock(pip, 0);
1000 
1001 	pip_comp_algo = pip->meta.comp_algo;
1002 	pip_check_algo = pip->meta.check_algo;
1003 	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;
1004 
1005 	/*
1006 	 * Locate an unused key in the collision space.
1007 	 */
1008 	{
1009 		hammer2_xop_scanlhc_t *sxop;
1010 		hammer2_key_t lhcbase;
1011 
1012 		lhcbase = lhc;
1013 		sxop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
1014 		sxop->lhc = lhc;
1015 		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
1016 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
1017 			if (lhc != sxop->head.cluster.focus->bref.key)
1018 				break;
1019 			++lhc;
1020 		}
1021 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
1022 
1023 		if (error) {
1024 			if (error != HAMMER2_ERROR_ENOENT)
1025 				goto done2;
1026 			++lhc;
1027 			error = 0;
1028 		}
1029 		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
1030 			error = HAMMER2_ERROR_ENOSPC;
1031 			goto done2;
1032 		}
1033 	}
1034 
1035 	/*
1036 	 * Create the inode with the lhc as the key.
1037 	 */
1038 	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
1039 	xop->lhc = lhc;
1040 	xop->flags = HAMMER2_INSERT_PFSROOT;
1041 	bzero(&xop->meta, sizeof(xop->meta));
1042 
1043 	xop->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
1044 	xop->meta.inum = 1;
1045 	xop->meta.iparent = pip_inum;
1046 
1047 	/* Inherit parent's inode compression mode. */
1048 	xop->meta.comp_algo = pip_comp_algo;
1049 	xop->meta.check_algo = pip_check_algo;
1050 	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
1051 	hammer2_update_time(&xop->meta.ctime, false);
1052 	xop->meta.mtime = xop->meta.ctime;
1053 	xop->meta.mode = 0755;
1054 	xop->meta.nlinks = 1;
1055 
1056 	/*
1057 	 * Regular files and softlinks allow a small amount of data to be
1058 	 * directly embedded in the inode.  This flag will be cleared if
1059 	 * the size is extended past the embedded limit.
1060 	 */
1061 	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
1062 	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
1063 		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
1064 	}
1065 	hammer2_xop_setname(&xop->head, name, name_len);
1066 	xop->meta.name_len = name_len;
1067 	xop->meta.name_key = lhc;
1068 	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
1069 
1070 	hammer2_xop_start(&xop->head, &hammer2_inode_create_desc);
1071 
1072 	error = hammer2_xop_collect(&xop->head, 0);
1073 #if INODE_DEBUG
1074 	kprintf("CREATE INODE %*.*s\n",
1075 		(int)name_len, (int)name_len, name);
1076 #endif
1077 
1078 	if (error) {
1079 		*errorp = error;
1080 		goto done;
1081 	}
1082 
1083 	/*
1084 	 * Set up the new inode if not a hardlink pointer.
1085 	 *
1086 	 * NOTE: *_get() integrates chain's lock into the inode lock.
1087 	 *
1088 	 * NOTE: Only one new inode can currently be created per
1089 	 *	 transaction.  If the need arises we can adjust
1090 	 *	 hammer2_trans_init() to allow more.
1091 	 *
1092 	 * NOTE: nipdata will have chain's blockset data.
1093 	 */
1094 	nip = hammer2_inode_get(pip->pmp, &xop->head, -1, -1);
1095 	nip->comp_heuristic = 0;
1096 done:
1097 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1098 done2:
1099 	hammer2_inode_unlock(pip);
1100 
1101 	return (nip);
1102 }
1103 
1104 /*
1105  * Create a new, normal inode.  This function will create the inode,
1106  * the media chains, but will not insert the chains onto the media topology
1107  * (doing so would require a flush transaction and cause long stalls).
1108  *
1109  * Caller must be in a normal transaction.
1110  */
1111 hammer2_inode_t *
1112 hammer2_inode_create_normal(hammer2_inode_t *pip,
1113 			    struct vattr *vap, struct ucred *cred,
1114 			    hammer2_key_t inum, int *errorp)
1115 {
1116 	hammer2_xop_create_t *xop;
1117 	hammer2_inode_t *dip;
1118 	hammer2_inode_t *nip;
1119 	int error;
1120 	uid_t xuid;
1121 	uuid_t pip_uid;
1122 	uuid_t pip_gid;
1123 	uint32_t pip_mode;
1124 	uint8_t pip_comp_algo;
1125 	uint8_t pip_check_algo;
1126 	hammer2_tid_t pip_inum;
1127 
1128 	dip = pip->pmp->iroot;
1129 	KKASSERT(dip != NULL);
1130 
1131 	*errorp = 0;
1132 
1133 	/*hammer2_inode_lock(dip, 0);*/
1134 
1135 	pip_uid = pip->meta.uid;
1136 	pip_gid = pip->meta.gid;
1137 	pip_mode = pip->meta.mode;
1138 	pip_comp_algo = pip->meta.comp_algo;
1139 	pip_check_algo = pip->meta.check_algo;
1140 	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;
1141 
1142 	/*
1143 	 * Create the in-memory hammer2_inode structure for the specified
1144 	 * inode.
1145 	 */
1146 	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
1147 	nip->comp_heuristic = 0;
1148 	KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
1149 		 nip->cluster.nchains == 0);
1150 	atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);
1151 
1152 	/*
1153 	 * Setup the inode meta-data
1154 	 */
1155 	nip->meta.type = hammer2_get_obj_type(vap->va_type);
1156 
1157 	switch (nip->meta.type) {
1158 	case HAMMER2_OBJTYPE_CDEV:
1159 	case HAMMER2_OBJTYPE_BDEV:
1160 		assert(0); /* XXX unsupported */
1161 		nip->meta.rmajor = vap->va_rmajor;
1162 		nip->meta.rminor = vap->va_rminor;
1163 		break;
1164 	default:
1165 		break;
1166 	}
1167 
1168 	KKASSERT(nip->meta.inum == inum);
1169 	nip->meta.iparent = pip_inum;
1170 
1171 	/* Inherit parent's inode compression mode. */
1172 	nip->meta.comp_algo = pip_comp_algo;
1173 	nip->meta.check_algo = pip_check_algo;
1174 	nip->meta.version = HAMMER2_INODE_VERSION_ONE;
1175 	hammer2_update_time(&nip->meta.ctime, false);
1176 	nip->meta.mtime = nip->meta.ctime;
1177 	nip->meta.mode = vap->va_mode;
1178 	nip->meta.nlinks = 1;
1179 
1180 	xuid = hammer2_to_unix_xid(&pip_uid);
1181 	xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
1182 				     xuid, cred,
1183 				     &vap->va_mode);
1184 	if (vap->va_vaflags & VA_UID_UUID_VALID)
1185 		nip->meta.uid = vap->va_uid_uuid;
1186 	else if (vap->va_uid != (uid_t)VNOVAL)
1187 		hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
1188 	else
1189 		hammer2_guid_to_uuid(&nip->meta.uid, xuid);
1190 
1191 	if (vap->va_vaflags & VA_GID_UUID_VALID)
1192 		nip->meta.gid = vap->va_gid_uuid;
1193 	else if (vap->va_gid != (gid_t)VNOVAL)
1194 		hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
1195 	else
1196 		nip->meta.gid = pip_gid;
1197 
1198 	/*
1199 	 * Regular files and softlinks allow a small amount of data to be
1200 	 * directly embedded in the inode.  This flag will be cleared if
1201 	 * the size is extended past the embedded limit.
1202 	 */
1203 	if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
1204 	    nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
1205 		nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
1206 	}
1207 
1208 	/*
1209 	 * Create the inode using (inum) as the key.  Pass pip for
1210 	 * method inheritance.
1211 	 */
1212 	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
1213 	xop->lhc = inum;
1214 	xop->flags = 0;
1215 	xop->meta = nip->meta;
1216 	KKASSERT(vap);
1217 
1218 	xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
1219 	xop->meta.name_key = inum;
1220 	nip->meta.name_len = xop->meta.name_len;
1221 	nip->meta.name_key = xop->meta.name_key;
1222 	hammer2_inode_modify(nip);
1223 
1224 	/*
1225 	 * Create the inode media chains but leave them detached.  We are
1226 	 * not in a flush transaction so we can't mess with media topology
1227 	 * above normal inodes (i.e. the index of the inodes themselves).
1228 	 *
1229 	 * We've already set the INODE_CREATING flag.  The inode's media
1230 	 * chains will be inserted onto the media topology on the next
1231 	 * filesystem sync.
1232 	 */
1233 	hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);
1234 
1235 	error = hammer2_xop_collect(&xop->head, 0);
1236 #if INODE_DEBUG
1237 	kprintf("create inode type %d error %d\n", nip->meta.type, error);
1238 #endif
1239 
1240 	if (error) {
1241 		*errorp = error;
1242 		goto done;
1243 	}
1244 
1245 	/*
1246 	 * Associate the media chains created by the backend with the
1247 	 * frontend inode.
1248 	 */
1249 	hammer2_inode_repoint(nip, &xop->head.cluster);
1250 done:
1251 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1252 	/*hammer2_inode_unlock(dip);*/
1253 
1254 	return (nip);
1255 }
1256 
1257 /*
1258  * Create a directory entry under dip with the specified name, inode number,
1259  * and OBJTYPE (type).
1260  *
1261  * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
1262  *
1263  * Caller must hold dip locked.
1264  */
1265 int
1266 hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
1267 		      hammer2_key_t inum, uint8_t type)
1268 {
1269 	hammer2_xop_mkdirent_t *xop;
1270 	hammer2_key_t lhc;
1271 	int error;
1272 
1273 	lhc = 0;
1274 	error = 0;
1275 
1276 	KKASSERT(name != NULL);
1277 	lhc = hammer2_dirhash(name, name_len);
1278 
1279 	/*
1280 	 * Locate the inode or indirect block to create the new
1281 	 * entry in.  At the same time check for key collisions
1282 	 * and iterate until we don't get one.
1283 	 *
1284 	 * Lock the directory exclusively for now to guarantee that
1285 	 * we can find an unused lhc for the name.  Due to collisions,
1286 	 * two different creates can end up with the same lhc so we
1287 	 * cannot depend on the OS to prevent the collision.
1288 	 */
1289 	hammer2_inode_modify(dip);
1290 
1291 	 * Locate an unused key in the collision space (a name is always
1292 	 * specified here, see the KKASSERT above).
1293 	 * Otherwise use the passed-in lhc directly.
1294 	 */
1295 	{
1296 		hammer2_xop_scanlhc_t *sxop;
1297 		hammer2_key_t lhcbase;
1298 
1299 		lhcbase = lhc;
1300 		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1301 		sxop->lhc = lhc;
1302 		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
1303 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
1304 			if (lhc != sxop->head.cluster.focus->bref.key)
1305 				break;
1306 			++lhc;
1307 		}
1308 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
1309 
1310 		if (error) {
1311 			if (error != HAMMER2_ERROR_ENOENT)
1312 				goto done2;
1313 			++lhc;
1314 			error = 0;
1315 		}
1316 		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
1317 			error = HAMMER2_ERROR_ENOSPC;
1318 			goto done2;
1319 		}
1320 	}
1321 
1322 	/*
1323 	 * Create the directory entry with the lhc as the key.
1324 	 */
1325 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1326 	xop->lhc = lhc;
1327 	bzero(&xop->dirent, sizeof(xop->dirent));
1328 	xop->dirent.inum = inum;
1329 	xop->dirent.type = type;
1330 	xop->dirent.namlen = name_len;
1331 
1332 	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
1333 	hammer2_xop_setname(&xop->head, name, name_len);
1334 
1335 	hammer2_xop_start(&xop->head, &hammer2_inode_mkdirent_desc);
1336 
1337 	error = hammer2_xop_collect(&xop->head, 0);
1338 
1339 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1340 done2:
1341 	error = hammer2_error_to_errno(error);
1342 
1343 	return error;
1344 }
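
/*
 * Illustrative sketch (hypothetical caller, error handling simplified):
 * the typical create sequence for a normal inode, assuming the caller
 * holds dip locked, is in a normal transaction, and has already allocated
 * the new inode number.
 */
#if 0
static int
example_create_sequence(hammer2_inode_t *dip, struct vattr *vap,
			struct ucred *cred, const char *name, size_t name_len,
			hammer2_key_t inum)
{
	hammer2_inode_t *nip;
	int error;

	nip = hammer2_inode_create_normal(dip, vap, cred, inum, &error);
	if (error)
		error = hammer2_error_to_errno(error);
	else
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	if (error == 0)
		hammer2_inode_depend(dip, nip);	/* dirent-v-inode depend */
	if (nip)
		hammer2_inode_unlock(nip);
	return error;
}
#endif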
1345 
1346 /*
1347  * Repoint ip->cluster's chains to cluster's chains and fixup the default
1348  * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
1349  * filters out invalid or non-matching elements.
1350  *
1351  * Caller must hold the inode exclusively locked.  The cluster, if not
1352  * NULL, must also be locked.
1353  *
1354  * Cluster may be NULL to clean out any chains in ip->cluster.
1355  */
1356 void
1357 hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_cluster_t *cluster)
1358 {
1359 	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
1360 	hammer2_chain_t *ochain;
1361 	hammer2_chain_t *nchain;
1362 	int i;
1363 
1364 	bzero(dropch, sizeof(dropch));
1365 
1366 	/*
1367 	 * Replace chains in ip->cluster with chains from cluster and
1368 	 * adjust the focus if necessary.
1369 	 *
1370 	 * NOTE: nchain and/or ochain can be NULL due to gaps
1371 	 *	 in the cluster arrays.
1372 	 */
1373 	hammer2_spin_ex(&ip->cluster_spin);
1374 	for (i = 0; cluster && i < cluster->nchains; ++i) {
1375 		/*
1376 		 * Do not replace elements which are the same.  Also handle
1377 		 * element count discrepancies.
1378 		 */
1379 		nchain = cluster->array[i].chain;
1380 		if (i < ip->cluster.nchains) {
1381 			ochain = ip->cluster.array[i].chain;
1382 			if (ochain == nchain)
1383 				continue;
1384 		} else {
1385 			ochain = NULL;
1386 		}
1387 
1388 		/*
1389 		 * Make adjustments
1390 		 */
1391 		ip->cluster.array[i].chain = nchain;
1392 		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
1393 		ip->cluster.array[i].flags |= cluster->array[i].flags &
1394 					      HAMMER2_CITEM_INVALID;
1395 		if (nchain)
1396 			hammer2_chain_ref(nchain);
1397 		dropch[i] = ochain;
1398 	}
1399 
1400 	/*
1401 	 * Release any left-over chains in ip->cluster.
1402 	 */
1403 	while (i < ip->cluster.nchains) {
1404 		nchain = ip->cluster.array[i].chain;
1405 		if (nchain) {
1406 			ip->cluster.array[i].chain = NULL;
1407 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
1408 		}
1409 		dropch[i] = nchain;
1410 		++i;
1411 	}
1412 
1413 	/*
1414 	 * Fixup fields.  Note that the inode-embedded cluster is never
1415 	 * directly locked.
1416 	 */
1417 	if (cluster) {
1418 		ip->cluster.nchains = cluster->nchains;
1419 		ip->cluster.focus = cluster->focus;
1420 		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
1421 	} else {
1422 		ip->cluster.nchains = 0;
1423 		ip->cluster.focus = NULL;
1424 		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
1425 	}
1426 
1427 	hammer2_spin_unex(&ip->cluster_spin);
1428 
1429 	/*
1430 	 * Cleanup outside of spinlock
1431 	 */
1432 	while (--i >= 0) {
1433 		if (dropch[i])
1434 			hammer2_chain_drop(dropch[i]);
1435 	}
1436 }
1437 
1438 /*
1439  * Repoint a single element from the cluster to the ip.  Used by the
1440  * synchronization threads to piecemeal update inodes.  Does not change
1441  * focus and requires the inode to be re-locked to clean up flags (XXX).
1442  */
1443 void
1444 hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1445 			  int idx)
1446 {
1447 	hammer2_chain_t *ochain;
1448 	hammer2_chain_t *nchain;
1449 	int i;
1450 
1451 	hammer2_spin_ex(&ip->cluster_spin);
1452 	KKASSERT(idx < cluster->nchains);
1453 	if (idx < ip->cluster.nchains) {
1454 		ochain = ip->cluster.array[idx].chain;
1455 		nchain = cluster->array[idx].chain;
1456 	} else {
1457 		ochain = NULL;
1458 		nchain = cluster->array[idx].chain;
1459 		for (i = ip->cluster.nchains; i <= idx; ++i) {
1460 			bzero(&ip->cluster.array[i],
1461 			      sizeof(ip->cluster.array[i]));
1462 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
1463 		}
1464 		ip->cluster.nchains = idx + 1;
1465 	}
1466 	if (ochain != nchain) {
1467 		/*
1468 		 * Make adjustments.
1469 		 */
1470 		ip->cluster.array[idx].chain = nchain;
1471 		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
1472 		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
1473 						HAMMER2_CITEM_INVALID;
1474 	}
1475 	hammer2_spin_unex(&ip->cluster_spin);
1476 	if (ochain != nchain) {
1477 		if (nchain)
1478 			hammer2_chain_ref(nchain);
1479 		if (ochain)
1480 			hammer2_chain_drop(ochain);
1481 	}
1482 }
1483 
1484 hammer2_key_t
1485 hammer2_inode_data_count(const hammer2_inode_t *ip)
1486 {
1487 	hammer2_chain_t *chain;
1488 	hammer2_key_t count = 0;
1489 	int i;
1490 
1491 	for (i = 0; i < ip->cluster.nchains; ++i) {
1492 		if ((chain = ip->cluster.array[i].chain) != NULL) {
1493 			if (count < chain->bref.embed.stats.data_count)
1494 				count = chain->bref.embed.stats.data_count;
1495 		}
1496 	}
1497 	return count;
1498 }
1499 
1500 hammer2_key_t
1501 hammer2_inode_inode_count(const hammer2_inode_t *ip)
1502 {
1503 	hammer2_chain_t *chain;
1504 	hammer2_key_t count = 0;
1505 	int i;
1506 
1507 	for (i = 0; i < ip->cluster.nchains; ++i) {
1508 		if ((chain = ip->cluster.array[i].chain) != NULL) {
1509 			if (count < chain->bref.embed.stats.inode_count)
1510 				count = chain->bref.embed.stats.inode_count;
1511 		}
1512 	}
1513 	return count;
1514 }
1515 
1516 /*
1517  * Called with a locked inode to finish unlinking an inode after xop_unlink
1518  * had been run.  This function is responsible for decrementing nlinks.
1519  *
1520  * We don't bother decrementing nlinks if the file is not open and this was
1521  * the last link.
1522  *
1523  * If the inode is a hardlink target its chain has not yet been deleted,
1524  * otherwise its chain has been deleted.
1525  *
1526  * If the file is still open then any prior deletion was not permanent and
1527  * the inode is left intact with nlinks == 0.
1528  */
1529 int
1530 hammer2_inode_unlink_finisher(hammer2_inode_t *ip, struct m_vnode **vprecyclep)
1531 {
1532 	struct m_vnode *vp;
1533 
1534 	/*
1535 	 * Decrement nlinks.  Catch a bad nlinks count here too (e.g. 0 or
1536 	 * negative), and just assume a transition to 0.
1537 	 */
1538 	if ((int64_t)ip->meta.nlinks <= 1) {
1539 		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
1540 
1541 		/*
1542 		 * Scrap the vnode as quickly as possible.  The vp association
1543 		 * stays intact while we hold the inode locked.  However, vp
1544 		 * can be NULL here.
1545 		 */
1546 		vp = ip->vp;
1547 		cpu_ccfence();
1548 
1549 		/*
1550 		 * If no vp is associated there is no high-level state to
1551 		 * deal with and we can scrap the inode immediately.
1552 		 */
1553 		if (vp == NULL) {
1554 			if ((ip->flags & HAMMER2_INODE_DELETING) == 0) {
1555 				atomic_set_int(&ip->flags,
1556 					       HAMMER2_INODE_DELETING);
1557 				hammer2_inode_delayed_sideq(ip);
1558 			}
1559 			return 0;
1560 		}
1561 
1562 		/*
1563 		 * Because INODE_ISUNLINKED is set with the inode lock
1564 		 * held, the vnode cannot be ripped up from under us.
1565 		 * There may still be refs so knote anyone waiting for
1566 		 * a delete notification.
1567 		 *
1568 		 * The vnode is not necessarily ref'd due to the unlinking
1569 		 * itself, so we have to defer handling to the end of the
1570 		 * VOP, which will then call hammer2_inode_vprecycle().
1571 		 */
1572 		if (vprecyclep) {
1573 			vhold(vp);
1574 			*vprecyclep = vp;
1575 		}
1576 	}
1577 
1578 	/*
1579 	 * Adjust nlinks and retain the inode on the media for now
1580 	 */
1581 	hammer2_inode_modify(ip);
1582 	if ((int64_t)ip->meta.nlinks > 1)
1583 		--ip->meta.nlinks;
1584 	else
1585 		ip->meta.nlinks = 0;
1586 
1587 	return 0;
1588 }
1589 
1590 /*
1591  * Called at the end of a VOP that removes a file with a vnode that
1592  * we want to try to dispose of quickly due to a file deletion.  If
1593  * we don't do this, the vnode can hang around with 0 refs for a very
1594  * long time and prevent reclamation of the underlying file and inode
1595  * (inode remains on-media with nlinks == 0 until the vnode is recycled
1596  * due to random system activity or a umount).
1597  */
1598 void
1599 hammer2_inode_vprecycle(struct m_vnode *vp)
1600 {
1601 	if (vget(vp, LK_EXCLUSIVE) == 0) {
1602 		vfinalize(vp);
1603 		hammer2_knote(vp, NOTE_DELETE);
1604 		vdrop(vp);
1605 		vput(vp);
1606 	} else {
1607 		vdrop(vp);
1608 	}
1609 }
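
/*
 * Illustrative sketch (hypothetical caller): finishing an unlink while
 * deferring the vnode recycle until after the inode lock is released,
 * as described above.
 */
#if 0
static void
example_unlink_finish(hammer2_inode_t *ip)
{
	struct m_vnode *vprecycle = NULL;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
	hammer2_inode_unlink_finisher(ip, &vprecycle);
	hammer2_inode_unlock(ip);
	if (vprecycle)
		hammer2_inode_vprecycle(vprecycle);
}
#endif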
1610 
1611 
1612 /*
1613  * Mark an inode as being modified, meaning that the caller will modify
1614  * ip->meta.
1615  *
1616  * If a vnode is present we set the vnode dirty and the nominal filesystem
1617  * sync will also handle synchronizing the inode meta-data.  Unless NOSIDEQ
1618  * we must ensure that the inode is on pmp->sideq.
1619  *
1620  * NOTE: We must always queue the inode to the sideq.  This allows H2 to
1621  *	 shortcut vsyncscan() and flush inodes and their related vnodes
1622  *	 in two stages.  H2 still calls vfsync() for each vnode.
1623  *
1624  * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
1625  *	 only modifying the in-memory inode.  A modify_tid is synchronized
1626  *	 later when the inode gets flushed.
1627  *
1628  * NOTE: As an exception to the general rule, the inode MAY be locked
1629  *	 shared for this particular call.
1630  */
1631 void
1632 hammer2_inode_modify(hammer2_inode_t *ip)
1633 {
1634 	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1635 	if (ip->vp)
1636 		vsetisdirty(ip->vp);
1637 	if (ip->pmp && (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0)
1638 		hammer2_inode_delayed_sideq(ip);
1639 }
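
/*
 * Illustrative sketch (hypothetical caller): the usual pattern for
 * modifying in-memory inode meta-data, which marks the inode MODIFIED
 * and queues it to the SIDEQ for the next filesystem sync.
 */
#if 0
static void
example_inode_modify_usage(hammer2_inode_t *ip, uint64_t mtime)
{
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
	hammer2_inode_modify(ip);
	ip->meta.mtime = mtime;
	hammer2_inode_unlock(ip);
}
#endif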
1640 
1641 /*
1642  * Synchronize the inode's frontend state with the chain state prior
1643  * to any explicit flush of the inode or any strategy write call.  This
1644  * does not flush the inode's chain or its sub-topology to media (higher
1645  * level layers are responsible for doing that).
1646  *
1647  * Called with a locked inode inside a normal transaction.
1648  *
1649  * inode must be locked.
1650  */
1651 int
1652 hammer2_inode_chain_sync(hammer2_inode_t *ip)
1653 {
1654 	int error;
1655 
1656 	error = 0;
1657 	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
1658 		hammer2_xop_fsync_t *xop;
1659 
1660 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1661 		xop->clear_directdata = 0;
1662 		if (ip->flags & HAMMER2_INODE_RESIZED) {
1663 			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
1664 			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
1665 				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
1666 				xop->clear_directdata = 1;
1667 			}
1668 			xop->osize = ip->osize;
1669 		} else {
1670 			xop->osize = ip->meta.size;	/* safety */
1671 		}
1672 		xop->ipflags = ip->flags;
1673 		xop->meta = ip->meta;
1674 
1675 		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
1676 					     HAMMER2_INODE_MODIFIED);
1677 		hammer2_xop_start(&xop->head, &hammer2_inode_chain_sync_desc);
1678 		error = hammer2_xop_collect(&xop->head, 0);
1679 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1680 		if (error == HAMMER2_ERROR_ENOENT)
1681 			error = 0;
1682 		if (error) {
1683 			kprintf("hammer2: unable to fsync inode %p\n", ip);
1684 			/*
1685 			atomic_set_int(&ip->flags,
1686 				       xop->ipflags & (HAMMER2_INODE_RESIZED |
1687 						       HAMMER2_INODE_MODIFIED));
1688 			*/
1689 			/* XXX return error somehow? */
1690 		}
1691 	}
1692 	return error;
1693 }
1694 
1695 /*
1696  * When an inode is flagged INODE_CREATING its chains have not actually
1697  * been inserted into the on-media tree yet.
1698  */
1699 int
1700 hammer2_inode_chain_ins(hammer2_inode_t *ip)
1701 {
1702 	int error;
1703 
1704 	error = 0;
1705 	if (ip->flags & HAMMER2_INODE_CREATING) {
1706 		hammer2_xop_create_t *xop;
1707 
1708 		atomic_clear_int(&ip->flags, HAMMER2_INODE_CREATING);
1709 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1710 		xop->lhc = ip->meta.inum;
1711 		xop->flags = 0;
1712 		hammer2_xop_start(&xop->head, &hammer2_inode_create_ins_desc);
1713 		error = hammer2_xop_collect(&xop->head, 0);
1714 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1715 		if (error == HAMMER2_ERROR_ENOENT)
1716 			error = 0;
1717 		if (error) {
1718 			kprintf("hammer2: backend unable to "
1719 				"insert inode %p %ld\n", ip, (long)ip->meta.inum);
1720 			/* XXX return error somehow? */
1721 		}
1722 	}
1723 	return error;
1724 }
1725 
1726 /*
1727  * When an inode is flagged INODE_DELETING it has been deleted (no directory
1728  * entry or open refs are left, though as an optimization H2 might leave
1729  * nlinks == 1 to avoid unnecessary block updates).  The backend flush then
1730  * needs to actually remove it from the topology.
1731  *
1732  * NOTE: backend flush must still sync and flush the deleted inode to clean
1733  *	 out related chains.
1734  *
1735  * NOTE: We must clear not only INODE_DELETING, but also INODE_ISUNLINKED
1736  *	 to prevent the vnode reclaim code from trying to delete it twice.
1737  */
1738 int
1739 hammer2_inode_chain_des(hammer2_inode_t *ip)
1740 {
1741 	int error;
1742 
1743 	error = 0;
1744 	if (ip->flags & HAMMER2_INODE_DELETING) {
1745 		hammer2_xop_destroy_t *xop;
1746 
1747 		atomic_clear_int(&ip->flags, HAMMER2_INODE_DELETING |
1748 					     HAMMER2_INODE_ISUNLINKED);
1749 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1750 		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
1751 		error = hammer2_xop_collect(&xop->head, 0);
1752 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1753 
1754 		if (error == HAMMER2_ERROR_ENOENT)
1755 			error = 0;
1756 		if (error) {
1757 			kprintf("hammer2: backend unable to "
1758 				"delete inode %p %ld\n", ip, (long)ip->meta.inum);
1759 			/* XXX return error somehow? */
1760 		}
1761 	}
1762 	return error;
1763 }
1764 
1765 /*
1766  * Flushes the inode's chain and its sub-topology to media.  Interlocks
1767  * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
1768  * function creating or modifying a chain under this inode will re-set the
1769  * flag.
1770  *
1771  * inode must be locked.
1772  */
1773 int
1774 hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags)
1775 {
1776 	hammer2_xop_fsync_t *xop;
1777 	int error;
1778 
1779 	atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
1780 	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | flags);
1781 	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
1782 	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
1783 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1784 	if (error == HAMMER2_ERROR_ENOENT)
1785 		error = 0;
1786 
1787 	return error;
1788 }
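
/*
 * Illustrative sketch (hypothetical caller): an fsync-like sequence which
 * first synchronizes the frontend meta-data into the chain and then
 * flushes the chain topology to media.  Real callers run inside a
 * transaction and may pass additional XOP flags to the flush.
 */
#if 0
static int
example_inode_fsync_sequence(hammer2_inode_t *ip)
{
	int error;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
	error = hammer2_inode_chain_sync(ip);		/* meta-data -> chain */
	if (error == 0)
		error = hammer2_inode_chain_flush(ip, 0); /* chain -> media */
	hammer2_inode_unlock(ip);
	return error;
}
#endif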
1789 
1790 hammer2_key_t
1791 hammer2_pfs_inode_count(hammer2_pfs_t *pmp)
1792 {
1793 	struct hammer2_inode *ip;
1794 	hammer2_key_t count = 0;
1795 
1796 	hammer2_spin_ex(&pmp->inum_spin);
1797 	RB_FOREACH(ip, hammer2_inode_tree, &pmp->inum_tree)
1798 		count++;
1799 	hammer2_spin_unex(&pmp->inum_spin);
1800 
1801 	return count;
1802 }
1803 
1804 int
1805 vflush(struct mount *mp, int rootrefs, int flags)
1806 {
1807 	hammer2_pfs_t *pmp = MPTOPMP(mp);
1808 	struct hammer2_inode *ip, *tmp;
1809 	struct m_vnode *vp;
1810 	hammer2_key_t count_before, count_after, count_delta;
1811 
1812 	printf("%s: total chain %ld\n", __func__, hammer2_chain_allocs);
1813 	printf("%s: total dio %d\n", __func__, hammer2_dio_count);
1814 
1815 	hammer2_spin_ex(&pmp->inum_spin);
1816 	count_before = 0;
1817 	RB_FOREACH(ip, hammer2_inode_tree, &pmp->inum_tree)
1818 		count_before++;
1819 
1820 	RB_FOREACH_SAFE(ip, hammer2_inode_tree, &pmp->inum_tree, tmp) {
1821 		vp = ip->vp;
1822 		assert(vp);
1823 		if (!vp->v_vflushed) {
1824 			/*
1825 			printf("%s: drop ip=%p inum=%ld refs=%d\n",
1826 			    __func__, ip, ip->meta.inum, ip->refs);
1827 			*/
1828 			assert(ip->refs > 1);
1829 			hammer2_inode_drop(ip);
1830 			vp->v_vflushed = 1;
1831 		}
1832 	}
1833 
1834 	count_after = 0;
1835 	RB_FOREACH(ip, hammer2_inode_tree, &pmp->inum_tree)
1836 		count_after++;
1837 	hammer2_spin_unex(&pmp->inum_spin);
1838 
1839 	printf("%s: total inode %jd -> %jd\n",
1840 	    __func__, (intmax_t)count_before, (intmax_t)count_after);
1841 
1842 	assert(count_before >= count_after);
1843 	count_delta = count_before - count_after;
1844 
1845 	if (count_delta) {
1846 		if (hammer2_debug & 0x80000000)
1847 			assert(0);
1848 		else
1849 			printf("%s: %jd inode freed\n", __func__,
1850 			    (intmax_t)count_delta);
1851 	}
1852 
1853 	return 0;
1854 }
1855