xref: /dragonfly/sys/vfs/hammer2/hammer2_inode.c (revision 48e07bd5)
1 /*
2  * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 #include <sys/cdefs.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/types.h>
39 #include <sys/lock.h>
40 #include <sys/uuid.h>
41 #include <sys/vnode.h>
42 
43 #include "hammer2.h"
44 
45 #define INODE_DEBUG	0
46 
47 RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
48 	     hammer2_tid_t, meta.inum);
49 
50 int
51 hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
52 {
53 	if (ip1->meta.inum < ip2->meta.inum)
54 		return(-1);
55 	if (ip1->meta.inum > ip2->meta.inum)
56 		return(1);
57 	return(0);
58 }
59 
60 static __inline
61 void
62 hammer2_knote(struct vnode *vp, int flags)
63 {
64 	if (flags)
65 		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
66 }
67 
68 /*
69  * Caller holds pmp->list_spin and the inode should be locked.  Merge ip
70  * with the specified depend.
71  *
72  * If the ip is on SYNCQ it stays there and (void *)-1 is returned, indicating
73  * that successive calls must ensure the ip is on a pass2 depend (or they are
74  * all SYNCQ).  If the passed-in depend is not NULL and not (void *)-1 then
75  * we can set pass2 on it and return.
76  *
77  * If the ip is not on SYNCQ it is merged with the passed-in depend, creating
78  * a self-depend if necessary, and depend->pass2 is set according
79  * to the PASS2 flag.  SIDEQ is set.
80  */
81 static __noinline
82 hammer2_depend_t *
83 hammer2_inode_setdepend_locked(hammer2_inode_t *ip, hammer2_depend_t *depend)
84 {
85 	hammer2_pfs_t *pmp = ip->pmp;
86 	hammer2_depend_t *dtmp;
87 	hammer2_inode_t *iptmp;
88 
89 	/*
90 	 * If ip is SYNCQ its entry is used for the syncq list and it will
91 	 * no longer be associated with a dependency.  Merging this status
92 	 * with a passed-in depend implies PASS2.
93 	 */
94 	if (ip->flags & HAMMER2_INODE_SYNCQ) {
95 		if (depend == (void *)-1 ||
96 		    depend == NULL) {
97 			return ((void *)-1);
98 		}
99 		depend->pass2 = 1;
100 		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);
101 
102 		return depend;
103 	}
104 
105 	/*
106 	 * If ip is already SIDEQ, merge ip->depend into the passed-in depend.
107 	 * If it is not, associate the ip with the passed-in depend, creating
108 	 * a single-entry dependency using depend_static if necessary.
109 	 *
110 	 * NOTE: The use of ip->depend_static always requires that the
111 	 *	 specific ip containing the structure is part of that
112 	 *	 particular depend_static's dependency group.
113 	 */
114 	if (ip->flags & HAMMER2_INODE_SIDEQ) {
115 		/*
116 		 * Merge ip->depend with the passed-in depend.  If the
117 		 * passed-in depend is not a special case, all ips associated
118 		 * with ip->depend (including the original ip) must be moved
119 		 * to the passed-in depend.
120 		 */
121 		if (depend == NULL) {
122 			depend = ip->depend;
123 		} else if (depend == (void *)-1) {
124 			depend = ip->depend;
125 			depend->pass2 = 1;
126 		} else if (depend != ip->depend) {
127 #ifdef INVARIANTS
128 			int sanitychk = 0;
129 #endif
130 			dtmp = ip->depend;
131 			while ((iptmp = TAILQ_FIRST(&dtmp->sideq)) != NULL) {
132 #ifdef INVARIANTS
133 				if (iptmp == ip)
134 					sanitychk = 1;
135 #endif
136 				TAILQ_REMOVE(&dtmp->sideq, iptmp, entry);
137 				TAILQ_INSERT_TAIL(&depend->sideq, iptmp, entry);
138 				iptmp->depend = depend;
139 			}
140 			KKASSERT(sanitychk == 1);
141 			depend->count += dtmp->count;
142 			depend->pass2 |= dtmp->pass2;
143 			TAILQ_REMOVE(&pmp->depq, dtmp, entry);
144 			dtmp->count = 0;
145 			dtmp->pass2 = 0;
146 		}
147 	} else {
148 		/*
149 		 * Add ip to the sideq, creating a self-dependency if
150 		 * necessary.
151 		 */
152 		hammer2_inode_ref(ip);
153 		atomic_set_int(&ip->flags, HAMMER2_INODE_SIDEQ);
154 		if (depend == NULL) {
155 			depend = &ip->depend_static;
156 			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
157 		} else if (depend == (void *)-1) {
158 			depend = &ip->depend_static;
159 			depend->pass2 = 1;
160 			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
161 		} /* else add ip to passed-in depend */
162 		TAILQ_INSERT_TAIL(&depend->sideq, ip, entry);
163 		ip->depend = depend;
164 		++depend->count;
165 		++pmp->sideq_count;
166 	}
167 
168 	if (ip->flags & HAMMER2_INODE_SYNCQ_PASS2)
169 		depend->pass2 = 1;
170 	if (depend->pass2)
171 		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);
172 
173 	return depend;
174 }
175 
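/*
 * Illustrative sketch (compiled out): the calling convention for
 * hammer2_inode_setdepend_locked().  The caller holds pmp->list_spin,
 * holds both inodes locked, and threads the returned depend through
 * successive calls so that all of the inodes end up in the same
 * dependency group.  This mirrors hammer2_inode_depend() further below;
 * the function and argument names here are hypothetical.
 */
#if 0
static void
example_merge_depend(hammer2_inode_t *dirent_ip, hammer2_inode_t *target_ip)
{
	hammer2_pfs_t *pmp = dirent_ip->pmp;
	hammer2_depend_t *depend;

	hammer2_spin_ex(&pmp->list_spin);
	depend = hammer2_inode_setdepend_locked(dirent_ip, NULL);
	depend = hammer2_inode_setdepend_locked(target_ip, depend);
	/* depend now covers both inodes, or is (void *)-1 for SYNCQ inodes */
	hammer2_spin_unex(&pmp->list_spin);
}
#endif
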
176 /*
177  * Put a solo inode on the SIDEQ (meaning that it is dirty).  This can also
178  * occur from inode_lock4() and inode_depend().
179  *
180  * Caller must pass-in a locked inode.
181  */
182 void
183 hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
184 {
185 	hammer2_pfs_t *pmp = ip->pmp;
186 
187 	/*
188 	 * Optimize case to avoid pmp spinlock.
189 	 */
190 	if ((ip->flags & (HAMMER2_INODE_SYNCQ | HAMMER2_INODE_SIDEQ)) == 0) {
191 		hammer2_spin_ex(&pmp->list_spin);
192 		hammer2_inode_setdepend_locked(ip, NULL);
193 		hammer2_spin_unex(&pmp->list_spin);
194 	}
195 }
196 
197 /*
198  * Lock an inode, with SYNCQ semantics.
199  *
200  * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
201  * flags for options:
202  *
203  *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
204  *	  inode locking function will automatically set the RDONLY flag.
205  *	  Shared locks are not subject to SYNCQ semantics; exclusive locks
206  *	  are.
207  *
208  *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
209  *	  Most front-end inode locks do.
210  *
211  *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
212  *	  the inode data be resolved.  This is used by the syncthr because
213  *	  it can run on an unresolved/out-of-sync cluster, and also by the
214  *	  vnode reclamation code to avoid unnecessary I/O (particularly when
215  *	  disposing of hundreds of thousands of cached vnodes).
216  *
217  * This function, along with lock4, has SYNCQ semantics.  If the inode being
218  * locked is on the SYNCQ, that is it has been staged by the syncer, we must
219  * block until the operation is complete (even if we can lock the inode).  In
220  * order to reduce the stall time, we re-order the inode to the front of the
221  * pmp->syncq prior to blocking.  This reordering VERY significantly improves
222  * performance.
223  *
224  * The inode locking function locks the inode itself, resolves any stale
225  * chains in the inode's cluster, and allocates a fresh copy of the
226  * cluster with 1 ref and all the underlying chains locked.
227  *
228  * ip->cluster will be stable while the inode is locked.
229  *
230  * NOTE: We don't combine the inode/chain lock because putting away an
231  *       inode would otherwise confuse multiple lock holders of the inode.
232  */
233 void
234 hammer2_inode_lock(hammer2_inode_t *ip, int how)
235 {
236 	hammer2_pfs_t *pmp;
237 
238 	hammer2_inode_ref(ip);
239 	pmp = ip->pmp;
240 
241 	/*
242 	 * Inode structure mutex - Shared lock
243 	 */
244 	if (how & HAMMER2_RESOLVE_SHARED) {
245 		hammer2_mtx_sh(&ip->lock);
246 		return;
247 	}
248 
249 	/*
250 	 * Inode structure mutex - Exclusive lock
251 	 *
252 	 * An exclusive lock (if not recursive) must wait for inodes on
253 	 * SYNCQ to flush first, to ensure that meta-data dependencies such
254 	 * as the nlink count and related directory entries are not split
255 	 * across flushes.
256 	 *
257 	 * If the vnode is locked by the current thread it must be unlocked
258 	 * across the tsleep() to avoid a deadlock.
259 	 */
260 	hammer2_mtx_ex(&ip->lock);
261 	if (hammer2_mtx_refs(&ip->lock) > 1)
262 		return;
263 	while ((ip->flags & HAMMER2_INODE_SYNCQ) && pmp) {
264 		hammer2_spin_ex(&pmp->list_spin);
265 		if (ip->flags & HAMMER2_INODE_SYNCQ) {
266 			tsleep_interlock(&ip->flags, 0);
267 			atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
268 			TAILQ_REMOVE(&pmp->syncq, ip, entry);
269 			TAILQ_INSERT_HEAD(&pmp->syncq, ip, entry);
270 			hammer2_spin_unex(&pmp->list_spin);
271 			hammer2_mtx_unlock(&ip->lock);
272 			tsleep(&ip->flags, PINTERLOCKED, "h2sync", 0);
273 			hammer2_mtx_ex(&ip->lock);
274 			continue;
275 		}
276 		hammer2_spin_unex(&pmp->list_spin);
277 		break;
278 	}
279 }
280 
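/*
 * Illustrative sketch (compiled out): typical front-end use of
 * hammer2_inode_lock()/hammer2_inode_unlock().  A read-only operation
 * takes a shared lock and asks for the meta-data to be resolved, as
 * described above.  The function name is hypothetical.
 */
#if 0
static void
example_read_meta(hammer2_inode_t *ip)
{
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED |
			       HAMMER2_RESOLVE_ALWAYS);
	kprintf("inum %jd size %jd\n",
		(intmax_t)ip->meta.inum, (intmax_t)ip->meta.size);
	hammer2_inode_unlock(ip);
}
#endif
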
281 /*
282  * Exclusively lock up to four inodes, in order, with SYNCQ semantics.
283  * ip1 and ip2 must not be NULL.  ip3 and ip4 may be NULL, but if ip3 is
284  * NULL then ip4 must also be NULL.
285  *
286  * This creates a dependency between up to four inodes.
287  */
288 void
289 hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
290 		    hammer2_inode_t *ip3, hammer2_inode_t *ip4)
291 {
292 	hammer2_inode_t *ips[4];
293 	hammer2_inode_t *iptmp;
294 	hammer2_inode_t *ipslp;
295 	hammer2_depend_t *depend;
296 	hammer2_pfs_t *pmp;
297 	size_t count;
298 	size_t i;
299 
300 	pmp = ip1->pmp;			/* may be NULL */
301 	KKASSERT(pmp == ip2->pmp);
302 
303 	ips[0] = ip1;
304 	ips[1] = ip2;
305 	if (ip3 == NULL) {
306 		count = 2;
307 	} else if (ip4 == NULL) {
308 		count = 3;
309 		ips[2] = ip3;
310 		KKASSERT(pmp == ip3->pmp);
311 	} else {
312 		count = 4;
313 		ips[2] = ip3;
314 		ips[3] = ip4;
315 		KKASSERT(pmp == ip3->pmp);
316 		KKASSERT(pmp == ip4->pmp);
317 	}
318 
319 	for (i = 0; i < count; ++i)
320 		hammer2_inode_ref(ips[i]);
321 
322 restart:
323 	/*
324 	 * Lock the inodes in order
325 	 */
326 	for (i = 0; i < count; ++i) {
327 		hammer2_mtx_ex(&ips[i]->lock);
328 	}
329 
330 	/*
331 	 * Associate dependencies and record the first inode found on SYNCQ
332 	 * (the operation is allowed to proceed for inodes on PASS2) for our
333 	 * sleep operation; this inode is theoretically the last one sync'd
334 	 * in the sequence.
335 	 *
336 	 * All inodes found on SYNCQ are moved to the head of the syncq
337 	 * to reduce stalls.
338 	 */
339 	hammer2_spin_ex(&pmp->list_spin);
340 	depend = NULL;
341 	ipslp = NULL;
342 	for (i = 0; i < count; ++i) {
343 		iptmp = ips[i];
344 		depend = hammer2_inode_setdepend_locked(iptmp, depend);
345 		if (iptmp->flags & HAMMER2_INODE_SYNCQ) {
346 			TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
347 			TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
348 			if (ipslp == NULL)
349 				ipslp = iptmp;
350 		}
351 	}
352 	hammer2_spin_unex(&pmp->list_spin);
353 
354 	/*
355 	 * Block and retry if any of the inodes are on SYNCQ.  It is
356 	 * important that we allow the operation to proceed in the
357 	 * PASS2 case, to avoid deadlocking against the vnode.
358 	 */
359 	if (ipslp) {
360 		for (i = 0; i < count; ++i)
361 			hammer2_mtx_unlock(&ips[i]->lock);
362 		tsleep(&ipslp->flags, 0, "h2sync", 2);
363 		goto restart;
364 	}
365 }
366 
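/*
 * Illustrative sketch (compiled out): locking a directory inode and a
 * file inode together for a namespace operation so that they become part
 * of the same flush dependency.  The names are hypothetical.
 */
#if 0
static void
example_lock_pair(hammer2_inode_t *dip, hammer2_inode_t *ip)
{
	hammer2_inode_lock4(dip, ip, NULL, NULL);

	/* ... modify both inodes ... */

	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(dip);
}
#endif
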
367 /*
368  * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
369  * we wake them up.
370  */
371 void
372 hammer2_inode_unlock(hammer2_inode_t *ip)
373 {
374 	if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) {
375 		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
376 		hammer2_mtx_unlock(&ip->lock);
377 		wakeup(&ip->flags);
378 	} else {
379 		hammer2_mtx_unlock(&ip->lock);
380 	}
381 	hammer2_inode_drop(ip);
382 }
383 
384 /*
385  * If either ip1 or ip2 has been tapped by the syncer, make sure that both
386  * are.  This ensures that dependencies (e.g. dirent-v-inode) are synced
387  * together.  For dirent-v-inode depends, pass the dirent as ip1.
388  *
389  * If neither ip1 nor ip2 has been tapped by the syncer, merge them into a
390  * single dependency.  Dependencies are entered into pmp->depq.  This
391  * effectively flags the inodes SIDEQ.
392  *
393  * Both ip1 and ip2 must be locked by the caller.  This also ensures
394  * that we can't race the end of the syncer's queue run.
395  */
396 void
397 hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
398 {
399 	hammer2_pfs_t *pmp;
400 	hammer2_depend_t *depend;
401 
402 	pmp = ip1->pmp;
403 	hammer2_spin_ex(&pmp->list_spin);
404 	depend = hammer2_inode_setdepend_locked(ip1, NULL);
405 	depend = hammer2_inode_setdepend_locked(ip2, depend);
406 	hammer2_spin_unex(&pmp->list_spin);
407 }
408 
409 /*
410  * Select a chain out of an inode's cluster and lock it.
411  *
412  * The inode does not have to be locked.
413  */
414 hammer2_chain_t *
415 hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
416 {
417 	hammer2_chain_t *chain;
418 	hammer2_cluster_t *cluster;
419 
420 	hammer2_spin_sh(&ip->cluster_spin);
421 	cluster = &ip->cluster;
422 	if (clindex >= cluster->nchains)
423 		chain = NULL;
424 	else
425 		chain = cluster->array[clindex].chain;
426 	if (chain) {
427 		hammer2_chain_ref(chain);
428 		hammer2_spin_unsh(&ip->cluster_spin);
429 		hammer2_chain_lock(chain, how);
430 	} else {
431 		hammer2_spin_unsh(&ip->cluster_spin);
432 	}
433 	return chain;
434 }
435 
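/*
 * Illustrative sketch (compiled out): selecting a chain out of an inode's
 * cluster.  The returned chain is referenced and locked and must be
 * released with hammer2_chain_unlock() + hammer2_chain_drop().  The
 * function name is hypothetical.
 */
#if 0
static void
example_inspect_chain(hammer2_inode_t *ip, int clindex)
{
	hammer2_chain_t *chain;

	chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
	if (chain) {
		kprintf("chain %p key %016jx\n",
			chain, (uintmax_t)chain->bref.key);
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}
#endif
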
436 hammer2_chain_t *
437 hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
438 			       hammer2_chain_t **parentp, int how)
439 {
440 	hammer2_chain_t *chain;
441 	hammer2_chain_t *parent;
442 
443 	for (;;) {
444 		hammer2_spin_sh(&ip->cluster_spin);
445 		if (clindex >= ip->cluster.nchains)
446 			chain = NULL;
447 		else
448 			chain = ip->cluster.array[clindex].chain;
449 		if (chain) {
450 			hammer2_chain_ref(chain);
451 			hammer2_spin_unsh(&ip->cluster_spin);
452 			hammer2_chain_lock(chain, how);
453 		} else {
454 			hammer2_spin_unsh(&ip->cluster_spin);
455 		}
456 
457 		/*
458 		 * Get parent, lock order must be (parent, chain).
459 		 */
460 		parent = chain->parent;
461 		if (parent) {
462 			hammer2_chain_ref(parent);
463 			hammer2_chain_unlock(chain);
464 			hammer2_chain_lock(parent, how);
465 			hammer2_chain_lock(chain, how);
466 		}
467 		if (ip->cluster.array[clindex].chain == chain &&
468 		    chain->parent == parent) {
469 			break;
470 		}
471 
472 		/*
473 		 * Retry
474 		 */
475 		hammer2_chain_unlock(chain);
476 		hammer2_chain_drop(chain);
477 		if (parent) {
478 			hammer2_chain_unlock(parent);
479 			hammer2_chain_drop(parent);
480 		}
481 	}
482 	*parentp = parent;
483 
484 	return chain;
485 }
486 
487 /*
488  * Temporarily release a lock held shared or exclusive.  Caller must
489  * hold the lock shared or exclusive on call and lock will be released
490  * on return.
491  *
492  * Restore a lock that was temporarily released.
493  */
494 hammer2_mtx_state_t
495 hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
496 {
497 	return hammer2_mtx_temp_release(&ip->lock);
498 }
499 
500 void
501 hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
502 {
503 	hammer2_mtx_temp_restore(&ip->lock, ostate);
504 }
505 
506 /*
507  * Upgrade a shared inode lock to exclusive and return.  If the inode lock
508  * is already held exclusively this is a NOP.
509  *
510  * The caller MUST hold the inode lock either shared or exclusive on call
511  * and will own the lock exclusively on return.
512  *
513  * Returns non-zero if the lock was already exclusive prior to the upgrade.
514  */
515 int
516 hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
517 {
518 	int wasexclusive;
519 
520 	if (mtx_islocked_ex(&ip->lock)) {
521 		wasexclusive = 1;
522 	} else {
523 		hammer2_mtx_unlock(&ip->lock);
524 		hammer2_mtx_ex(&ip->lock);
525 		wasexclusive = 0;
526 	}
527 	return wasexclusive;
528 }
529 
530 /*
531  * Downgrade an inode lock from exclusive to shared only if the inode
532  * lock was previously shared.  If the inode lock was previously exclusive,
533  * this is a NOP.
534  */
535 void
536 hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
537 {
538 	if (wasexclusive == 0)
539 		hammer2_mtx_downgrade(&ip->lock);
540 }
541 
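/*
 * Illustrative sketch (compiled out): the upgrade/downgrade pattern used
 * when a caller holding a shared or exclusive inode lock temporarily
 * needs exclusive access (hammer2_igetv() below is a real user).  The
 * function name is hypothetical.
 */
#if 0
static void
example_upgrade_pattern(hammer2_inode_t *ip)
{
	int wasexclusive;

	wasexclusive = hammer2_inode_lock_upgrade(ip);
	/* ... work requiring the exclusive lock ... */
	hammer2_inode_lock_downgrade(ip, wasexclusive);
}
#endif
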
542 /*
543  * Lookup an inode by inode number
544  */
545 hammer2_inode_t *
546 hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
547 {
548 	hammer2_inode_t *ip;
549 
550 	KKASSERT(pmp);
551 	if (pmp->spmp_hmp) {
552 		ip = NULL;
553 	} else {
554 		hammer2_spin_ex(&pmp->inum_spin);
555 		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
556 		if (ip)
557 			hammer2_inode_ref(ip);
558 		hammer2_spin_unex(&pmp->inum_spin);
559 	}
560 	return(ip);
561 }
562 
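/*
 * Illustrative sketch (compiled out): looking up an in-memory inode by
 * inode number.  The returned inode is referenced but not locked and must
 * eventually be dropped.  The function name is hypothetical.
 */
#if 0
static void
example_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	ip = hammer2_inode_lookup(pmp, inum);
	if (ip) {
		/* typically hammer2_inode_lock(ip, ...) would go here */
		hammer2_inode_drop(ip);
	}
}
#endif
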
563 /*
564  * Adding a ref to an inode is only legal if the inode already has at least
565  * one ref.
566  *
567  * (can be called with spinlock held)
568  */
569 void
570 hammer2_inode_ref(hammer2_inode_t *ip)
571 {
572 	atomic_add_int(&ip->refs, 1);
573 	if (hammer2_debug & 0x80000) {
574 		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
575 		print_backtrace(8);
576 	}
577 }
578 
579 /*
580  * Drop an inode reference, freeing the inode when the last reference goes
581  * away.
582  */
583 void
584 hammer2_inode_drop(hammer2_inode_t *ip)
585 {
586 	hammer2_pfs_t *pmp;
587 	u_int refs;
588 
589 	while (ip) {
590 		if (hammer2_debug & 0x80000) {
591 			kprintf("INODE-1 %p (%d->%d)\n",
592 				ip, ip->refs, ip->refs - 1);
593 			print_backtrace(8);
594 		}
595 		refs = ip->refs;
596 		cpu_ccfence();
597 		if (refs == 1) {
598 			/*
599 			 * Transition to zero, must interlock with
600 			 * the inode inumber lookup tree (if applicable).
601 			 * It should not be possible for anyone to race
602 			 * the transition to 0.
603 			 */
604 			pmp = ip->pmp;
605 			KKASSERT(pmp);
606 			hammer2_spin_ex(&pmp->inum_spin);
607 
608 			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
609 				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
610 				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
611 					atomic_clear_int(&ip->flags,
612 						     HAMMER2_INODE_ONRBTREE);
613 					RB_REMOVE(hammer2_inode_tree,
614 						  &pmp->inum_tree, ip);
615 					--pmp->inum_count;
616 				}
617 				hammer2_spin_unex(&pmp->inum_spin);
618 
619 				ip->pmp = NULL;
620 
621 				/*
622 				 * Cleaning out ip->cluster isn't entirely
623 				 * trivial.
624 				 */
625 				hammer2_inode_repoint(ip, NULL);
626 
627 				kfree_obj(ip, pmp->minode);
628 				atomic_add_long(&pmp->inmem_inodes, -1);
629 				ip = NULL;	/* will terminate loop */
630 			} else {
631 				hammer2_spin_unex(&ip->pmp->inum_spin);
632 			}
633 		} else {
634 			/*
635 			 * Non zero transition
636 			 */
637 			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
638 				break;
639 		}
640 	}
641 }
642 
643 /*
644  * Get the vnode associated with the given inode, allocating the vnode if
645  * necessary.  The vnode will be returned exclusively locked.
646  *
647  * *errorp is set to a UNIX error, not a HAMMER2 error.
648  *
649  * The caller must lock the inode (shared or exclusive).
650  *
651  * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
652  * races.
653  */
654 struct vnode *
655 hammer2_igetv(hammer2_inode_t *ip, int *errorp)
656 {
657 	hammer2_pfs_t *pmp;
658 	struct vnode *vp;
659 
660 	pmp = ip->pmp;
661 	KKASSERT(pmp != NULL);
662 	*errorp = 0;
663 
664 	for (;;) {
665 		/*
666 		 * Attempt to reuse an existing vnode assignment.  It is
667 		 * possible to race a reclaim so the vget() may fail.  The
668 		 * inode must be unlocked during the vget() to avoid a
669 		 * deadlock against a reclaim.
670 		 */
671 		int wasexclusive;
672 
673 		vp = ip->vp;
674 		if (vp) {
675 			/*
676 			 * Inode must be unlocked during the vget() to avoid
677 			 * possible deadlocks, but leave the ip ref intact.
678 			 *
679 			 * vnode is held to prevent destruction during the
680 			 * vget().  The vget() can still fail if we lost
681 			 * a reclaim race on the vnode.
682 			 */
683 			hammer2_mtx_state_t ostate;
684 
685 			vhold(vp);
686 			ostate = hammer2_inode_lock_temp_release(ip);
687 			if (vget(vp, LK_EXCLUSIVE)) {
688 				vdrop(vp);
689 				hammer2_inode_lock_temp_restore(ip, ostate);
690 				continue;
691 			}
692 			hammer2_inode_lock_temp_restore(ip, ostate);
693 			vdrop(vp);
694 			/* vp still locked and ref from vget */
695 			if (ip->vp != vp) {
696 				kprintf("hammer2: igetv race %p/%p\n",
697 					ip->vp, vp);
698 				vput(vp);
699 				continue;
700 			}
701 			*errorp = 0;
702 			break;
703 		}
704 
705 		/*
706 		 * No vnode exists, allocate a new vnode.  Beware of
707 		 * allocation races.  This function will return an
708 		 * exclusively locked and referenced vnode.
709 		 */
710 		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
711 		if (*errorp) {
712 			kprintf("hammer2: igetv getnewvnode failed %d\n",
713 				*errorp);
714 			vp = NULL;
715 			break;
716 		}
717 
718 		/*
719 		 * Lock the inode and check for an allocation race.
720 		 */
721 		wasexclusive = hammer2_inode_lock_upgrade(ip);
722 		if (ip->vp != NULL) {
723 			vp->v_type = VBAD;
724 			vx_put(vp);
725 			hammer2_inode_lock_downgrade(ip, wasexclusive);
726 			continue;
727 		}
728 
729 		switch (ip->meta.type) {
730 		case HAMMER2_OBJTYPE_DIRECTORY:
731 			vp->v_type = VDIR;
732 			break;
733 		case HAMMER2_OBJTYPE_REGFILE:
734 			/*
735 			 * Regular file must use buffer cache I/O
736 			 * (VKVABIO cpu sync semantics supported)
737 			 */
738 			vp->v_type = VREG;
739 			vsetflags(vp, VKVABIO);
740 			vinitvmio(vp, ip->meta.size,
741 				  HAMMER2_LBUFSIZE,
742 				  (int)ip->meta.size & HAMMER2_LBUFMASK);
743 			break;
744 		case HAMMER2_OBJTYPE_SOFTLINK:
745 			/*
746 			 * XXX for now we are using the generic file_read
747 			 * and file_write code so we need a buffer cache
748 			 * association.
749 			 *
750 			 * (VKVABIO cpu sync semantics supported)
751 			 */
752 			vp->v_type = VLNK;
753 			vsetflags(vp, VKVABIO);
754 			vinitvmio(vp, ip->meta.size,
755 				  HAMMER2_LBUFSIZE,
756 				  (int)ip->meta.size & HAMMER2_LBUFMASK);
757 			break;
758 		case HAMMER2_OBJTYPE_CDEV:
759 			vp->v_type = VCHR;
760 			/* fall through */
761 		case HAMMER2_OBJTYPE_BDEV:
762 			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
763 			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
764 				vp->v_type = VBLK;
765 			addaliasu(vp,
766 				  ip->meta.rmajor,
767 				  ip->meta.rminor);
768 			break;
769 		case HAMMER2_OBJTYPE_FIFO:
770 			vp->v_type = VFIFO;
771 			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
772 			break;
773 		case HAMMER2_OBJTYPE_SOCKET:
774 			vp->v_type = VSOCK;
775 			break;
776 		default:
777 			panic("hammer2: unhandled objtype %d",
778 			      ip->meta.type);
779 			break;
780 		}
781 
782 		if (ip == pmp->iroot)
783 			vsetflags(vp, VROOT);
784 
785 		vp->v_data = ip;
786 		ip->vp = vp;
787 		hammer2_inode_ref(ip);		/* vp association */
788 		hammer2_inode_lock_downgrade(ip, wasexclusive);
789 		vx_downgrade(vp);
790 		break;
791 	}
792 
793 	/*
794 	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
795 	 */
796 	if (hammer2_debug & 0x0002) {
797 		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
798 			vp, vp->v_refcnt, vp->v_auxrefs);
799 	}
800 	return (vp);
801 }
802 
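/*
 * Illustrative sketch (compiled out): obtaining a vnode for an inode.
 * The caller holds the inode lock across the call, per the comment above;
 * the vnode comes back locked and referenced and is eventually released
 * with vput().  The function name is hypothetical.
 */
#if 0
static int
example_get_vnode(hammer2_inode_t *ip, struct vnode **vpp)
{
	int error;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	*vpp = hammer2_igetv(ip, &error);
	hammer2_inode_unlock(ip);

	/* the caller vput()s *vpp when done with it */
	return error;
}
#endif
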
803 /*
804  * XXX this API needs a rewrite.  It needs to be split into a
805  * hammer2_inode_alloc() and hammer2_inode_build() to allow us to get
806  * rid of the inode/chain lock reversal fudge.
807  *
808  * Returns the inode associated with the passed-in cluster, allocating a new
809  * hammer2_inode structure if necessary, then synchronizing it to the passed
810  * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
811  * is synchronized.  Otherwise the whole cluster is synchronized.  inum will
812  * be extracted from the passed-in xop and the inum argument will be ignored.
813  *
814  * If xop is passed as NULL then a new hammer2_inode is allocated with the
815  * specified inum, and returned.   For normal inodes, the inode will be
816  * indexed in memory and if it already exists the existing ip will be
817  * returned instead of allocating a new one.  The superroot and PFS inodes
818  * are not indexed in memory.
819  *
820  * The passed-in cluster must be locked and will remain locked on return.
821  * The returned inode will be locked and the caller may dispose of both
822  * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
823  * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
824  *
825  * The hammer2_inode structure regulates the interface between the high level
826  * kernel VNOPS API and the filesystem backend (the chains).
827  *
828  * On return the inode is locked with the supplied cluster.
829  */
830 hammer2_inode_t *
831 hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
832 		  hammer2_tid_t inum, int idx)
833 {
834 	hammer2_inode_t *nip;
835 	const hammer2_inode_data_t *iptmp;
836 	const hammer2_inode_data_t *nipdata;
837 
838 	KKASSERT(xop == NULL ||
839 		 hammer2_cluster_type(&xop->cluster) ==
840 		 HAMMER2_BREF_TYPE_INODE);
841 	KKASSERT(pmp);
842 
843 	/*
844 	 * Interlocked lookup/ref of the inode.  This code is only needed
845 	 * when looking up inodes with nlinks != 0 (TODO: optimize out
846 	 * otherwise and test for duplicates).
847 	 *
848 	 * Cluster can be NULL during the initial pfs allocation.
849 	 */
850 	if (xop) {
851 		iptmp = &hammer2_xop_gdata(xop)->ipdata;
852 		inum = iptmp->meta.inum;
853 		hammer2_xop_pdata(xop);
854 	}
855 again:
856 	nip = hammer2_inode_lookup(pmp, inum);
857 	if (nip) {
858 		/*
859 		 * We may have to unhold the cluster to avoid a deadlock
860 		 * against vnlru (and possibly other XOPs).
861 		 */
862 		if (xop) {
863 			if (hammer2_mtx_ex_try(&nip->lock) != 0) {
864 				hammer2_cluster_unhold(&xop->cluster);
865 				hammer2_mtx_ex(&nip->lock);
866 				hammer2_cluster_rehold(&xop->cluster);
867 			}
868 		} else {
869 			hammer2_mtx_ex(&nip->lock);
870 		}
871 
872 		/*
873 		 * Handle SMP race (not applicable to the super-root spmp
874 		 * which can't index inodes due to duplicative inode numbers).
875 		 */
876 		if (pmp->spmp_hmp == NULL &&
877 		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
878 			hammer2_mtx_unlock(&nip->lock);
879 			hammer2_inode_drop(nip);
880 			goto again;
881 		}
882 		if (xop) {
883 			if (idx >= 0)
884 				hammer2_inode_repoint_one(nip, &xop->cluster,
885 							  idx);
886 			else
887 				hammer2_inode_repoint(nip, &xop->cluster);
888 		}
889 		return nip;
890 	}
891 
892 	/*
893 	 * We couldn't find the inode number, create a new inode and try to
894 	 * insert it, handle insertion races.
895 	 */
896 	nip = kmalloc_obj(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
897 	spin_init(&nip->cluster_spin, "h2clspin");
898 	atomic_add_long(&pmp->inmem_inodes, 1);
899 	if (pmp->spmp_hmp)
900 		nip->flags = HAMMER2_INODE_SROOT;
901 
902 	/*
903 	 * Initialize nip's cluster.  A cluster is provided for normal
904 	 * inodes but typically not for the super-root or PFS inodes.
905 	 */
906 	{
907 		hammer2_inode_t *nnip = nip;
908 		nip->ihash = (int)hammer2_icrc32(&nnip, sizeof(nnip));
909 	}
910 
911 	nip->cluster.refs = 1;
912 	nip->cluster.pmp = pmp;
913 	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
914 	if (xop) {
915 		nipdata = &hammer2_xop_gdata(xop)->ipdata;
916 		nip->meta = nipdata->meta;
917 		hammer2_xop_pdata(xop);
918 		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
919 		hammer2_inode_repoint(nip, &xop->cluster);
920 	} else {
921 		nip->meta.inum = inum;		/* PFS inum is always 1 XXX */
922 		/* mtime will be updated when a cluster is available */
923 		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);	/*XXX*/
924 	}
925 
926 	nip->pmp = pmp;
927 
928 	/*
929 	 * ref and lock on nip gives it state compatible to after a
930 	 * hammer2_inode_lock() call.
931 	 */
932 	nip->refs = 1;
933 	hammer2_mtx_init(&nip->lock, "h2inode");
934 	hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
935 	hammer2_mtx_ex(&nip->lock);
936 	TAILQ_INIT(&nip->depend_static.sideq);
937 	/* combination of thread lock and chain lock == inode lock */
938 
939 	/*
940 	 * Attempt to add the inode.  If it fails we raced another inode
941 	 * get.  Undo all the work and try again.
942 	 */
943 	if (pmp->spmp_hmp == NULL) {
944 		hammer2_spin_ex(&pmp->inum_spin);
945 		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
946 			hammer2_spin_unex(&pmp->inum_spin);
947 			hammer2_mtx_unlock(&nip->lock);
948 			hammer2_inode_drop(nip);
949 			goto again;
950 		}
951 		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
952 		++pmp->inum_count;
953 		hammer2_spin_unex(&pmp->inum_spin);
954 	}
955 	return (nip);
956 }
957 
958 /*
959  * Create a PFS inode under the superroot.  This function will create the
960  * inode, its media chains, and also insert it into the media.
961  *
962  * Caller must be in a flush transaction because we are inserting the inode
963  * onto the media.
964  */
965 hammer2_inode_t *
966 hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
967 		     const uint8_t *name, size_t name_len,
968 		     int *errorp)
969 {
970 	hammer2_xop_create_t *xop;
971 	hammer2_inode_t *pip;
972 	hammer2_inode_t *nip;
973 	int error;
974 	uuid_t pip_uid;
975 	uuid_t pip_gid;
976 	uint32_t pip_mode;
977 	uint8_t pip_comp_algo;
978 	uint8_t pip_check_algo;
979 	hammer2_tid_t pip_inum;
980 	hammer2_key_t lhc;
981 
982 	pip = spmp->iroot;
983 	nip = NULL;
984 
985 	lhc = hammer2_dirhash(name, name_len);
986 	*errorp = 0;
987 
988 	/*
989 	 * Locate the inode or indirect block to create the new
990 	 * entry in.  At the same time check for key collisions
991 	 * and iterate until we don't get one.
992 	 *
993 	 * Lock the directory exclusively for now to guarantee that
994 	 * we can find an unused lhc for the name.  Due to collisions,
995 	 * two different creates can end up with the same lhc so we
996 	 * cannot depend on the OS to prevent the collision.
997 	 */
998 	hammer2_inode_lock(pip, 0);
999 
1000 	pip_uid = pip->meta.uid;
1001 	pip_gid = pip->meta.gid;
1002 	pip_mode = pip->meta.mode;
1003 	pip_comp_algo = pip->meta.comp_algo;
1004 	pip_check_algo = pip->meta.check_algo;
1005 	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;
1006 
1007 	/*
1008 	 * Locate an unused key in the collision space.
1009 	 */
1010 	{
1011 		hammer2_xop_scanlhc_t *sxop;
1012 		hammer2_key_t lhcbase;
1013 
1014 		lhcbase = lhc;
1015 		sxop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
1016 		sxop->lhc = lhc;
1017 		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
1018 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
1019 			if (lhc != sxop->head.cluster.focus->bref.key)
1020 				break;
1021 			++lhc;
1022 		}
1023 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
1024 
1025 		if (error) {
1026 			if (error != HAMMER2_ERROR_ENOENT)
1027 				goto done2;
1028 			++lhc;
1029 			error = 0;
1030 		}
1031 		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
1032 			error = HAMMER2_ERROR_ENOSPC;
1033 			goto done2;
1034 		}
1035 	}
1036 
1037 	/*
1038 	 * Create the inode with the lhc as the key.
1039 	 */
1040 	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
1041 	xop->lhc = lhc;
1042 	xop->flags = HAMMER2_INSERT_PFSROOT;
1043 	bzero(&xop->meta, sizeof(xop->meta));
1044 
1045 	xop->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
1046 	xop->meta.inum = 1;
1047 	xop->meta.iparent = pip_inum;
1048 
1049 	/* Inherit parent's inode compression mode. */
1050 	xop->meta.comp_algo = pip_comp_algo;
1051 	xop->meta.check_algo = pip_check_algo;
1052 	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
1053 	hammer2_update_time(&xop->meta.ctime);
1054 	xop->meta.mtime = xop->meta.ctime;
1055 	xop->meta.mode = 0755;
1056 	xop->meta.nlinks = 1;
1057 
1058 	/*
1059 	 * Regular files and softlinks allow a small amount of data to be
1060 	 * directly embedded in the inode.  This flag will be cleared if
1061 	 * the size is extended past the embedded limit.
1062 	 */
1063 	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
1064 	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
1065 		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
1066 	}
1067 	hammer2_xop_setname(&xop->head, name, name_len);
1068 	xop->meta.name_len = name_len;
1069 	xop->meta.name_key = lhc;
1070 	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
1071 
1072 	hammer2_xop_start(&xop->head, &hammer2_inode_create_desc);
1073 
1074 	error = hammer2_xop_collect(&xop->head, 0);
1075 #if INODE_DEBUG
1076 	kprintf("CREATE INODE %*.*s\n",
1077 		(int)name_len, (int)name_len, name);
1078 #endif
1079 
1080 	if (error) {
1081 		*errorp = error;
1082 		goto done;
1083 	}
1084 
1085 	/*
1086 	 * Set up the new inode if not a hardlink pointer.
1087 	 *
1088 	 * NOTE: *_get() integrates chain's lock into the inode lock.
1089 	 *
1090 	 * NOTE: Only one new inode can currently be created per
1091 	 *	 transaction.  If the need arises we can adjust
1092 	 *	 hammer2_trans_init() to allow more.
1093 	 *
1094 	 * NOTE: nipdata will have chain's blockset data.
1095 	 */
1096 	nip = hammer2_inode_get(pip->pmp, &xop->head, -1, -1);
1097 	nip->comp_heuristic = 0;
1098 done:
1099 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1100 done2:
1101 	hammer2_inode_unlock(pip);
1102 
1103 	return (nip);
1104 }
1105 
1106 /*
1107  * Create a new, normal inode.  This function will create the inode,
1108  * the media chains, but will not insert the chains onto the media topology
1109  * (doing so would require a flush transaction and cause long stalls).
1110  *
1111  * Caller must be in a normal transaction.
1112  */
1113 hammer2_inode_t *
1114 hammer2_inode_create_normal(hammer2_inode_t *pip,
1115 			    struct vattr *vap, struct ucred *cred,
1116 			    hammer2_key_t inum, int *errorp)
1117 {
1118 	hammer2_xop_create_t *xop;
1119 	hammer2_inode_t *dip;
1120 	hammer2_inode_t *nip;
1121 	int error;
1122 	uid_t xuid;
1123 	uuid_t pip_uid;
1124 	uuid_t pip_gid;
1125 	uint32_t pip_mode;
1126 	uint8_t pip_comp_algo;
1127 	uint8_t pip_check_algo;
1128 	hammer2_tid_t pip_inum;
1129 	uint8_t type;
1130 
1131 	dip = pip->pmp->iroot;
1132 	KKASSERT(dip != NULL);
1133 
1134 	*errorp = 0;
1135 
1136 	/*hammer2_inode_lock(dip, 0);*/
1137 
1138 	pip_uid = pip->meta.uid;
1139 	pip_gid = pip->meta.gid;
1140 	pip_mode = pip->meta.mode;
1141 	pip_comp_algo = pip->meta.comp_algo;
1142 	pip_check_algo = pip->meta.check_algo;
1143 	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;
1144 
1145 	/*
1146 	 * Create the in-memory hammer2_inode structure for the specified
1147 	 * inode.
1148 	 */
1149 	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
1150 	nip->comp_heuristic = 0;
1151 	KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
1152 		 nip->cluster.nchains == 0);
1153 	atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);
1154 
1155 	/*
1156 	 * Setup the inode meta-data
1157 	 */
1158 	nip->meta.type = hammer2_get_obj_type(vap->va_type);
1159 
1160 	switch (nip->meta.type) {
1161 	case HAMMER2_OBJTYPE_CDEV:
1162 	case HAMMER2_OBJTYPE_BDEV:
1163 		nip->meta.rmajor = vap->va_rmajor;
1164 		nip->meta.rminor = vap->va_rminor;
1165 		break;
1166 	default:
1167 		break;
1168 	}
1169 	type = nip->meta.type;
1170 
1171 	KKASSERT(nip->meta.inum == inum);
1172 	nip->meta.iparent = pip_inum;
1173 
1174 	/* Inherit parent's inode compression mode. */
1175 	nip->meta.comp_algo = pip_comp_algo;
1176 	nip->meta.check_algo = pip_check_algo;
1177 	nip->meta.version = HAMMER2_INODE_VERSION_ONE;
1178 	hammer2_update_time(&nip->meta.ctime);
1179 	nip->meta.mtime = nip->meta.ctime;
1180 	nip->meta.mode = vap->va_mode;
1181 	nip->meta.nlinks = 1;
1182 
1183 	xuid = hammer2_to_unix_xid(&pip_uid);
1184 	xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
1185 				     xuid, cred,
1186 				     &vap->va_mode);
1187 	if (vap->va_vaflags & VA_UID_UUID_VALID)
1188 		nip->meta.uid = vap->va_uid_uuid;
1189 	else if (vap->va_uid != (uid_t)VNOVAL)
1190 		hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
1191 	else
1192 		hammer2_guid_to_uuid(&nip->meta.uid, xuid);
1193 
1194 	if (vap->va_vaflags & VA_GID_UUID_VALID)
1195 		nip->meta.gid = vap->va_gid_uuid;
1196 	else if (vap->va_gid != (gid_t)VNOVAL)
1197 		hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
1198 	else
1199 		nip->meta.gid = pip_gid;
1200 
1201 	/*
1202 	 * Regular files and softlinks allow a small amount of data to be
1203 	 * directly embedded in the inode.  This flag will be cleared if
1204 	 * the size is extended past the embedded limit.
1205 	 */
1206 	if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
1207 	    nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
1208 		nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
1209 	}
1210 
1211 	/*
1212 	 * Create the inode using (inum) as the key.  Pass pip for
1213 	 * method inheritance.
1214 	 */
1215 	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
1216 	xop->lhc = inum;
1217 	xop->flags = 0;
1218 	xop->meta = nip->meta;
1219 	KKASSERT(vap);
1220 
1221 	xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
1222 	xop->meta.name_key = inum;
1223 	nip->meta.name_len = xop->meta.name_len;
1224 	nip->meta.name_key = xop->meta.name_key;
1225 	hammer2_inode_modify(nip);
1226 
1227 	/*
1228 	 * Create the inode media chains but leave them detached.  We are
1229 	 * not in a flush transaction so we can't mess with media topology
1230 	 * above normal inodes (i.e. the index of the inodes themselves).
1231 	 *
1232 	 * We've already set the INODE_CREATING flag.  The inode's media
1233 	 * chains will be inserted onto the media topology on the next
1234 	 * filesystem sync.
1235 	 */
1236 	hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);
1237 
1238 	error = hammer2_xop_collect(&xop->head, 0);
1239 #if INODE_DEBUG
1240 	kprintf("create inode type %d error %d\n", nip->meta.type, error);
1241 #endif
1242 
1243 	if (error) {
1244 		*errorp = error;
1245 		goto done;
1246 	}
1247 
1248 	/*
1249 	 * Associate the media chains created by the backend with the
1250 	 * frontend inode.
1251 	 */
1252 	hammer2_inode_repoint(nip, &xop->head.cluster);
1253 done:
1254 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1255 	/*hammer2_inode_unlock(dip);*/
1256 
1257 	return (nip);
1258 }
1259 
1260 /*
1261  * Create a directory entry under dip with the specified name, inode number,
1262  * and OBJTYPE (type).
1263  *
1264  * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
1265  *
1266  * Caller must hold dip locked.
1267  */
1268 int
1269 hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
1270 		      hammer2_key_t inum, uint8_t type)
1271 {
1272 	hammer2_xop_mkdirent_t *xop;
1273 	hammer2_key_t lhc;
1274 	int error;
1275 
1276 	lhc = 0;
1277 	error = 0;
1278 
1279 	KKASSERT(name != NULL);
1280 	lhc = hammer2_dirhash(name, name_len);
1281 
1282 	/*
1283 	 * Locate the inode or indirect block to create the new
1284 	 * entry in.  At the same time check for key collisions
1285 	 * and iterate until we don't get one.
1286 	 *
1287 	 * Lock the directory exclusively for now to guarantee that
1288 	 * we can find an unused lhc for the name.  Due to collisions,
1289 	 * two different creates can end up with the same lhc so we
1290 	 * cannot depend on the OS to prevent the collision.
1291 	 */
1292 	hammer2_inode_modify(dip);
1293 
1294 	/*
1295 	 * If name specified, locate an unused key in the collision space.
1296 	 * Otherwise use the passed-in lhc directly.
1297 	 */
1298 	{
1299 		hammer2_xop_scanlhc_t *sxop;
1300 		hammer2_key_t lhcbase;
1301 
1302 		lhcbase = lhc;
1303 		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1304 		sxop->lhc = lhc;
1305 		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
1306 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
1307 			if (lhc != sxop->head.cluster.focus->bref.key)
1308 				break;
1309 			++lhc;
1310 		}
1311 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
1312 
1313 		if (error) {
1314 			if (error != HAMMER2_ERROR_ENOENT)
1315 				goto done2;
1316 			++lhc;
1317 			error = 0;
1318 		}
1319 		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
1320 			error = HAMMER2_ERROR_ENOSPC;
1321 			goto done2;
1322 		}
1323 	}
1324 
1325 	/*
1326 	 * Create the directory entry with the lhc as the key.
1327 	 */
1328 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1329 	xop->lhc = lhc;
1330 	bzero(&xop->dirent, sizeof(xop->dirent));
1331 	xop->dirent.inum = inum;
1332 	xop->dirent.type = type;
1333 	xop->dirent.namlen = name_len;
1334 
1335 	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
1336 	hammer2_xop_setname(&xop->head, name, name_len);
1337 
1338 	hammer2_xop_start(&xop->head, &hammer2_inode_mkdirent_desc);
1339 
1340 	error = hammer2_xop_collect(&xop->head, 0);
1341 
1342 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1343 done2:
1344 	error = hammer2_error_to_errno(error);
1345 
1346 	return error;
1347 }
1348 
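/*
 * Illustrative sketch (compiled out), loosely following the create-style
 * VOP paths: inside a normal transaction the front end creates the new
 * inode, creates the directory entry pointing at it, and then ties the
 * directory and the new inode into the same flush dependency.  The
 * function name is hypothetical, error reporting is omitted, and the
 * inode number is assumed to have been allocated by the caller.  dip must
 * be locked by the caller; nip is returned locked.
 */
#if 0
static hammer2_inode_t *
example_create_entry(hammer2_inode_t *dip, const char *name, size_t name_len,
		     struct vattr *vap, struct ucred *cred,
		     hammer2_key_t inum)
{
	hammer2_inode_t *nip;
	int error;

	/* create the in-memory inode and its detached media chains */
	nip = hammer2_inode_create_normal(dip, vap, cred, inum, &error);

	/* create the directory entry pointing at the new inode */
	if (error == 0)
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);

	/* ensure the dirent and the inode are flushed together */
	hammer2_inode_depend(dip, nip);

	return nip;
}
#endif
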
1349 /*
1350  * Repoint ip->cluster's chains to cluster's chains and fixup the default
1351  * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
1352  * filters out invalid or non-matching elements.
1353  *
1354  * The caller must hold the inode exclusively locked.  The cluster, if
1355  * not NULL, must also be locked.
1356  *
1357  * Cluster may be NULL to clean out any chains in ip->cluster.
1358  */
1359 void
1360 hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_cluster_t *cluster)
1361 {
1362 	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
1363 	hammer2_chain_t *ochain;
1364 	hammer2_chain_t *nchain;
1365 	int i;
1366 
1367 	bzero(dropch, sizeof(dropch));
1368 
1369 	/*
1370 	 * Replace chains in ip->cluster with chains from cluster and
1371 	 * adjust the focus if necessary.
1372 	 *
1373 	 * NOTE: nchain and/or ochain can be NULL due to gaps
1374 	 *	 in the cluster arrays.
1375 	 */
1376 	hammer2_spin_ex(&ip->cluster_spin);
1377 	for (i = 0; cluster && i < cluster->nchains; ++i) {
1378 		/*
1379 		 * Do not replace elements which are the same.  Also handle
1380 		 * element count discrepancies.
1381 		 */
1382 		nchain = cluster->array[i].chain;
1383 		if (i < ip->cluster.nchains) {
1384 			ochain = ip->cluster.array[i].chain;
1385 			if (ochain == nchain)
1386 				continue;
1387 		} else {
1388 			ochain = NULL;
1389 		}
1390 
1391 		/*
1392 		 * Make adjustments
1393 		 */
1394 		ip->cluster.array[i].chain = nchain;
1395 		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
1396 		ip->cluster.array[i].flags |= cluster->array[i].flags &
1397 					      HAMMER2_CITEM_INVALID;
1398 		if (nchain)
1399 			hammer2_chain_ref(nchain);
1400 		dropch[i] = ochain;
1401 	}
1402 
1403 	/*
1404 	 * Release any left-over chains in ip->cluster.
1405 	 */
1406 	while (i < ip->cluster.nchains) {
1407 		nchain = ip->cluster.array[i].chain;
1408 		if (nchain) {
1409 			ip->cluster.array[i].chain = NULL;
1410 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
1411 		}
1412 		dropch[i] = nchain;
1413 		++i;
1414 	}
1415 
1416 	/*
1417 	 * Fixup fields.  Note that the inode-embedded cluster is never
1418 	 * directly locked.
1419 	 */
1420 	if (cluster) {
1421 		ip->cluster.nchains = cluster->nchains;
1422 		ip->cluster.focus = cluster->focus;
1423 		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
1424 	} else {
1425 		ip->cluster.nchains = 0;
1426 		ip->cluster.focus = NULL;
1427 		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
1428 	}
1429 
1430 	hammer2_spin_unex(&ip->cluster_spin);
1431 
1432 	/*
1433 	 * Cleanup outside of spinlock
1434 	 */
1435 	while (--i >= 0) {
1436 		if (dropch[i])
1437 			hammer2_chain_drop(dropch[i]);
1438 	}
1439 }
1440 
1441 /*
1442  * Repoint a single element from the cluster to the ip.  Used by the
1443  * synchronization threads to piecemeal update inodes.  Does not change
1444  * focus and requires inode to be re-locked to clean-up flags (XXX).
1445  */
1446 void
1447 hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1448 			  int idx)
1449 {
1450 	hammer2_chain_t *ochain;
1451 	hammer2_chain_t *nchain;
1452 	int i;
1453 
1454 	hammer2_spin_ex(&ip->cluster_spin);
1455 	KKASSERT(idx < cluster->nchains);
1456 	if (idx < ip->cluster.nchains) {
1457 		ochain = ip->cluster.array[idx].chain;
1458 		nchain = cluster->array[idx].chain;
1459 	} else {
1460 		ochain = NULL;
1461 		nchain = cluster->array[idx].chain;
1462 		for (i = ip->cluster.nchains; i <= idx; ++i) {
1463 			bzero(&ip->cluster.array[i],
1464 			      sizeof(ip->cluster.array[i]));
1465 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
1466 		}
1467 		ip->cluster.nchains = idx + 1;
1468 	}
1469 	if (ochain != nchain) {
1470 		/*
1471 		 * Make adjustments.
1472 		 */
1473 		ip->cluster.array[idx].chain = nchain;
1474 		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
1475 		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
1476 						HAMMER2_CITEM_INVALID;
1477 	}
1478 	hammer2_spin_unex(&ip->cluster_spin);
1479 	if (ochain != nchain) {
1480 		if (nchain)
1481 			hammer2_chain_ref(nchain);
1482 		if (ochain)
1483 			hammer2_chain_drop(ochain);
1484 	}
1485 }
1486 
1487 hammer2_key_t
1488 hammer2_inode_data_count(const hammer2_inode_t *ip)
1489 {
1490 	hammer2_chain_t *chain;
1491 	hammer2_key_t count = 0;
1492 	int i;
1493 
1494 	for (i = 0; i < ip->cluster.nchains; ++i) {
1495 		if ((chain = ip->cluster.array[i].chain) != NULL) {
1496 			if (count < chain->bref.embed.stats.data_count)
1497 				count = chain->bref.embed.stats.data_count;
1498 		}
1499 	}
1500 	return count;
1501 }
1502 
1503 hammer2_key_t
1504 hammer2_inode_inode_count(const hammer2_inode_t *ip)
1505 {
1506 	hammer2_chain_t *chain;
1507 	hammer2_key_t count = 0;
1508 	int i;
1509 
1510 	for (i = 0; i < ip->cluster.nchains; ++i) {
1511 		if ((chain = ip->cluster.array[i].chain) != NULL) {
1512 			if (count < chain->bref.embed.stats.inode_count)
1513 				count = chain->bref.embed.stats.inode_count;
1514 		}
1515 	}
1516 	return count;
1517 }
1518 
1519 /*
1520  * Called with a locked inode to finish unlinking an inode after xop_unlink
1521  * had been run.  This function is responsible for decrementing nlinks.
1522  *
1523  * We don't bother decrementing nlinks if the file is not open and this was
1524  * the last link.
1525  *
1526  * If the inode is a hardlink target its chain has not yet been deleted;
1527  * otherwise its chain has been deleted.
1528  *
1529  * If isopen then any prior deletion was not permanent and the inode is
1530  * left intact with nlinks == 0.
1531  */
1532 int
1533 hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
1534 {
1535 	hammer2_pfs_t *pmp;
1536 	int error;
1537 
1538 	pmp = ip->pmp;
1539 
1540 	/*
1541 	 * Decrement nlinks.  If this is the last link and the file is
1542 	 * not open we can just delete the inode and not bother dropping
1543 	 * nlinks to 0 (avoiding unnecessary block updates).
1544 	 */
1545 	if (ip->meta.nlinks == 1) {
1546 		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
1547 		if (isopen == 0)
1548 			goto killit;
1549 	}
1550 
1551 	hammer2_inode_modify(ip);
1552 	--ip->meta.nlinks;
1553 	if ((int64_t)ip->meta.nlinks < 0)
1554 		ip->meta.nlinks = 0;	/* safety */
1555 
1556 	/*
1557 	 * If nlinks is not zero we are done.  However, this should only be
1558 	 * possible with a hardlink target.  If the inode is an embedded
1559 	 * hardlink nlinks should have dropped to zero, warn and proceed
1560 	 * with the next step.
1561 	 */
1562 	if (ip->meta.nlinks) {
1563 		if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
1564 			return 0;
1565 		kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
1566 			(intmax_t)ip->meta.nlinks);
1567 		return 0;
1568 	}
1569 
1570 	if (ip->vp)
1571 		hammer2_knote(ip->vp, NOTE_DELETE);
1572 
1573 	/*
1574 	 * nlinks is now an implied zero, delete the inode if not open.
1575 	 * We avoid unnecessary media updates by not bothering to actually
1576 	 * decrement nlinks for the 1->0 transition
1577 	 *
1578 	 * Put the inode on the sideq to ensure that any disconnected chains
1579 	 * get properly flushed (so they can be freed).  Defer the deletion
1580 	 * to the sync code, doing it now will desynchronize the inode from
1581 	 * related directory entries (which is bad).
1582 	 *
1583 	 * NOTE: killit can be reached without modifying the inode, so
1584 	 *	 make sure that it is on the SIDEQ.
1585 	 */
1586 	if (isopen == 0) {
1587 #if 0
1588 		hammer2_xop_destroy_t *xop;
1589 #endif
1590 
1591 killit:
1592 		atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
1593 		hammer2_inode_delayed_sideq(ip);
1594 #if 0
1595 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1596 		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
1597 		error = hammer2_xop_collect(&xop->head, 0);
1598 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1599 #endif
1600 	}
1601 	error = 0;	/* XXX */
1602 
1603 	return error;
1604 }
1605 
1606 /*
1607  * Mark an inode as being modified, meaning that the caller will modify
1608  * ip->meta.
1609  *
1610  * If a vnode is present we set the vnode dirty and the nominal filesystem
1611  * sync will also handle synchronizing the inode meta-data.  Unless NOSIDEQ
1612  * we must ensure that the inode is on pmp->sideq.
1613  *
1614  * NOTE: We must always queue the inode to the sideq.  This allows H2 to
1615  *	 shortcut vsyncscan() and flush inodes and their related vnodes
1616  *	 in two stages.  H2 still calls vfsync() for each vnode.
1617  *
1618  * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
1619  *	 only modifying the in-memory inode.  A modify_tid is synchronized
1620  *	 later when the inode gets flushed.
1621  *
1622  * NOTE: As an exception to the general rule, the inode MAY be locked
1623  *	 shared for this particular call.
1624  */
1625 void
1626 hammer2_inode_modify(hammer2_inode_t *ip)
1627 {
1628 	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1629 	if (ip->vp)
1630 		vsetisdirty(ip->vp);
1631 	if (ip->pmp && (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0)
1632 		hammer2_inode_delayed_sideq(ip);
1633 }
1634 
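/*
 * Illustrative sketch (compiled out): the usual pattern for updating
 * in-memory inode meta-data.  The inode is locked, flagged MODIFIED via
 * hammer2_inode_modify(), and the fields are then updated; the sync code
 * picks the change up later.  The function name is hypothetical.
 */
#if 0
static void
example_touch_mtime(hammer2_inode_t *ip)
{
	hammer2_inode_lock(ip, 0);
	hammer2_inode_modify(ip);	/* sets MODIFIED, queues to SIDEQ */
	hammer2_update_time(&ip->meta.mtime);
	hammer2_inode_unlock(ip);
}
#endif
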
1635 /*
1636  * Synchronize the inode's frontend state with the chain state prior
1637  * to any explicit flush of the inode or any strategy write call.  This
1638  * does not flush the inode's chain or its sub-topology to media (higher
1639  * level layers are responsible for doing that).
1640  *
1641  * Called with a locked inode inside a normal transaction.
1642  *
1643  * inode must be locked.
1644  */
1645 int
1646 hammer2_inode_chain_sync(hammer2_inode_t *ip)
1647 {
1648 	int error;
1649 
1650 	error = 0;
1651 	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
1652 		hammer2_xop_fsync_t *xop;
1653 
1654 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1655 		xop->clear_directdata = 0;
1656 		if (ip->flags & HAMMER2_INODE_RESIZED) {
1657 			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
1658 			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
1659 				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
1660 				xop->clear_directdata = 1;
1661 			}
1662 			xop->osize = ip->osize;
1663 		} else {
1664 			xop->osize = ip->meta.size;	/* safety */
1665 		}
1666 		xop->ipflags = ip->flags;
1667 		xop->meta = ip->meta;
1668 
1669 		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
1670 					     HAMMER2_INODE_MODIFIED);
1671 		hammer2_xop_start(&xop->head, &hammer2_inode_chain_sync_desc);
1672 		error = hammer2_xop_collect(&xop->head, 0);
1673 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1674 		if (error == HAMMER2_ERROR_ENOENT)
1675 			error = 0;
1676 		if (error) {
1677 			kprintf("hammer2: unable to fsync inode %p\n", ip);
1678 			/*
1679 			atomic_set_int(&ip->flags,
1680 				       xop->ipflags & (HAMMER2_INODE_RESIZED |
1681 						       HAMMER2_INODE_MODIFIED));
1682 			*/
1683 			/* XXX return error somehow? */
1684 		}
1685 	}
1686 	return error;
1687 }
1688 
1689 /*
1690  * When an inode is flagged INODE_CREATING its chains have not actually
1691  * been inserted into the on-media tree yet.
1692  */
1693 int
1694 hammer2_inode_chain_ins(hammer2_inode_t *ip)
1695 {
1696 	int error;
1697 
1698 	error = 0;
1699 	if (ip->flags & HAMMER2_INODE_CREATING) {
1700 		hammer2_xop_create_t *xop;
1701 
1702 		atomic_clear_int(&ip->flags, HAMMER2_INODE_CREATING);
1703 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1704 		xop->lhc = ip->meta.inum;
1705 		xop->flags = 0;
1706 		hammer2_xop_start(&xop->head, &hammer2_inode_create_ins_desc);
1707 		error = hammer2_xop_collect(&xop->head, 0);
1708 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1709 		if (error == HAMMER2_ERROR_ENOENT)
1710 			error = 0;
1711 		if (error) {
1712 			kprintf("hammer2: backend unable to "
1713 				"insert inode %p %jd\n", ip, (intmax_t)ip->meta.inum);
1714 			/* XXX return error somehow? */
1715 		}
1716 	}
1717 	return error;
1718 }
1719 
1720 /*
1721  * When an inode is flagged INODE_DELETING it has been deleted (no directory
1722  * entry or open refs are left, though as an optimization H2 might leave
1723  * nlinks == 1 to avoid unnecessary block updates).  The backend flush then
1724  * needs to actually remove it from the topology.
1725  *
1726  * NOTE: backend flush must still sync and flush the deleted inode to clean
1727  *	 out related chains.
1728  *
1729  * NOTE: We must clear not only INODE_DELETING, but also INODE_ISUNLINKED
1730  *	 to prevent the vnode reclaim code from trying to delete it twice.
1731  */
1732 int
1733 hammer2_inode_chain_des(hammer2_inode_t *ip)
1734 {
1735 	int error;
1736 
1737 	error = 0;
1738 	if (ip->flags & HAMMER2_INODE_DELETING) {
1739 		hammer2_xop_destroy_t *xop;
1740 
1741 		atomic_clear_int(&ip->flags, HAMMER2_INODE_DELETING |
1742 					     HAMMER2_INODE_ISUNLINKED);
1743 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1744 		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
1745 		error = hammer2_xop_collect(&xop->head, 0);
1746 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1747 
1748 		if (error == HAMMER2_ERROR_ENOENT)
1749 			error = 0;
1750 		if (error) {
1751 			kprintf("hammer2: backend unable to "
1752 				"delete inode %p %jd\n", ip, (intmax_t)ip->meta.inum);
1753 			/* XXX return error somehow? */
1754 		}
1755 	}
1756 	return error;
1757 }
1758 
1759 /*
1760  * Flushes the inode's chain and its sub-topology to media.  Interlocks
1761  * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
1762  * function creating or modifying a chain under this inode will re-set the
1763  * flag.
1764  *
1765  * inode must be locked.
1766  */
1767 int
1768 hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags)
1769 {
1770 	hammer2_xop_fsync_t *xop;
1771 	int error;
1772 
1773 	atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
1774 	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | flags);
1775 	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
1776 	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
1777 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1778 	if (error == HAMMER2_ERROR_ENOENT)
1779 		error = 0;
1780 
1781 	return error;
1782 }
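
/*
 * Illustrative sketch (compiled out): how the meta-data sync and the
 * chain flush fit together for a single inode, roughly what an
 * fsync-style path does.  The caller is assumed to have opened a
 * transaction around this sequence (hammer2_trans_init() /
 * hammer2_trans_done(), not shown), and the flags value 0 is only a
 * placeholder for the real flush flags.  The function name is
 * hypothetical.
 */
#if 0
static int
example_sync_one_inode(hammer2_inode_t *ip)
{
	int error;

	hammer2_inode_lock(ip, 0);
	error = hammer2_inode_chain_sync(ip);		/* meta-data -> chain */
	if (error == 0)
		error = hammer2_inode_chain_flush(ip, 0); /* chain -> media */
	hammer2_inode_unlock(ip);

	return error;
}
#endif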
1783