xref: /dragonfly/sys/vfs/hammer2/hammer2_inode.c (revision 0b2c5ee3)
1 /*
2  * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 #include <sys/cdefs.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/types.h>
39 #include <sys/lock.h>
40 #include <sys/uuid.h>
41 
42 #include "hammer2.h"
43 
44 #define INODE_DEBUG	0
45 
46 RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
47 	     hammer2_tid_t, meta.inum);
48 
49 int
50 hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
51 {
52 	if (ip1->meta.inum < ip2->meta.inum)
53 		return(-1);
54 	if (ip1->meta.inum > ip2->meta.inum)
55 		return(1);
56 	return(0);
57 }
58 
59 static __inline
60 void
61 hammer2_knote(struct vnode *vp, int flags)
62 {
63 	if (flags)
64 		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
65 }
66 
67 /*
68  * Caller holds pmp->list_spin and the inode should be locked.  Merge ip
69  * with the specified depend.
70  *
71  * If the ip is on SYNCQ it stays there and (void *)-1 is returned, indicating
72  * that successive calls must ensure the ip is on a pass2 depend (or they are
73  * all SYNCQ).  If the passed-in depend is not NULL and not (void *)-1 then
74  * we can set pass2 on it and return.
75  *
76  * If the ip is not on SYNCQ it is merged with the passed-in depend, creating
77  * a self-depend if necessary, and depend->pass2 is set according
78  * to the PASS2 flag.  SIDEQ is set.
79  */
80 static __noinline
81 hammer2_depend_t *
82 hammer2_inode_setdepend_locked(hammer2_inode_t *ip, hammer2_depend_t *depend)
83 {
84 	hammer2_pfs_t *pmp = ip->pmp;
85 	hammer2_depend_t *dtmp;
86 	hammer2_inode_t *iptmp;
87 
88 	/*
89 	 * If ip is SYNCQ its entry is used for the syncq list and it will
90 	 * no longer be associated with a dependency.  Merging this status
91 	 * with a passed-in depend implies PASS2.
92 	 */
93 	if (ip->flags & HAMMER2_INODE_SYNCQ) {
94 		if (depend == (void *)-1 ||
95 		    depend == NULL) {
96 			return ((void *)-1);
97 		}
98 		depend->pass2 = 1;
99 		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);
100 
101 		return depend;
102 	}
103 
104 	/*
105 	 * If ip is already SIDEQ, merge ip->depend into the passed-in depend.
106 	 * If it is not, associate the ip with the passed-in depend, creating
107 	 * a single-entry dependency using depend_static if necessary.
108 	 *
109 	 * NOTE: The use of ip->depend_static always requires that the
110 	 *	 specific ip containing the structure is part of that
111 	 *	 particular depend_static's dependency group.
112 	 */
113 	if (ip->flags & HAMMER2_INODE_SIDEQ) {
114 		/*
115 		 * Merge ip->depend with the passed-in depend.  If the
116 		 * passed-in depend is not a special case, all ips associated
117 		 * with ip->depend (including the original ip) must be moved
118 		 * to the passed-in depend.
119 		 */
120 		if (depend == NULL) {
121 			depend = ip->depend;
122 		} else if (depend == (void *)-1) {
123 			depend = ip->depend;
124 			depend->pass2 = 1;
125 		} else if (depend != ip->depend) {
126 #ifdef INVARIANTS
127 			int sanitychk = 0;
128 #endif
129 			dtmp = ip->depend;
130 			while ((iptmp = TAILQ_FIRST(&dtmp->sideq)) != NULL) {
131 #ifdef INVARIANTS
132 				if (iptmp == ip)
133 					sanitychk = 1;
134 #endif
135 				TAILQ_REMOVE(&dtmp->sideq, iptmp, entry);
136 				TAILQ_INSERT_TAIL(&depend->sideq, iptmp, entry);
137 				iptmp->depend = depend;
138 			}
139 			KKASSERT(sanitychk == 1);
140 			depend->count += dtmp->count;
141 			depend->pass2 |= dtmp->pass2;
142 			TAILQ_REMOVE(&pmp->depq, dtmp, entry);
143 			dtmp->count = 0;
144 			dtmp->pass2 = 0;
145 		}
146 	} else {
147 		/*
148 		 * Add ip to the sideq, creating a self-dependency if
149 		 * necessary.
150 		 */
151 		hammer2_inode_ref(ip);
152 		atomic_set_int(&ip->flags, HAMMER2_INODE_SIDEQ);
153 		if (depend == NULL) {
154 			depend = &ip->depend_static;
155 			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
156 		} else if (depend == (void *)-1) {
157 			depend = &ip->depend_static;
158 			depend->pass2 = 1;
159 			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
160 		} /* else add ip to passed-in depend */
161 		TAILQ_INSERT_TAIL(&depend->sideq, ip, entry);
162 		ip->depend = depend;
163 		++depend->count;
164 		++pmp->sideq_count;
165 	}
166 
167 	if (ip->flags & HAMMER2_INODE_SYNCQ_PASS2)
168 		depend->pass2 = 1;
169 	if (depend->pass2)
170 		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);
171 
172 	return depend;
173 }
174 
175 /*
176  * Put a solo inode on the SIDEQ (meaning that it is dirty).  This can also
177  * occur from inode_lock4() and inode_depend().
178  *
179  * Caller must pass in a locked inode.
180  */
181 void
182 hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
183 {
184 	hammer2_pfs_t *pmp = ip->pmp;
185 
186 	/*
187 	 * Optimize case to avoid pmp spinlock.
188 	 */
189 	if ((ip->flags & (HAMMER2_INODE_SYNCQ | HAMMER2_INODE_SIDEQ)) == 0) {
190 		hammer2_spin_ex(&pmp->list_spin);
191 		hammer2_inode_setdepend_locked(ip, NULL);
192 		hammer2_spin_unex(&pmp->list_spin);
193 	}
194 }
195 
196 /*
197  * Lock an inode, with SYNCQ semantics.
198  *
199  * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
200  * flags for options:
201  *
202  *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
203  *	  inode locking function will automatically set the RDONLY flag.
204  *	  Shared locks are not subject to SYNCQ semantics; exclusive locks
205  *	  are.
206  *
207  *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
208  *	  Most front-end inode locks do.
209  *
210  *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
211  *	  the inode data be resolved.  This is used by the syncthr because
212  *	  it can run on an unresolved/out-of-sync cluster, and also by the
213  *	  vnode reclamation code to avoid unnecessary I/O (particularly when
214  *	  disposing of hundreds of thousands of cached vnodes).
215  *
216  * This function, along with lock4, has SYNCQ semantics.  If the inode being
217  * locked is on the SYNCQ (that is, it has been staged by the syncer), we must
218  * block until the operation is complete (even if we can lock the inode).  In
219  * order to reduce the stall time, we re-order the inode to the front of the
220  * pmp->syncq prior to blocking.  This reordering VERY significantly improves
221  * performance.
222  *
223  * The inode locking function locks the inode itself, resolves any stale
224  * chains in the inode's cluster, and allocates a fresh copy of the
225  * cluster with 1 ref and all the underlying chains locked.
226  *
227  * ip->cluster will be stable while the inode is locked.
228  *
229  * NOTE: We don't combine the inode/chain lock because putting away an
230  *       inode would otherwise confuse multiple lock holders of the inode.
231  */
232 void
233 hammer2_inode_lock(hammer2_inode_t *ip, int how)
234 {
235 	hammer2_pfs_t *pmp;
236 
237 	hammer2_inode_ref(ip);
238 	pmp = ip->pmp;
239 
240 	/*
241 	 * Inode structure mutex - Shared lock
242 	 */
243 	if (how & HAMMER2_RESOLVE_SHARED) {
244 		hammer2_mtx_sh(&ip->lock);
245 		return;
246 	}
247 
248 	/*
249 	 * Inode structure mutex - Exclusive lock
250 	 *
251 	 * An exclusive lock (if not recursive) must wait for inodes on
252 	 * SYNCQ to flush first, to ensure that meta-data dependencies such
253 	 * as the nlink count and related directory entries are not split
254 	 * across flushes.
255 	 *
256 	 * If the vnode is locked by the current thread it must be unlocked
257 	 * across the tsleep() to avoid a deadlock.
258 	 */
259 	hammer2_mtx_ex(&ip->lock);
260 	if (hammer2_mtx_refs(&ip->lock) > 1)
261 		return;
262 	while ((ip->flags & HAMMER2_INODE_SYNCQ) && pmp) {
263 		hammer2_spin_ex(&pmp->list_spin);
264 		if (ip->flags & HAMMER2_INODE_SYNCQ) {
265 			tsleep_interlock(&ip->flags, 0);
266 			atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
267 			TAILQ_REMOVE(&pmp->syncq, ip, entry);
268 			TAILQ_INSERT_HEAD(&pmp->syncq, ip, entry);
269 			hammer2_spin_unex(&pmp->list_spin);
270 			hammer2_mtx_unlock(&ip->lock);
271 			tsleep(&ip->flags, PINTERLOCKED, "h2sync", 0);
272 			hammer2_mtx_ex(&ip->lock);
273 			continue;
274 		}
275 		hammer2_spin_unex(&pmp->list_spin);
276 		break;
277 	}
278 }
279 
280 /*
281  * Exclusively lock up to four inodes, in order, with SYNCQ semantics.
282  * ip1 and ip2 must not be NULL.  ip3 and ip4 may be NULL, but if ip3 is
283  * NULL then ip4 must also be NULL.
284  *
285  * This creates a dependency between up to four inodes.
286  */
287 void
288 hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
289 		    hammer2_inode_t *ip3, hammer2_inode_t *ip4)
290 {
291 	hammer2_inode_t *ips[4];
292 	hammer2_inode_t *iptmp;
293 	hammer2_inode_t *ipslp;
294 	hammer2_depend_t *depend;
295 	hammer2_pfs_t *pmp;
296 	size_t count;
297 	size_t i;
298 
299 	pmp = ip1->pmp;			/* may be NULL */
300 	KKASSERT(pmp == ip2->pmp);
301 
302 	ips[0] = ip1;
303 	ips[1] = ip2;
304 	if (ip3 == NULL) {
305 		count = 2;
306 	} else if (ip4 == NULL) {
307 		count = 3;
308 		ips[2] = ip3;
309 		KKASSERT(pmp == ip3->pmp);
310 	} else {
311 		count = 4;
312 		ips[2] = ip3;
313 		ips[3] = ip4;
314 		KKASSERT(pmp == ip3->pmp);
315 		KKASSERT(pmp == ip4->pmp);
316 	}
317 
318 	for (i = 0; i < count; ++i)
319 		hammer2_inode_ref(ips[i]);
320 
321 restart:
322 	/*
323 	 * Lock the inodes in order
324 	 */
325 	for (i = 0; i < count; ++i) {
326 		hammer2_mtx_ex(&ips[i]->lock);
327 	}
328 
329 	/*
330 	 * Associate dependencies and record the first inode found on SYNCQ
331 	 * (the operation is allowed to proceed for inodes on PASS2) for our
332 	 * sleep operation; this inode is theoretically the last one sync'd
333 	 * in the sequence.
334 	 *
335 	 * All inodes found on SYNCQ are moved to the head of the syncq
336 	 * to reduce stalls.
337 	 */
338 	hammer2_spin_ex(&pmp->list_spin);
339 	depend = NULL;
340 	ipslp = NULL;
341 	for (i = 0; i < count; ++i) {
342 		iptmp = ips[i];
343 		depend = hammer2_inode_setdepend_locked(iptmp, depend);
344 		if (iptmp->flags & HAMMER2_INODE_SYNCQ) {
345 			TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
346 			TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
347 			if (ipslp == NULL)
348 				ipslp = iptmp;
349 		}
350 	}
351 	hammer2_spin_unex(&pmp->list_spin);
352 
353 	/*
354 	 * Block and retry if any of the inodes are on SYNCQ.  It is
355 	 * important that we allow the operation to proceed in the
356 	 * PASS2 case, to avoid deadlocking against the vnode.
357 	 */
358 	if (ipslp) {
359 		for (i = 0; i < count; ++i)
360 			hammer2_mtx_unlock(&ips[i]->lock);
361 		tsleep(&ipslp->flags, 0, "h2sync", 2);
362 		goto restart;
363 	}
364 }
365 
366 /*
367  * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
368  * we wake them up.
369  */
370 void
371 hammer2_inode_unlock(hammer2_inode_t *ip)
372 {
373 	if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) {
374 		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
375 		hammer2_mtx_unlock(&ip->lock);
376 		wakeup(&ip->flags);
377 	} else {
378 		hammer2_mtx_unlock(&ip->lock);
379 	}
380 	hammer2_inode_drop(ip);
381 }
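
/*
 * Illustrative sketch (not compiled): the basic lock/unlock pattern for
 * the functions above.  hammer2_inode_lock() gains its own ref on the
 * inode and hammer2_inode_unlock() drops it, so no explicit ref/drop
 * pair is needed around the lock.  The example_* name is hypothetical.
 */
#if 0
static void
example_inode_lock_usage(hammer2_inode_t *ip)
{
	/* exclusive lock with meta-data resolved (typical front-end use) */
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
	/* ... modify ip->meta (see hammer2_inode_modify()) ... */
	hammer2_inode_unlock(ip);

	/* shared lock, not subject to SYNCQ semantics */
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED |
			       HAMMER2_RESOLVE_ALWAYS);
	/* ... read-only access to ip->meta ... */
	hammer2_inode_unlock(ip);
}
#endif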
382 
383 /*
384  * If either ip1 or ip2 have been tapped by the syncer, make sure that both
385  * are.  This ensure that dependencies (e.g. dirent-v-inode) are synced
386  * together.  For dirent-v-inode depends, pass the dirent as ip1.
387  *
388  * If neither ip1 or ip2 have been tapped by the syncer, merge them into a
389  * single dependency.  Dependencies are entered into pmp->depq.  This
390  * effectively flags the inodes SIDEQ.
391  *
392  * Both ip1 and ip2 must be locked by the caller.  This also ensures
393  * that we can't race the end of the syncer's queue run.
394  */
395 void
396 hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
397 {
398 	hammer2_pfs_t *pmp;
399 	hammer2_depend_t *depend;
400 
401 	pmp = ip1->pmp;
402 	hammer2_spin_ex(&pmp->list_spin);
403 	depend = hammer2_inode_setdepend_locked(ip1, NULL);
404 	depend = hammer2_inode_setdepend_locked(ip2, depend);
405 	hammer2_spin_unex(&pmp->list_spin);
406 }
407 
408 /*
409  * Select a chain out of an inode's cluster and lock it.
410  *
411  * The inode does not have to be locked.
412  */
413 hammer2_chain_t *
414 hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
415 {
416 	hammer2_chain_t *chain;
417 	hammer2_cluster_t *cluster;
418 
419 	hammer2_spin_sh(&ip->cluster_spin);
420 	cluster = &ip->cluster;
421 	if (clindex >= cluster->nchains)
422 		chain = NULL;
423 	else
424 		chain = cluster->array[clindex].chain;
425 	if (chain) {
426 		hammer2_chain_ref(chain);
427 		hammer2_spin_unsh(&ip->cluster_spin);
428 		hammer2_chain_lock(chain, how);
429 	} else {
430 		hammer2_spin_unsh(&ip->cluster_spin);
431 	}
432 	return chain;
433 }
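
/*
 * Illustrative sketch (not compiled): selecting a chain out of an
 * inode's cluster and releasing it.  hammer2_inode_chain() returns the
 * chain referenced and locked (or NULL), so the caller must unlock and
 * drop it when done.  The example_* name is hypothetical.
 */
#if 0
static void
example_inode_chain_usage(hammer2_inode_t *ip, int clindex)
{
	hammer2_chain_t *chain;

	chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
	if (chain) {
		/* ... inspect chain->bref, chain->data, etc ... */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}
#endif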
434 
435 hammer2_chain_t *
436 hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
437 			       hammer2_chain_t **parentp, int how)
438 {
439 	hammer2_chain_t *chain;
440 	hammer2_chain_t *parent;
441 
442 	for (;;) {
443 		hammer2_spin_sh(&ip->cluster_spin);
444 		if (clindex >= ip->cluster.nchains)
445 			chain = NULL;
446 		else
447 			chain = ip->cluster.array[clindex].chain;
448 		if (chain) {
449 			hammer2_chain_ref(chain);
450 			hammer2_spin_unsh(&ip->cluster_spin);
451 			hammer2_chain_lock(chain, how);
452 		} else {
453 			hammer2_spin_unsh(&ip->cluster_spin);
454 		}
455 
456 		/*
457 		 * Get parent, lock order must be (parent, chain).
458 		 */
459 		parent = chain->parent;
460 		if (parent) {
461 			hammer2_chain_ref(parent);
462 			hammer2_chain_unlock(chain);
463 			hammer2_chain_lock(parent, how);
464 			hammer2_chain_lock(chain, how);
465 		}
466 		if (ip->cluster.array[clindex].chain == chain &&
467 		    chain->parent == parent) {
468 			break;
469 		}
470 
471 		/*
472 		 * Retry
473 		 */
474 		hammer2_chain_unlock(chain);
475 		hammer2_chain_drop(chain);
476 		if (parent) {
477 			hammer2_chain_unlock(parent);
478 			hammer2_chain_drop(parent);
479 		}
480 	}
481 	*parentp = parent;
482 
483 	return chain;
484 }
485 
486 /*
487  * Temporarily release a lock held shared or exclusive.  Caller must
488  * hold the lock shared or exclusive on call and the lock will be released
489  * on return.
490  *
491  * Restore a lock that was temporarily released.
492  */
493 hammer2_mtx_state_t
494 hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
495 {
496 	return hammer2_mtx_temp_release(&ip->lock);
497 }
498 
499 void
500 hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
501 {
502 	hammer2_mtx_temp_restore(&ip->lock, ostate);
503 }
504 
505 /*
506  * Upgrade a shared inode lock to exclusive and return.  If the inode lock
507  * is already held exclusively this is a NOP.
508  *
509  * The caller MUST hold the inode lock either shared or exclusive on call
510  * and will own the lock exclusively on return.
511  *
512  * Returns non-zero if the lock was already exclusive prior to the upgrade.
513  */
514 int
515 hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
516 {
517 	int wasexclusive;
518 
519 	if (mtx_islocked_ex(&ip->lock)) {
520 		wasexclusive = 1;
521 	} else {
522 		hammer2_mtx_unlock(&ip->lock);
523 		hammer2_mtx_ex(&ip->lock);
524 		wasexclusive = 0;
525 	}
526 	return wasexclusive;
527 }
528 
529 /*
530  * Downgrade an inode lock from exclusive to shared only if the inode
531  * lock was previously shared.  If the inode lock was previously exclusive,
532  * this is a NOP.
533  */
534 void
535 hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
536 {
537 	if (wasexclusive == 0)
538 		hammer2_mtx_downgrade(&ip->lock);
539 }
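
/*
 * Illustrative sketch (not compiled): pairing the upgrade and downgrade
 * helpers so the lock is returned to its original state.  hammer2_igetv()
 * below uses this same pattern.  The example_* name is hypothetical.
 */
#if 0
static void
example_inode_lock_upgrade_usage(hammer2_inode_t *ip)
{
	int wasexclusive;

	/* caller holds ip->lock shared or exclusive */
	wasexclusive = hammer2_inode_lock_upgrade(ip);
	/* ... operate with the lock held exclusively ... */
	hammer2_inode_lock_downgrade(ip, wasexclusive);
	/* lock is now back in its original shared or exclusive state */
}
#endif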
540 
541 /*
542  * Lookup an inode by inode number
543  */
544 hammer2_inode_t *
545 hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
546 {
547 	hammer2_inode_t *ip;
548 
549 	KKASSERT(pmp);
550 	if (pmp->spmp_hmp) {
551 		ip = NULL;
552 	} else {
553 		hammer2_spin_ex(&pmp->inum_spin);
554 		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
555 		if (ip)
556 			hammer2_inode_ref(ip);
557 		hammer2_spin_unex(&pmp->inum_spin);
558 	}
559 	return(ip);
560 }
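
/*
 * Illustrative sketch (not compiled): hammer2_inode_lookup() returns the
 * inode with an extra ref (or NULL), so a successful lookup must be
 * balanced with hammer2_inode_drop().  The example_* name is hypothetical.
 */
#if 0
static void
example_inode_lookup_usage(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	ip = hammer2_inode_lookup(pmp, inum);
	if (ip) {
		/* ... use ip, locking it with hammer2_inode_lock() if needed ... */
		hammer2_inode_drop(ip);
	}
}
#endif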
561 
562 /*
563  * Adding a ref to an inode is only legal if the inode already has at least
564  * one ref.
565  *
566  * (can be called with spinlock held)
567  */
568 void
569 hammer2_inode_ref(hammer2_inode_t *ip)
570 {
571 	atomic_add_int(&ip->refs, 1);
572 	if (hammer2_debug & 0x80000) {
573 		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
574 		print_backtrace(8);
575 	}
576 }
577 
578 /*
579  * Drop an inode reference, freeing the inode when the last reference goes
580  * away.
581  */
582 void
583 hammer2_inode_drop(hammer2_inode_t *ip)
584 {
585 	hammer2_pfs_t *pmp;
586 	u_int refs;
587 
588 	while (ip) {
589 		if (hammer2_debug & 0x80000) {
590 			kprintf("INODE-1 %p (%d->%d)\n",
591 				ip, ip->refs, ip->refs - 1);
592 			print_backtrace(8);
593 		}
594 		refs = ip->refs;
595 		cpu_ccfence();
596 		if (refs == 1) {
597 			/*
598 			 * Transition to zero, must interlock with
599 			 * the inode inumber lookup tree (if applicable).
600 			 * It should not be possible for anyone to race
601 			 * the transition to 0.
602 			 */
603 			pmp = ip->pmp;
604 			KKASSERT(pmp);
605 			hammer2_spin_ex(&pmp->inum_spin);
606 
607 			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
608 				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
609 				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
610 					atomic_clear_int(&ip->flags,
611 						     HAMMER2_INODE_ONRBTREE);
612 					RB_REMOVE(hammer2_inode_tree,
613 						  &pmp->inum_tree, ip);
614 					--pmp->inum_count;
615 				}
616 				hammer2_spin_unex(&pmp->inum_spin);
617 
618 				ip->pmp = NULL;
619 
620 				/*
621 				 * Cleaning out ip->cluster isn't entirely
622 				 * trivial.
623 				 */
624 				hammer2_inode_repoint(ip, NULL);
625 
626 				kfree_obj(ip, pmp->minode);
627 				atomic_add_long(&pmp->inmem_inodes, -1);
628 				ip = NULL;	/* will terminate loop */
629 			} else {
630 				hammer2_spin_unex(&ip->pmp->inum_spin);
631 			}
632 		} else {
633 			/*
634 			 * Non zero transition
635 			 */
636 			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
637 				break;
638 		}
639 	}
640 }
641 
642 /*
643  * Get the vnode associated with the given inode, allocating the vnode if
644  * necessary.  The vnode will be returned exclusively locked.
645  *
646  * *errorp is set to a UNIX error, not a HAMMER2 error.
647  *
648  * The caller must lock the inode (shared or exclusive).
649  *
650  * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
651  * races.
652  */
653 struct vnode *
654 hammer2_igetv(hammer2_inode_t *ip, int *errorp)
655 {
656 	hammer2_pfs_t *pmp;
657 	struct vnode *vp;
658 
659 	pmp = ip->pmp;
660 	KKASSERT(pmp != NULL);
661 	*errorp = 0;
662 
663 	for (;;) {
664 		/*
665 		 * Attempt to reuse an existing vnode assignment.  It is
666 		 * possible to race a reclaim so the vget() may fail.  The
667 		 * inode must be unlocked during the vget() to avoid a
668 		 * deadlock against a reclaim.
669 		 */
670 		int wasexclusive;
671 
672 		vp = ip->vp;
673 		if (vp) {
674 			/*
675 			 * Inode must be unlocked during the vget() to avoid
676 			 * possible deadlocks, but leave the ip ref intact.
677 			 *
678 			 * vnode is held to prevent destruction during the
679 			 * vget().  The vget() can still fail if we lost
680 			 * a reclaim race on the vnode.
681 			 */
682 			hammer2_mtx_state_t ostate;
683 
684 			vhold(vp);
685 			ostate = hammer2_inode_lock_temp_release(ip);
686 			if (vget(vp, LK_EXCLUSIVE)) {
687 				vdrop(vp);
688 				hammer2_inode_lock_temp_restore(ip, ostate);
689 				continue;
690 			}
691 			hammer2_inode_lock_temp_restore(ip, ostate);
692 			vdrop(vp);
693 			/* vp still locked and ref from vget */
694 			if (ip->vp != vp) {
695 				kprintf("hammer2: igetv race %p/%p\n",
696 					ip->vp, vp);
697 				vput(vp);
698 				continue;
699 			}
700 			*errorp = 0;
701 			break;
702 		}
703 
704 		/*
705 		 * No vnode exists, allocate a new vnode.  Beware of
706 		 * allocation races.  This function will return an
707 		 * exclusively locked and referenced vnode.
708 		 */
709 		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
710 		if (*errorp) {
711 			kprintf("hammer2: igetv getnewvnode failed %d\n",
712 				*errorp);
713 			vp = NULL;
714 			break;
715 		}
716 
717 		/*
718 		 * Lock the inode and check for an allocation race.
719 		 */
720 		wasexclusive = hammer2_inode_lock_upgrade(ip);
721 		if (ip->vp != NULL) {
722 			vp->v_type = VBAD;
723 			vx_put(vp);
724 			hammer2_inode_lock_downgrade(ip, wasexclusive);
725 			continue;
726 		}
727 
728 		switch (ip->meta.type) {
729 		case HAMMER2_OBJTYPE_DIRECTORY:
730 			vp->v_type = VDIR;
731 			break;
732 		case HAMMER2_OBJTYPE_REGFILE:
733 			/*
734 			 * Regular file must use buffer cache I/O
735 			 * (VKVABIO cpu sync semantics supported)
736 			 */
737 			vp->v_type = VREG;
738 			vsetflags(vp, VKVABIO);
739 			vinitvmio(vp, ip->meta.size,
740 				  HAMMER2_LBUFSIZE,
741 				  (int)ip->meta.size & HAMMER2_LBUFMASK);
742 			break;
743 		case HAMMER2_OBJTYPE_SOFTLINK:
744 			/*
745 			 * XXX for now we are using the generic file_read
746 			 * and file_write code so we need a buffer cache
747 			 * association.
748 			 *
749 			 * (VKVABIO cpu sync semantics supported)
750 			 */
751 			vp->v_type = VLNK;
752 			vsetflags(vp, VKVABIO);
753 			vinitvmio(vp, ip->meta.size,
754 				  HAMMER2_LBUFSIZE,
755 				  (int)ip->meta.size & HAMMER2_LBUFMASK);
756 			break;
757 		case HAMMER2_OBJTYPE_CDEV:
758 			vp->v_type = VCHR;
759 			/* fall through */
760 		case HAMMER2_OBJTYPE_BDEV:
761 			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
762 			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
763 				vp->v_type = VBLK;
764 			addaliasu(vp,
765 				  ip->meta.rmajor,
766 				  ip->meta.rminor);
767 			break;
768 		case HAMMER2_OBJTYPE_FIFO:
769 			vp->v_type = VFIFO;
770 			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
771 			break;
772 		case HAMMER2_OBJTYPE_SOCKET:
773 			vp->v_type = VSOCK;
774 			break;
775 		default:
776 			panic("hammer2: unhandled objtype %d",
777 			      ip->meta.type);
778 			break;
779 		}
780 
781 		if (ip == pmp->iroot)
782 			vsetflags(vp, VROOT);
783 
784 		vp->v_data = ip;
785 		ip->vp = vp;
786 		hammer2_inode_ref(ip);		/* vp association */
787 		hammer2_inode_lock_downgrade(ip, wasexclusive);
788 		vx_downgrade(vp);
789 		break;
790 	}
791 
792 	/*
793 	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
794 	 */
795 	if (hammer2_debug & 0x0002) {
796 		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
797 			vp, vp->v_refcnt, vp->v_auxrefs);
798 	}
799 	return (vp);
800 }
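
/*
 * Illustrative sketch (not compiled): obtaining a vnode for a locked
 * inode.  hammer2_igetv() returns the vnode exclusively locked and
 * referenced; the caller typically disposes of it with vput().  errorp
 * is a UNIX errno.  The example_* name is hypothetical.
 */
#if 0
static int
example_igetv_usage(hammer2_inode_t *ip)
{
	struct vnode *vp;
	int error;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	vp = hammer2_igetv(ip, &error);
	hammer2_inode_unlock(ip);
	if (vp) {
		/* ... use the locked, referenced vnode ... */
		vput(vp);
	}
	return error;
}
#endif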
801 
802 /*
803  * XXX this API needs a rewrite.  It needs to be split into a
804  * hammer2_inode_alloc() and hammer2_inode_build() to allow us to get
805  * rid of the inode/chain lock reversal fudge.
806  *
807  * Returns the inode associated with the passed-in cluster, allocating a new
808  * hammer2_inode structure if necessary, then synchronizing it to the passed
809  * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
810  * is synchronized.  Otherwise the whole cluster is synchronized.  inum will
811  * be extracted from the passed-in xop and the inum argument will be ignored.
812  *
813  * If xop is passed as NULL then a new hammer2_inode is allocated with the
814  * specified inum, and returned.   For normal inodes, the inode will be
815  * indexed in memory and if it already exists the existing ip will be
816  * returned instead of allocating a new one.  The superroot and PFS inodes
817  * are not indexed in memory.
818  *
819  * The passed-in cluster must be locked and will remain locked on return.
820  * The returned inode will be locked and the caller may dispose of both
821  * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
822  * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
823  *
824  * The hammer2_inode structure regulates the interface between the high level
825  * kernel VNOPS API and the filesystem backend (the chains).
826  *
827  * On return the inode is locked with the supplied cluster.
828  */
829 hammer2_inode_t *
830 hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
831 		  hammer2_tid_t inum, int idx)
832 {
833 	hammer2_inode_t *nip;
834 	const hammer2_inode_data_t *iptmp;
835 	const hammer2_inode_data_t *nipdata;
836 
837 	KKASSERT(xop == NULL ||
838 		 hammer2_cluster_type(&xop->cluster) ==
839 		 HAMMER2_BREF_TYPE_INODE);
840 	KKASSERT(pmp);
841 
842 	/*
843 	 * Interlocked lookup/ref of the inode.  This code is only needed
844 	 * when looking up inodes with nlinks != 0 (TODO: optimize out
845 	 * otherwise and test for duplicates).
846 	 *
847 	 * Cluster can be NULL during the initial pfs allocation.
848 	 */
849 	if (xop) {
850 		iptmp = &hammer2_xop_gdata(xop)->ipdata;
851 		inum = iptmp->meta.inum;
852 		hammer2_xop_pdata(xop);
853 	}
854 again:
855 	nip = hammer2_inode_lookup(pmp, inum);
856 	if (nip) {
857 		/*
858 		 * We may have to unhold the cluster to avoid a deadlock
859 		 * against vnlru (and possibly other XOPs).
860 		 */
861 		if (xop) {
862 			if (hammer2_mtx_ex_try(&nip->lock) != 0) {
863 				hammer2_cluster_unhold(&xop->cluster);
864 				hammer2_mtx_ex(&nip->lock);
865 				hammer2_cluster_rehold(&xop->cluster);
866 			}
867 		} else {
868 			hammer2_mtx_ex(&nip->lock);
869 		}
870 
871 		/*
872 		 * Handle SMP race (not applicable to the super-root spmp
873 		 * which can't index inodes due to duplicative inode numbers).
874 		 */
875 		if (pmp->spmp_hmp == NULL &&
876 		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
877 			hammer2_mtx_unlock(&nip->lock);
878 			hammer2_inode_drop(nip);
879 			goto again;
880 		}
881 		if (xop) {
882 			if (idx >= 0)
883 				hammer2_inode_repoint_one(nip, &xop->cluster,
884 							  idx);
885 			else
886 				hammer2_inode_repoint(nip, &xop->cluster);
887 		}
888 		return nip;
889 	}
890 
891 	/*
892 	 * We couldn't find the inode number; create a new inode, try to
893 	 * insert it, and handle insertion races.
894 	 */
895 	nip = kmalloc_obj(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
896 	spin_init(&nip->cluster_spin, "h2clspin");
897 	atomic_add_long(&pmp->inmem_inodes, 1);
898 	if (pmp->spmp_hmp)
899 		nip->flags = HAMMER2_INODE_SROOT;
900 
901 	/*
902 	 * Initialize nip's cluster.  A cluster is provided for normal
903 	 * inodes but typically not for the super-root or PFS inodes.
904 	 */
905 	nip->cluster.refs = 1;
906 	nip->cluster.pmp = pmp;
907 	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
908 	if (xop) {
909 		nipdata = &hammer2_xop_gdata(xop)->ipdata;
910 		nip->meta = nipdata->meta;
911 		hammer2_xop_pdata(xop);
912 		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
913 		hammer2_inode_repoint(nip, &xop->cluster);
914 	} else {
915 		nip->meta.inum = inum;		/* PFS inum is always 1 XXX */
916 		/* mtime will be updated when a cluster is available */
917 		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);	/*XXX*/
918 	}
919 
920 	nip->pmp = pmp;
921 
922 	/*
923 	 * ref and lock on nip gives it state compatible to after a
924 	 * hammer2_inode_lock() call.
925 	 */
926 	nip->refs = 1;
927 	hammer2_mtx_init(&nip->lock, "h2inode");
928 	hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
929 	hammer2_mtx_ex(&nip->lock);
930 	TAILQ_INIT(&nip->depend_static.sideq);
931 	/* combination of thread lock and chain lock == inode lock */
932 
933 	/*
934 	 * Attempt to add the inode.  If it fails we raced another inode
935 	 * get.  Undo all the work and try again.
936 	 */
937 	if (pmp->spmp_hmp == NULL) {
938 		hammer2_spin_ex(&pmp->inum_spin);
939 		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
940 			hammer2_spin_unex(&pmp->inum_spin);
941 			hammer2_mtx_unlock(&nip->lock);
942 			hammer2_inode_drop(nip);
943 			goto again;
944 		}
945 		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
946 		++pmp->inum_count;
947 		hammer2_spin_unex(&pmp->inum_spin);
948 	}
949 	return (nip);
950 }
951 
952 /*
953  * Create a PFS inode under the superroot.  This function will create the
954  * inode, its media chains, and also insert it into the media.
955  *
956  * Caller must be in a flush transaction because we are inserting the inode
957  * onto the media.
958  */
959 hammer2_inode_t *
960 hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
961 		     const uint8_t *name, size_t name_len,
962 		     int *errorp)
963 {
964 	hammer2_xop_create_t *xop;
965 	hammer2_inode_t *pip;
966 	hammer2_inode_t *nip;
967 	int error;
968 	uuid_t pip_uid;
969 	uuid_t pip_gid;
970 	uint32_t pip_mode;
971 	uint8_t pip_comp_algo;
972 	uint8_t pip_check_algo;
973 	hammer2_tid_t pip_inum;
974 	hammer2_key_t lhc;
975 
976 	pip = spmp->iroot;
977 	nip = NULL;
978 
979 	lhc = hammer2_dirhash(name, name_len);
980 	*errorp = 0;
981 
982 	/*
983 	 * Locate the inode or indirect block to create the new
984 	 * entry in.  At the same time check for key collisions
985 	 * and iterate until we don't get one.
986 	 *
987 	 * Lock the directory exclusively for now to guarantee that
988 	 * we can find an unused lhc for the name.  Due to collisions,
989 	 * two different creates can end up with the same lhc so we
990 	 * cannot depend on the OS to prevent the collision.
991 	 */
992 	hammer2_inode_lock(pip, 0);
993 
994 	pip_uid = pip->meta.uid;
995 	pip_gid = pip->meta.gid;
996 	pip_mode = pip->meta.mode;
997 	pip_comp_algo = pip->meta.comp_algo;
998 	pip_check_algo = pip->meta.check_algo;
999 	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;
1000 
1001 	/*
1002 	 * Locate an unused key in the collision space.
1003 	 */
1004 	{
1005 		hammer2_xop_scanlhc_t *sxop;
1006 		hammer2_key_t lhcbase;
1007 
1008 		lhcbase = lhc;
1009 		sxop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
1010 		sxop->lhc = lhc;
1011 		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
1012 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
1013 			if (lhc != sxop->head.cluster.focus->bref.key)
1014 				break;
1015 			++lhc;
1016 		}
1017 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
1018 
1019 		if (error) {
1020 			if (error != HAMMER2_ERROR_ENOENT)
1021 				goto done2;
1022 			++lhc;
1023 			error = 0;
1024 		}
1025 		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
1026 			error = HAMMER2_ERROR_ENOSPC;
1027 			goto done2;
1028 		}
1029 	}
1030 
1031 	/*
1032 	 * Create the inode with the lhc as the key.
1033 	 */
1034 	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
1035 	xop->lhc = lhc;
1036 	xop->flags = HAMMER2_INSERT_PFSROOT;
1037 	bzero(&xop->meta, sizeof(xop->meta));
1038 
1039 	xop->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
1040 	xop->meta.inum = 1;
1041 	xop->meta.iparent = pip_inum;
1042 
1043 	/* Inherit parent's inode compression mode. */
1044 	xop->meta.comp_algo = pip_comp_algo;
1045 	xop->meta.check_algo = pip_check_algo;
1046 	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
1047 	hammer2_update_time(&xop->meta.ctime);
1048 	xop->meta.mtime = xop->meta.ctime;
1049 	xop->meta.mode = 0755;
1050 	xop->meta.nlinks = 1;
1051 
1052 	/*
1053 	 * Regular files and softlinks allow a small amount of data to be
1054 	 * directly embedded in the inode.  This flag will be cleared if
1055 	 * the size is extended past the embedded limit.
1056 	 */
1057 	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
1058 	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
1059 		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
1060 	}
1061 	hammer2_xop_setname(&xop->head, name, name_len);
1062 	xop->meta.name_len = name_len;
1063 	xop->meta.name_key = lhc;
1064 	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
1065 
1066 	hammer2_xop_start(&xop->head, &hammer2_inode_create_desc);
1067 
1068 	error = hammer2_xop_collect(&xop->head, 0);
1069 #if INODE_DEBUG
1070 	kprintf("CREATE INODE %*.*s\n",
1071 		(int)name_len, (int)name_len, name);
1072 #endif
1073 
1074 	if (error) {
1075 		*errorp = error;
1076 		goto done;
1077 	}
1078 
1079 	/*
1080 	 * Set up the new inode if not a hardlink pointer.
1081 	 *
1082 	 * NOTE: *_get() integrates chain's lock into the inode lock.
1083 	 *
1084 	 * NOTE: Only one new inode can currently be created per
1085 	 *	 transaction.  If the need arises we can adjust
1086 	 *	 hammer2_trans_init() to allow more.
1087 	 *
1088 	 * NOTE: nipdata will have chain's blockset data.
1089 	 */
1090 	nip = hammer2_inode_get(pip->pmp, &xop->head, -1, -1);
1091 	nip->comp_heuristic = 0;
1092 done:
1093 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1094 done2:
1095 	hammer2_inode_unlock(pip);
1096 
1097 	return (nip);
1098 }
1099 
1100 /*
1101  * Create a new, normal inode.  This function will create the inode and
1102  * the media chains, but will not insert the chains onto the media topology
1103  * (doing so would require a flush transaction and cause long stalls).
1104  *
1105  * Caller must be in a normal transaction.
1106  */
1107 hammer2_inode_t *
1108 hammer2_inode_create_normal(hammer2_inode_t *pip,
1109 			    struct vattr *vap, struct ucred *cred,
1110 			    hammer2_key_t inum, int *errorp)
1111 {
1112 	hammer2_xop_create_t *xop;
1113 	hammer2_inode_t *dip;
1114 	hammer2_inode_t *nip;
1115 	int error;
1116 	uid_t xuid;
1117 	uuid_t pip_uid;
1118 	uuid_t pip_gid;
1119 	uint32_t pip_mode;
1120 	uint8_t pip_comp_algo;
1121 	uint8_t pip_check_algo;
1122 	hammer2_tid_t pip_inum;
1123 	uint8_t type;
1124 
1125 	dip = pip->pmp->iroot;
1126 	KKASSERT(dip != NULL);
1127 
1128 	*errorp = 0;
1129 
1130 	/*hammer2_inode_lock(dip, 0);*/
1131 
1132 	pip_uid = pip->meta.uid;
1133 	pip_gid = pip->meta.gid;
1134 	pip_mode = pip->meta.mode;
1135 	pip_comp_algo = pip->meta.comp_algo;
1136 	pip_check_algo = pip->meta.check_algo;
1137 	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;
1138 
1139 	/*
1140 	 * Create the in-memory hammer2_inode structure for the specified
1141 	 * inode.
1142 	 */
1143 	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
1144 	nip->comp_heuristic = 0;
1145 	KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
1146 		 nip->cluster.nchains == 0);
1147 	atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);
1148 
1149 	/*
1150 	 * Setup the inode meta-data
1151 	 */
1152 	nip->meta.type = hammer2_get_obj_type(vap->va_type);
1153 
1154 	switch (nip->meta.type) {
1155 	case HAMMER2_OBJTYPE_CDEV:
1156 	case HAMMER2_OBJTYPE_BDEV:
1157 		nip->meta.rmajor = vap->va_rmajor;
1158 		nip->meta.rminor = vap->va_rminor;
1159 		break;
1160 	default:
1161 		break;
1162 	}
1163 	type = nip->meta.type;
1164 
1165 	KKASSERT(nip->meta.inum == inum);
1166 	nip->meta.iparent = pip_inum;
1167 
1168 	/* Inherit parent's inode compression mode. */
1169 	nip->meta.comp_algo = pip_comp_algo;
1170 	nip->meta.check_algo = pip_check_algo;
1171 	nip->meta.version = HAMMER2_INODE_VERSION_ONE;
1172 	hammer2_update_time(&nip->meta.ctime);
1173 	nip->meta.mtime = nip->meta.ctime;
1174 	nip->meta.mode = vap->va_mode;
1175 	nip->meta.nlinks = 1;
1176 
1177 	xuid = hammer2_to_unix_xid(&pip_uid);
1178 	xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
1179 				     xuid, cred,
1180 				     &vap->va_mode);
1181 	if (vap->va_vaflags & VA_UID_UUID_VALID)
1182 		nip->meta.uid = vap->va_uid_uuid;
1183 	else if (vap->va_uid != (uid_t)VNOVAL)
1184 		hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
1185 	else
1186 		hammer2_guid_to_uuid(&nip->meta.uid, xuid);
1187 
1188 	if (vap->va_vaflags & VA_GID_UUID_VALID)
1189 		nip->meta.gid = vap->va_gid_uuid;
1190 	else if (vap->va_gid != (gid_t)VNOVAL)
1191 		hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
1192 	else
1193 		nip->meta.gid = pip_gid;
1194 
1195 	/*
1196 	 * Regular files and softlinks allow a small amount of data to be
1197 	 * directly embedded in the inode.  This flag will be cleared if
1198 	 * the size is extended past the embedded limit.
1199 	 */
1200 	if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
1201 	    nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
1202 		nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
1203 	}
1204 
1205 	/*
1206 	 * Create the inode using (inum) as the key.  Pass pip for
1207 	 * method inheritance.
1208 	 */
1209 	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
1210 	xop->lhc = inum;
1211 	xop->flags = 0;
1212 	xop->meta = nip->meta;
1213 	KKASSERT(vap);
1214 
1215 	xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
1216 	xop->meta.name_key = inum;
1217 	nip->meta.name_len = xop->meta.name_len;
1218 	nip->meta.name_key = xop->meta.name_key;
1219 	hammer2_inode_modify(nip);
1220 
1221 	/*
1222 	 * Create the inode media chains but leave them detached.  We are
1223 	 * not in a flush transaction so we can't mess with media topology
1224 	 * above normal inodes (i.e. the index of the inodes themselves).
1225 	 *
1226 	 * We've already set the INODE_CREATING flag.  The inode's media
1227 	 * chains will be inserted onto the media topology on the next
1228 	 * filesystem sync.
1229 	 */
1230 	hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);
1231 
1232 	error = hammer2_xop_collect(&xop->head, 0);
1233 #if INODE_DEBUG
1234 	kprintf("create inode type %d error %d\n", nip->meta.type, error);
1235 #endif
1236 
1237 	if (error) {
1238 		*errorp = error;
1239 		goto done;
1240 	}
1241 
1242 	/*
1243 	 * Associate the media chains created by the backend with the
1244 	 * frontend inode.
1245 	 */
1246 	hammer2_inode_repoint(nip, &xop->head.cluster);
1247 done:
1248 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1249 	/*hammer2_inode_unlock(dip);*/
1250 
1251 	return (nip);
1252 }
1253 
1254 /*
1255  * Create a directory entry under dip with the specified name, inode number,
1256  * and OBJTYPE (type).
1257  *
1258  * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
1259  *
1260  * Caller must hold dip locked.
1261  */
1262 int
1263 hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
1264 		      hammer2_key_t inum, uint8_t type)
1265 {
1266 	hammer2_xop_mkdirent_t *xop;
1267 	hammer2_key_t lhc;
1268 	int error;
1269 
1270 	lhc = 0;
1271 	error = 0;
1272 
1273 	KKASSERT(name != NULL);
1274 	lhc = hammer2_dirhash(name, name_len);
1275 
1276 	/*
1277 	 * Locate the inode or indirect block to create the new
1278 	 * entry in.  At the same time check for key collisions
1279 	 * and iterate until we don't get one.
1280 	 *
1281 	 * Lock the directory exclusively for now to guarantee that
1282 	 * we can find an unused lhc for the name.  Due to collisions,
1283 	 * two different creates can end up with the same lhc so we
1284 	 * cannot depend on the OS to prevent the collision.
1285 	 */
1286 	hammer2_inode_modify(dip);
1287 
1288 	/*
1289 	 * If name specified, locate an unused key in the collision space.
1290 	 * Otherwise use the passed-in lhc directly.
1291 	 */
1292 	{
1293 		hammer2_xop_scanlhc_t *sxop;
1294 		hammer2_key_t lhcbase;
1295 
1296 		lhcbase = lhc;
1297 		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1298 		sxop->lhc = lhc;
1299 		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
1300 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
1301 			if (lhc != sxop->head.cluster.focus->bref.key)
1302 				break;
1303 			++lhc;
1304 		}
1305 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
1306 
1307 		if (error) {
1308 			if (error != HAMMER2_ERROR_ENOENT)
1309 				goto done2;
1310 			++lhc;
1311 			error = 0;
1312 		}
1313 		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
1314 			error = HAMMER2_ERROR_ENOSPC;
1315 			goto done2;
1316 		}
1317 	}
1318 
1319 	/*
1320 	 * Create the directory entry with the lhc as the key.
1321 	 */
1322 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1323 	xop->lhc = lhc;
1324 	bzero(&xop->dirent, sizeof(xop->dirent));
1325 	xop->dirent.inum = inum;
1326 	xop->dirent.type = type;
1327 	xop->dirent.namlen = name_len;
1328 
1329 	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
1330 	hammer2_xop_setname(&xop->head, name, name_len);
1331 
1332 	hammer2_xop_start(&xop->head, &hammer2_inode_mkdirent_desc);
1333 
1334 	error = hammer2_xop_collect(&xop->head, 0);
1335 
1336 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1337 done2:
1338 	error = hammer2_error_to_errno(error);
1339 
1340 	return error;
1341 }
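
/*
 * Illustrative sketch (not compiled): roughly how the front-end pairs
 * hammer2_inode_create_normal() with hammer2_dirent_create() when
 * creating a new file.  The caller is assumed to be in a normal
 * transaction, to hold dip exclusively locked, and to have already
 * allocated a fresh inode number.  Error unwinding of a half-created
 * inode is omitted.  The example_* name is hypothetical.
 */
#if 0
static struct vnode *
example_create_file(hammer2_inode_t *dip, const char *name, size_t name_len,
		    struct vattr *vap, struct ucred *cred,
		    hammer2_key_t inum, int *errorp)
{
	hammer2_inode_t *nip;
	struct vnode *vp = NULL;
	int error;

	nip = hammer2_inode_create_normal(dip, vap, cred, inum, &error);
	if (error == 0) {
		/* dirent_create returns a UNIX errno, not a HAMMER2 error */
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	} else {
		error = hammer2_error_to_errno(error);
	}
	if (error == 0) {
		/* keep the dirent and the inode in the same sync pass */
		hammer2_inode_depend(dip, nip);
		vp = hammer2_igetv(nip, &error);
	}
	hammer2_inode_unlock(nip);	/* error unwinding omitted */
	*errorp = error;
	return vp;
}
#endif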
1342 
1343 /*
1344  * Repoint ip->cluster's chains to cluster's chains and fixup the default
1345  * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
1346  * filters out invalid or non-matching elements.
1347  *
1348  * Caller must hold the inode and cluster exclusive locked, if not NULL,
1349  * must also be locked.
1350  *
1351  * Cluster may be NULL to clean out any chains in ip->cluster.
1352  */
1353 void
1354 hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_cluster_t *cluster)
1355 {
1356 	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
1357 	hammer2_chain_t *ochain;
1358 	hammer2_chain_t *nchain;
1359 	int i;
1360 
1361 	bzero(dropch, sizeof(dropch));
1362 
1363 	/*
1364 	 * Replace chains in ip->cluster with chains from cluster and
1365 	 * adjust the focus if necessary.
1366 	 *
1367 	 * NOTE: nchain and/or ochain can be NULL due to gaps
1368 	 *	 in the cluster arrays.
1369 	 */
1370 	hammer2_spin_ex(&ip->cluster_spin);
1371 	for (i = 0; cluster && i < cluster->nchains; ++i) {
1372 		/*
1373 		 * Do not replace elements which are the same.  Also handle
1374 		 * element count discrepancies.
1375 		 */
1376 		nchain = cluster->array[i].chain;
1377 		if (i < ip->cluster.nchains) {
1378 			ochain = ip->cluster.array[i].chain;
1379 			if (ochain == nchain)
1380 				continue;
1381 		} else {
1382 			ochain = NULL;
1383 		}
1384 
1385 		/*
1386 		 * Make adjustments
1387 		 */
1388 		ip->cluster.array[i].chain = nchain;
1389 		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
1390 		ip->cluster.array[i].flags |= cluster->array[i].flags &
1391 					      HAMMER2_CITEM_INVALID;
1392 		if (nchain)
1393 			hammer2_chain_ref(nchain);
1394 		dropch[i] = ochain;
1395 	}
1396 
1397 	/*
1398 	 * Release any left-over chains in ip->cluster.
1399 	 */
1400 	while (i < ip->cluster.nchains) {
1401 		nchain = ip->cluster.array[i].chain;
1402 		if (nchain) {
1403 			ip->cluster.array[i].chain = NULL;
1404 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
1405 		}
1406 		dropch[i] = nchain;
1407 		++i;
1408 	}
1409 
1410 	/*
1411 	 * Fixup fields.  Note that the inode-embedded cluster is never
1412 	 * directly locked.
1413 	 */
1414 	if (cluster) {
1415 		ip->cluster.nchains = cluster->nchains;
1416 		ip->cluster.focus = cluster->focus;
1417 		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
1418 	} else {
1419 		ip->cluster.nchains = 0;
1420 		ip->cluster.focus = NULL;
1421 		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
1422 	}
1423 
1424 	hammer2_spin_unex(&ip->cluster_spin);
1425 
1426 	/*
1427 	 * Cleanup outside of spinlock
1428 	 */
1429 	while (--i >= 0) {
1430 		if (dropch[i])
1431 			hammer2_chain_drop(dropch[i]);
1432 	}
1433 }
1434 
1435 /*
1436  * Repoint a single element from the cluster to the ip.  Used by the
1437  * synchronization threads to piecemeal update inodes.  Does not change
1438  * focus and requires the inode to be re-locked to clean up flags (XXX).
1439  */
1440 void
1441 hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1442 			  int idx)
1443 {
1444 	hammer2_chain_t *ochain;
1445 	hammer2_chain_t *nchain;
1446 	int i;
1447 
1448 	hammer2_spin_ex(&ip->cluster_spin);
1449 	KKASSERT(idx < cluster->nchains);
1450 	if (idx < ip->cluster.nchains) {
1451 		ochain = ip->cluster.array[idx].chain;
1452 		nchain = cluster->array[idx].chain;
1453 	} else {
1454 		ochain = NULL;
1455 		nchain = cluster->array[idx].chain;
1456 		for (i = ip->cluster.nchains; i <= idx; ++i) {
1457 			bzero(&ip->cluster.array[i],
1458 			      sizeof(ip->cluster.array[i]));
1459 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
1460 		}
1461 		ip->cluster.nchains = idx + 1;
1462 	}
1463 	if (ochain != nchain) {
1464 		/*
1465 		 * Make adjustments.
1466 		 */
1467 		ip->cluster.array[idx].chain = nchain;
1468 		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
1469 		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
1470 						HAMMER2_CITEM_INVALID;
1471 	}
1472 	hammer2_spin_unex(&ip->cluster_spin);
1473 	if (ochain != nchain) {
1474 		if (nchain)
1475 			hammer2_chain_ref(nchain);
1476 		if (ochain)
1477 			hammer2_chain_drop(ochain);
1478 	}
1479 }
1480 
1481 hammer2_key_t
1482 hammer2_inode_data_count(const hammer2_inode_t *ip)
1483 {
1484 	hammer2_chain_t *chain;
1485 	hammer2_key_t count = 0;
1486 	int i;
1487 
1488 	for (i = 0; i < ip->cluster.nchains; ++i) {
1489 		if ((chain = ip->cluster.array[i].chain) != NULL) {
1490 			if (count < chain->bref.embed.stats.data_count)
1491 				count = chain->bref.embed.stats.data_count;
1492 		}
1493 	}
1494 	return count;
1495 }
1496 
1497 hammer2_key_t
1498 hammer2_inode_inode_count(const hammer2_inode_t *ip)
1499 {
1500 	hammer2_chain_t *chain;
1501 	hammer2_key_t count = 0;
1502 	int i;
1503 
1504 	for (i = 0; i < ip->cluster.nchains; ++i) {
1505 		if ((chain = ip->cluster.array[i].chain) != NULL) {
1506 			if (count < chain->bref.embed.stats.inode_count)
1507 				count = chain->bref.embed.stats.inode_count;
1508 		}
1509 	}
1510 	return count;
1511 }
1512 
1513 /*
1514  * Called with a locked inode to finish unlinking an inode after xop_unlink
1515  * has been run.  This function is responsible for decrementing nlinks.
1516  *
1517  * We don't bother decrementing nlinks if the file is not open and this was
1518  * the last link.
1519  *
1520  * If the inode is a hardlink target its chain has not yet been deleted;
1521  * otherwise its chain has been deleted.
1522  *
1523  * If isopen is set, any prior deletion was not permanent and the inode is
1524  * left intact with nlinks == 0.
1525  */
1526 int
1527 hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
1528 {
1529 	hammer2_pfs_t *pmp;
1530 	int error;
1531 
1532 	pmp = ip->pmp;
1533 
1534 	/*
1535 	 * Decrement nlinks.  If this is the last link and the file is
1536 	 * not open we can just delete the inode and not bother dropping
1537 	 * nlinks to 0 (avoiding unnecessary block updates).
1538 	 */
1539 	if (ip->meta.nlinks == 1) {
1540 		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
1541 		if (isopen == 0)
1542 			goto killit;
1543 	}
1544 
1545 	hammer2_inode_modify(ip);
1546 	--ip->meta.nlinks;
1547 	if ((int64_t)ip->meta.nlinks < 0)
1548 		ip->meta.nlinks = 0;	/* safety */
1549 
1550 	/*
1551 	 * If nlinks is not zero we are done.  However, this should only be
1552 	 * possible with a hardlink target.  If the inode is an embedded
1553 	 * hardlink, nlinks should have dropped to zero; warn and proceed
1554 	 * with the next step.
1555 	 */
1556 	if (ip->meta.nlinks) {
1557 		if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
1558 			return 0;
1559 		kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
1560 			(intmax_t)ip->meta.nlinks);
1561 		return 0;
1562 	}
1563 
1564 	if (ip->vp)
1565 		hammer2_knote(ip->vp, NOTE_DELETE);
1566 
1567 	/*
1568 	 * nlinks is now an implied zero; delete the inode if not open.
1569 	 * We avoid unnecessary media updates by not bothering to actually
1570 	 * decrement nlinks for the 1->0 transition.
1571 	 *
1572 	 * Put the inode on the sideq to ensure that any disconnected chains
1573 	 * get properly flushed (so they can be freed).  Defer the deletion
1574 	 * to the sync code, doing it now will desynchronize the inode from
1575 	 * to the sync code; doing it now will desynchronize the inode from
1576 	 *
1577 	 * NOTE: killit can be reached without modifying the inode, so
1578 	 *	 make sure that it is on the SIDEQ.
1579 	 */
1580 	if (isopen == 0) {
1581 #if 0
1582 		hammer2_xop_destroy_t *xop;
1583 #endif
1584 
1585 killit:
1586 		atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
1587 		hammer2_inode_delayed_sideq(ip);
1588 #if 0
1589 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1590 		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
1591 		error = hammer2_xop_collect(&xop->head, 0);
1592 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1593 #endif
1594 	}
1595 	error = 0;	/* XXX */
1596 
1597 	return error;
1598 }
1599 
1600 /*
1601  * Mark an inode as being modified, meaning that the caller will modify
1602  * ip->meta.
1603  *
1604  * If a vnode is present we set the vnode dirty and the nominal filesystem
1605  * sync will also handle synchronizing the inode meta-data.  Unless NOSIDEQ
1606  * is set, we must ensure that the inode is on pmp->sideq.
1607  *
1608  * NOTE: We must always queue the inode to the sideq.  This allows H2 to
1609  *	 shortcut vsyncscan() and flush inodes and their related vnodes
1610  *	 in a two stages.  H2 still calls vfsync() for each vnode.
1611  *	 in two stages.  H2 still calls vfsync() for each vnode.
1612  * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
1613  *	 only modifying the in-memory inode.  A modify_tid is synchronized
1614  *	 later when the inode gets flushed.
1615  *
1616  * NOTE: As an exception to the general rule, the inode MAY be locked
1617  *	 shared for this particular call.
1618  */
1619 void
1620 hammer2_inode_modify(hammer2_inode_t *ip)
1621 {
1622 	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1623 	if (ip->vp)
1624 		vsetisdirty(ip->vp);
1625 	if (ip->pmp && (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0)
1626 		hammer2_inode_delayed_sideq(ip);
1627 }
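
/*
 * Illustrative sketch (not compiled): hammer2_inode_modify() is called
 * before touching ip->meta so the inode is flagged MODIFIED and queued
 * to the SIDEQ; the mtime update mirrors what the write path does.  The
 * example_* name is hypothetical.
 */
#if 0
static void
example_inode_touch_mtime(hammer2_inode_t *ip)
{
	uint64_t mtime;

	/* ip is locked by the caller */
	hammer2_update_time(&mtime);
	hammer2_inode_modify(ip);
	ip->meta.mtime = mtime;
}
#endif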
1628 
1629 /*
1630  * Synchronize the inode's frontend state with the chain state prior
1631  * to any explicit flush of the inode or any strategy write call.  This
1632  * does not flush the inode's chain or its sub-topology to media (higher
1633  * level layers are responsible for doing that).
1634  *
1635  * Called with a locked inode inside a normal transaction.
1636  *
1637  * inode must be locked.
1638  */
1639 int
1640 hammer2_inode_chain_sync(hammer2_inode_t *ip)
1641 {
1642 	int error;
1643 
1644 	error = 0;
1645 	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
1646 		hammer2_xop_fsync_t *xop;
1647 
1648 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1649 		xop->clear_directdata = 0;
1650 		if (ip->flags & HAMMER2_INODE_RESIZED) {
1651 			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
1652 			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
1653 				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
1654 				xop->clear_directdata = 1;
1655 			}
1656 			xop->osize = ip->osize;
1657 		} else {
1658 			xop->osize = ip->meta.size;	/* safety */
1659 		}
1660 		xop->ipflags = ip->flags;
1661 		xop->meta = ip->meta;
1662 
1663 		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
1664 					     HAMMER2_INODE_MODIFIED);
1665 		hammer2_xop_start(&xop->head, &hammer2_inode_chain_sync_desc);
1666 		error = hammer2_xop_collect(&xop->head, 0);
1667 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1668 		if (error == HAMMER2_ERROR_ENOENT)
1669 			error = 0;
1670 		if (error) {
1671 			kprintf("hammer2: unable to fsync inode %p\n", ip);
1672 			/*
1673 			atomic_set_int(&ip->flags,
1674 				       xop->ipflags & (HAMMER2_INODE_RESIZED |
1675 						       HAMMER2_INODE_MODIFIED));
1676 			*/
1677 			/* XXX return error somehow? */
1678 		}
1679 	}
1680 	return error;
1681 }
1682 
1683 /*
1684  * When an inode is flagged INODE_CREATING its chains have not actually
1685  * been inserting into the on-media tree yet.
1686  */
1687 int
1688 hammer2_inode_chain_ins(hammer2_inode_t *ip)
1689 {
1690 	int error;
1691 
1692 	error = 0;
1693 	if (ip->flags & HAMMER2_INODE_CREATING) {
1694 		hammer2_xop_create_t *xop;
1695 
1696 		atomic_clear_int(&ip->flags, HAMMER2_INODE_CREATING);
1697 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1698 		xop->lhc = ip->meta.inum;
1699 		xop->flags = 0;
1700 		hammer2_xop_start(&xop->head, &hammer2_inode_create_ins_desc);
1701 		error = hammer2_xop_collect(&xop->head, 0);
1702 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1703 		if (error == HAMMER2_ERROR_ENOENT)
1704 			error = 0;
1705 		if (error) {
1706 			kprintf("hammer2: backend unable to insert inode "
1707 				"%p %jd\n", ip, (intmax_t)ip->meta.inum);
1708 			/* XXX return error somehow? */
1709 		}
1710 	}
1711 	return error;
1712 }
1713 
1714 /*
1715  * When an inode is flagged INODE_DELETING it has been deleted (no directory
1716  * entry or open refs are left, though as an optimization H2 might leave
1717  * nlinks == 1 to avoid unnecessary block updates).  The backend flush then
1718  * needs to actually remove it from the topology.
1719  *
1720  * NOTE: backend flush must still sync and flush the deleted inode to clean
1721  *	 out related chains.
1722  *
1723  * NOTE: We must clear not only INODE_DELETING, but also INODE_ISUNLINKED
1724  *	 to prevent the vnode reclaim code from trying to delete it twice.
1725  */
1726 int
1727 hammer2_inode_chain_des(hammer2_inode_t *ip)
1728 {
1729 	int error;
1730 
1731 	error = 0;
1732 	if (ip->flags & HAMMER2_INODE_DELETING) {
1733 		hammer2_xop_destroy_t *xop;
1734 
1735 		atomic_clear_int(&ip->flags, HAMMER2_INODE_DELETING |
1736 					     HAMMER2_INODE_ISUNLINKED);
1737 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1738 		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
1739 		error = hammer2_xop_collect(&xop->head, 0);
1740 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1741 
1742 		if (error == HAMMER2_ERROR_ENOENT)
1743 			error = 0;
1744 		if (error) {
1745 			kprintf("hammer2: backend unable to delete inode "
1746 				"%p %jd\n", ip, (intmax_t)ip->meta.inum);
1747 			/* XXX return error somehow? */
1748 		}
1749 	}
1750 	return error;
1751 }
1752 
1753 /*
1754  * Flushes the inode's chain and its sub-topology to media.  Interlocks
1755  * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
1756  * function creating or modifying a chain under this inode will re-set the
1757  * flag.
1758  *
1759  * inode must be locked.
1760  */
1761 int
1762 hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags)
1763 {
1764 	hammer2_xop_fsync_t *xop;
1765 	int error;
1766 
1767 	atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
1768 	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | flags);
1769 	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
1770 	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
1771 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1772 	if (error == HAMMER2_ERROR_ENOENT)
1773 		error = 0;
1774 
1775 	return error;
1776 }
1777
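/*
 * Illustrative sketch (not compiled): the order used when pushing a
 * dirty inode out (e.g. on fsync): synchronize the frontend meta-data
 * into the chains first, then flush the chain topology.  Both calls
 * require the inode to be locked inside a transaction; real callers
 * pass additional XOP flags to the flush, and 0 is used here only to
 * keep the sketch minimal.  The example_* name is hypothetical.
 */
#if 0
static int
example_inode_fsync(hammer2_inode_t *ip)
{
	int error;

	error = hammer2_inode_chain_sync(ip);
	if (error == 0)
		error = hammer2_inode_chain_flush(ip, 0);
	return error;
}
#endif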