xref: /dragonfly/sys/vfs/hammer2/hammer2_inode.c (revision 60e242c5)
1 /*
2  * Copyright (c) 2011-2023 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 #include <sys/cdefs.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/types.h>
39 #include <sys/lock.h>
40 #include <sys/uuid.h>
41 #include <sys/vnode.h>
42 
43 #include "hammer2.h"
44 
45 #define INODE_DEBUG	0
46 
47 /*
48  * Initialize inum hash in fresh structure
49  */
50 void
51 hammer2_inum_hash_init(hammer2_pfs_t *pmp)
52 {
53 	hammer2_inum_hash_t *hash;
54 	int i;
55 
56 	for (i = 0; i < HAMMER2_INUMHASH_SIZE; ++i) {
57 		hash = &pmp->inumhash[i];
58 		hammer2_spin_init(&hash->spin, "h2inum");
59 	}
60 }
61 
62 /*
63  * Caller holds pmp->list_spin and the inode should be locked.  Merge ip
64  * with the specified depend.
65  *
66  * If the ip is on SYNCQ it stays there and (void *)-1 is returned, indicating
67  * that successive calls must ensure the ip is on a pass2 depend (or they are
68  * all SYNCQ).  If the passed-in depend is not NULL and not (void *)-1 then
69  * we can set pass2 on it and return.
70  *
71  * If the ip is not on SYNCQ it is merged with the passed-in depend, creating
72  * a self-depend if necessary, and depend->pass2 is set according
73  * to the PASS2 flag.  SIDEQ is set.
74  */
75 static __noinline
76 hammer2_depend_t *
77 hammer2_inode_setdepend_locked(hammer2_inode_t *ip, hammer2_depend_t *depend)
78 {
79 	hammer2_pfs_t *pmp = ip->pmp;
80 	hammer2_depend_t *dtmp;
81 	hammer2_inode_t *iptmp;
82 
83 	/*
84 	 * If ip is SYNCQ its entry is used for the syncq list and it will
85 	 * no longer be associated with a dependency.  Merging this status
86 	 * with a passed-in depend implies PASS2.
87 	 */
88 	if (ip->flags & HAMMER2_INODE_SYNCQ) {
89 		if (depend == (void *)-1 ||
90 		    depend == NULL) {
91 			return ((void *)-1);
92 		}
93 		depend->pass2 = 1;
94 		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);
95 
96 		return depend;
97 	}
98 
99 	/*
100 	 * If ip is already SIDEQ, merge ip->depend into the passed-in depend.
101 	 * If it is not, associate the ip with the passed-in depend, creating
102 	 * a single-entry dependency using depend_static if necessary.
103 	 *
104 	 * NOTE: The use of ip->depend_static always requires that the
105 	 *	 specific ip containing the structure is part of that
106 	 *	 particular depend_static's dependency group.
107 	 */
108 	if (ip->flags & HAMMER2_INODE_SIDEQ) {
109 		/*
110 		 * Merge ip->depend with the passed-in depend.  If the
111 		 * passed-in depend is not a special case, all ips associated
112 		 * with ip->depend (including the original ip) must be moved
113 		 * to the passed-in depend.
114 		 */
115 		if (depend == NULL) {
116 			depend = ip->depend;
117 		} else if (depend == (void *)-1) {
118 			depend = ip->depend;
119 			depend->pass2 = 1;
120 		} else if (depend != ip->depend) {
121 #ifdef INVARIANTS
122 			int sanitychk = 0;
123 #endif
124 			dtmp = ip->depend;
125 			while ((iptmp = TAILQ_FIRST(&dtmp->sideq)) != NULL) {
126 #ifdef INVARIANTS
127 				if (iptmp == ip)
128 					sanitychk = 1;
129 #endif
130 				TAILQ_REMOVE(&dtmp->sideq, iptmp, entry);
131 				TAILQ_INSERT_TAIL(&depend->sideq, iptmp, entry);
132 				iptmp->depend = depend;
133 			}
134 			KKASSERT(sanitychk == 1);
135 			depend->count += dtmp->count;
136 			depend->pass2 |= dtmp->pass2;
137 			TAILQ_REMOVE(&pmp->depq, dtmp, entry);
138 			dtmp->count = 0;
139 			dtmp->pass2 = 0;
140 		}
141 	} else {
142 		/*
143 		 * Add ip to the sideq, creating a self-dependency if
144 		 * necessary.
145 		 */
146 		hammer2_inode_ref(ip);
147 		atomic_set_int(&ip->flags, HAMMER2_INODE_SIDEQ);
148 		if (depend == NULL) {
149 			depend = &ip->depend_static;
150 			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
151 		} else if (depend == (void *)-1) {
152 			depend = &ip->depend_static;
153 			depend->pass2 = 1;
154 			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
155 		} /* else add ip to passed-in depend */
156 		TAILQ_INSERT_TAIL(&depend->sideq, ip, entry);
157 		ip->depend = depend;
158 		++depend->count;
159 		++pmp->sideq_count;
160 	}
161 
162 	if (ip->flags & HAMMER2_INODE_SYNCQ_PASS2)
163 		depend->pass2 = 1;
164 	if (depend->pass2)
165 		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);
166 
167 	return depend;
168 }
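
/*
 * Usage sketch (illustrative only): callers are expected to thread the
 * returned depend through successive calls while holding pmp->list_spin,
 * as hammer2_inode_depend() does further below:
 *
 *	depend = hammer2_inode_setdepend_locked(ip1, NULL);
 *	depend = hammer2_inode_setdepend_locked(ip2, depend);
 */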
169 
170 /*
171  * Put a solo inode on the SIDEQ (meaning that it is dirty).  This can also
172  * occur from inode_lock4() and inode_depend().
173  *
174  * Caller must pass-in a locked inode.
175  */
176 void
177 hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
178 {
179 	hammer2_pfs_t *pmp = ip->pmp;
180 
181 	/*
182 	 * Optimize case to avoid pmp spinlock.
183 	 */
184 	if ((ip->flags & (HAMMER2_INODE_SYNCQ | HAMMER2_INODE_SIDEQ)) == 0) {
185 		hammer2_spin_ex(&pmp->list_spin);
186 		hammer2_inode_setdepend_locked(ip, NULL);
187 		hammer2_spin_unex(&pmp->list_spin);
188 	}
189 }
190 
191 /*
192  * Lock an inode, with SYNCQ semantics.
193  *
194  * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
195  * flags for options:
196  *
197  *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.
198  *	  Shared locks are not subject to SYNCQ semantics; exclusive locks
199  *	  are.
200  *
201  *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
202  *	  Most front-end inode locks do.
203  *
204  *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
205  *	  the inode data be resolved.  This is used by the syncthr because
206  *	  it can run on an unresolved/out-of-sync cluster, and also by the
207  *	  vnode reclamation code to avoid unnecessary I/O (particularly when
208  *	  disposing of hundreds of thousands of cached vnodes).
209  *
210  * This function, along with lock4, has SYNCQ semantics.  If the inode being
211  * locked is on the SYNCQ, that is, it has been staged by the syncer, we must
212  * block until the operation is complete (even if we can lock the inode).  In
213  * order to reduce the stall time, we re-order the inode to the front of the
214  * pmp->syncq prior to blocking.  This reordering VERY significantly improves
215  * performance.
216  *
217  * The inode locking function locks the inode itself, resolves any stale
218  * chains in the inode's cluster, and allocates a fresh copy of the
219  * cluster with 1 ref and all the underlying chains locked.
220  *
221  * ip->cluster will be stable while the inode is locked.
222  *
223  * NOTE: We don't combine the inode/chain lock because putting away an
224  *       inode would otherwise confuse multiple lock holders of the inode.
225  */
226 void
227 hammer2_inode_lock(hammer2_inode_t *ip, int how)
228 {
229 	hammer2_pfs_t *pmp;
230 
231 	hammer2_inode_ref(ip);
232 	pmp = ip->pmp;
233 
234 	/*
235 	 * Inode structure mutex - Shared lock
236 	 */
237 	if (how & HAMMER2_RESOLVE_SHARED) {
238 		hammer2_mtx_sh(&ip->lock);
239 		return;
240 	}
241 
242 	/*
243 	 * Inode structure mutex - Exclusive lock
244 	 *
245 	 * An exclusive lock (if not recursive) must wait for inodes on
246 	 * SYNCQ to flush first, to ensure that meta-data dependencies such
247 	 * as the nlink count and related directory entries are not split
248 	 * across flushes.
249 	 *
250 	 * If the vnode is locked by the current thread it must be unlocked
251 	 * across the tsleep() to avoid a deadlock.
252 	 */
253 	hammer2_mtx_ex(&ip->lock);
254 	if (hammer2_mtx_refs(&ip->lock) > 1)
255 		return;
256 	while ((ip->flags & HAMMER2_INODE_SYNCQ) && pmp) {
257 		hammer2_spin_ex(&pmp->list_spin);
258 		if (ip->flags & HAMMER2_INODE_SYNCQ) {
259 			tsleep_interlock(&ip->flags, 0);
260 			atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
261 			TAILQ_REMOVE(&pmp->syncq, ip, entry);
262 			TAILQ_INSERT_HEAD(&pmp->syncq, ip, entry);
263 			hammer2_spin_unex(&pmp->list_spin);
264 			hammer2_mtx_unlock(&ip->lock);
265 			tsleep(&ip->flags, PINTERLOCKED, "h2sync", 0);
266 			hammer2_mtx_ex(&ip->lock);
267 			continue;
268 		}
269 		hammer2_spin_unex(&pmp->list_spin);
270 		break;
271 	}
272 }
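
/*
 * Usage sketch (illustrative only), assuming a caller that only needs
 * stable meta-data; ip and size are hypothetical locals.  The lock gains
 * a ref on the inode and hammer2_inode_unlock() drops it:
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *	size = ip->meta.size;
 *	hammer2_inode_unlock(ip);
 */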
273 
274 /*
275  * Exclusively lock up to four inodes, in order, with SYNCQ semantics.
276  * ip1 and ip2 must not be NULL.  ip3 and ip4 may be NULL, but if ip3 is
277  * NULL then ip4 must also be NULL.
278  *
279  * This creates a dependency between up to four inodes.
280  */
281 void
282 hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
283 		    hammer2_inode_t *ip3, hammer2_inode_t *ip4)
284 {
285 	hammer2_inode_t *ips[4];
286 	hammer2_inode_t *iptmp;
287 	hammer2_inode_t *ipslp;
288 	hammer2_depend_t *depend;
289 	hammer2_pfs_t *pmp;
290 	size_t count;
291 	size_t i;
292 
293 	pmp = ip1->pmp;			/* may be NULL */
294 	KKASSERT(pmp == ip2->pmp);
295 
296 	ips[0] = ip1;
297 	ips[1] = ip2;
298 	if (ip3 == NULL) {
299 		count = 2;
300 	} else if (ip4 == NULL) {
301 		count = 3;
302 		ips[2] = ip3;
303 		KKASSERT(pmp == ip3->pmp);
304 	} else {
305 		count = 4;
306 		ips[2] = ip3;
307 		ips[3] = ip4;
308 		KKASSERT(pmp == ip3->pmp);
309 		KKASSERT(pmp == ip4->pmp);
310 	}
311 
312 	for (i = 0; i < count; ++i)
313 		hammer2_inode_ref(ips[i]);
314 
315 restart:
316 	/*
317 	 * Lock the inodes in order
318 	 */
319 	for (i = 0; i < count; ++i) {
320 		hammer2_mtx_ex(&ips[i]->lock);
321 	}
322 
323 	/*
324 	 * Associate dependencies and record the first inode found on SYNCQ
325 	 * (operation is allowed to proceed for inodes on PASS2) for our
326 	 * sleep operation; this inode is theoretically the last one sync'd
327 	 * in the sequence.
328 	 *
329 	 * All inodes found on SYNCQ are moved to the head of the syncq
330 	 * to reduce stalls.
331 	 */
332 	hammer2_spin_ex(&pmp->list_spin);
333 	depend = NULL;
334 	ipslp = NULL;
335 	for (i = 0; i < count; ++i) {
336 		iptmp = ips[i];
337 		depend = hammer2_inode_setdepend_locked(iptmp, depend);
338 		if (iptmp->flags & HAMMER2_INODE_SYNCQ) {
339 			TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
340 			TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
341 			if (ipslp == NULL)
342 				ipslp = iptmp;
343 		}
344 	}
345 	hammer2_spin_unex(&pmp->list_spin);
346 
347 	/*
348 	 * Block and retry if any of the inodes are on SYNCQ.  It is
349 	 * important that we allow the operation to proceed in the
350 	 * PASS2 case, to avoid deadlocking against the vnode.
351 	 */
352 	if (ipslp) {
353 		for (i = 0; i < count; ++i)
354 			hammer2_mtx_unlock(&ips[i]->lock);
355 		tsleep(&ipslp->flags, 0, "h2sync", 2);
356 		goto restart;
357 	}
358 }
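
/*
 * Usage sketch (illustrative only), roughly how a rename-style caller
 * might establish a multi-inode dependency; fdip, tdip and ip are
 * hypothetical directory/target inodes.  Each locked inode is released
 * with hammer2_inode_unlock():
 *
 *	hammer2_inode_lock4(fdip, tdip, ip, NULL);
 *	...
 *	hammer2_inode_unlock(ip);
 *	hammer2_inode_unlock(tdip);
 *	hammer2_inode_unlock(fdip);
 */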
359 
360 /*
361  * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
362  * we wake them up.
363  */
364 void
365 hammer2_inode_unlock(hammer2_inode_t *ip)
366 {
367 	if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) {
368 		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
369 		hammer2_mtx_unlock(&ip->lock);
370 		wakeup(&ip->flags);
371 	} else {
372 		hammer2_mtx_unlock(&ip->lock);
373 	}
374 	hammer2_inode_drop(ip);
375 }
376 
377 /*
378  * If either ip1 or ip2 has been tapped by the syncer, make sure that both
379  * are.  This ensures that dependencies (e.g. dirent-v-inode) are synced
380  * together.  For dirent-v-inode depends, pass the dirent as ip1.
381  *
382  * If neither ip1 nor ip2 has been tapped by the syncer, merge them into a
383  * single dependency.  Dependencies are entered into pmp->depq.  This
384  * effectively flags the inodes SIDEQ.
385  *
386  * Both ip1 and ip2 must be locked by the caller.  This also ensures
387  * that we can't race the end of the syncer's queue run.
388  */
389 void
390 hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
391 {
392 	hammer2_pfs_t *pmp;
393 	hammer2_depend_t *depend;
394 
395 	pmp = ip1->pmp;
396 	hammer2_spin_ex(&pmp->list_spin);
397 	depend = hammer2_inode_setdepend_locked(ip1, NULL);
398 	depend = hammer2_inode_setdepend_locked(ip2, depend);
399 	hammer2_spin_unex(&pmp->list_spin);
400 }
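
/*
 * Usage sketch (illustrative only), assuming dip is the directory inode
 * holding a new dirent and nip is the new inode, both locked:
 *
 *	hammer2_inode_depend(dip, nip);
 */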
401 
402 /*
403  * Select a chain out of an inode's cluster and lock it.
404  *
405  * The inode does not have to be locked.
406  */
407 hammer2_chain_t *
408 hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
409 {
410 	hammer2_chain_t *chain;
411 	hammer2_cluster_t *cluster;
412 
413 	hammer2_spin_sh(&ip->cluster_spin);
414 	cluster = &ip->cluster;
415 	if (clindex >= cluster->nchains)
416 		chain = NULL;
417 	else
418 		chain = cluster->array[clindex].chain;
419 	if (chain) {
420 		hammer2_chain_ref(chain);
421 		hammer2_spin_unsh(&ip->cluster_spin);
422 		hammer2_chain_lock(chain, how);
423 	} else {
424 		hammer2_spin_unsh(&ip->cluster_spin);
425 	}
426 	return chain;
427 }
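
/*
 * Usage sketch (illustrative only).  The returned chain is referenced and
 * locked; the caller performs the matching unlock/drop:
 *
 *	chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
 *	if (chain) {
 *		...
 *		hammer2_chain_unlock(chain);
 *		hammer2_chain_drop(chain);
 *	}
 */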
428 
429 hammer2_chain_t *
430 hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
431 			       hammer2_chain_t **parentp, int how)
432 {
433 	hammer2_chain_t *chain;
434 	hammer2_chain_t *parent;
435 
436 	for (;;) {
437 		hammer2_spin_sh(&ip->cluster_spin);
438 		if (clindex >= ip->cluster.nchains)
439 			chain = NULL;
440 		else
441 			chain = ip->cluster.array[clindex].chain;
442 		if (chain) {
443 			hammer2_chain_ref(chain);
444 			hammer2_spin_unsh(&ip->cluster_spin);
445 			hammer2_chain_lock(chain, how);
446 		} else {
447 			hammer2_spin_unsh(&ip->cluster_spin);
448 		}
449 
450 		/*
451 		 * Get parent, lock order must be (parent, chain).
452 		 */
453 		parent = chain->parent;
454 		if (parent) {
455 			hammer2_chain_ref(parent);
456 			hammer2_chain_unlock(chain);
457 			hammer2_chain_lock(parent, how);
458 			hammer2_chain_lock(chain, how);
459 		}
460 		if (ip->cluster.array[clindex].chain == chain &&
461 		    chain->parent == parent) {
462 			break;
463 		}
464 
465 		/*
466 		 * Retry
467 		 */
468 		hammer2_chain_unlock(chain);
469 		hammer2_chain_drop(chain);
470 		if (parent) {
471 			hammer2_chain_unlock(parent);
472 			hammer2_chain_drop(parent);
473 		}
474 	}
475 	*parentp = parent;
476 
477 	return chain;
478 }
479 
480 /*
481  * Temporarily release a lock held shared or exclusive.  Caller must
482  * hold the lock shared or exclusive on call and the lock will be released
483  * on return.
484  *
485  * The restore function below re-establishes a temporarily released lock.
486  */
487 hammer2_mtx_state_t
488 hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
489 {
490 	return hammer2_mtx_temp_release(&ip->lock);
491 }
492 
493 void
494 hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
495 {
496 	hammer2_mtx_temp_restore(&ip->lock, ostate);
497 }
498 
499 /*
500  * Upgrade a shared inode lock to exclusive and return.  If the inode lock
501  * is already held exclusively this is a NOP.
502  *
503  * The caller MUST hold the inode lock either shared or exclusive on call
504  * and will own the lock exclusively on return.
505  *
506  * Returns non-zero if the lock was already exclusive prior to the upgrade.
507  */
508 int
509 hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
510 {
511 	int wasexclusive;
512 
513 	if (mtx_islocked_ex(&ip->lock)) {
514 		wasexclusive = 1;
515 	} else {
516 		hammer2_mtx_unlock(&ip->lock);
517 		hammer2_mtx_ex(&ip->lock);
518 		wasexclusive = 0;
519 	}
520 	return wasexclusive;
521 }
522 
523 /*
524  * Downgrade an inode lock from exclusive to shared only if the inode
525  * lock was previously shared.  If the inode lock was previously exclusive,
526  * this is a NOP.
527  */
528 void
529 hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
530 {
531 	if (wasexclusive == 0)
532 		hammer2_mtx_downgrade(&ip->lock);
533 }
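
/*
 * Usage sketch (illustrative only): the upgrade and downgrade helpers are
 * intended to be paired, as hammer2_igetv() does further below:
 *
 *	wasexclusive = hammer2_inode_lock_upgrade(ip);
 *	...
 *	hammer2_inode_lock_downgrade(ip, wasexclusive);
 */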
534 
535 static __inline hammer2_inum_hash_t *
536 inumhash(hammer2_pfs_t *pmp, hammer2_tid_t inum)
537 {
538 	int hv;
539 
540 	hv = (int)inum;
541 	return (&pmp->inumhash[hv & HAMMER2_INUMHASH_MASK]);
542 }
543 
544 
545 /*
546  * Lookup an inode by inode number
547  */
548 hammer2_inode_t *
549 hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
550 {
551 	hammer2_inum_hash_t *hash;
552 	hammer2_inode_t *ip;
553 
554 	KKASSERT(pmp);
555 	if (pmp->spmp_hmp) {
556 		ip = NULL;
557 	} else {
558 		hash = inumhash(pmp, inum);
559 		hammer2_spin_sh(&hash->spin);
560 		for (ip = hash->base; ip; ip = ip->next) {
561 			if (ip->meta.inum == inum) {
562 				hammer2_inode_ref(ip);
563 				break;
564 			}
565 		}
566 		hammer2_spin_unsh(&hash->spin);
567 	}
568 	return(ip);
569 }
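
/*
 * Usage sketch (illustrative only).  A successful lookup returns a
 * referenced (but not locked) inode which the caller must drop:
 *
 *	ip = hammer2_inode_lookup(pmp, inum);
 *	if (ip) {
 *		...
 *		hammer2_inode_drop(ip);
 *	}
 */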
570 
571 /*
572  * Adding a ref to an inode is only legal if the inode already has at least
573  * one ref.
574  *
575  * (can be called with spinlock held)
576  */
577 void
578 hammer2_inode_ref(hammer2_inode_t *ip)
579 {
580 	atomic_add_int(&ip->refs, 1);
581 	if (hammer2_debug & 0x80000) {
582 		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
583 		print_backtrace(8);
584 	}
585 }
586 
587 /*
588  * Drop an inode reference, freeing the inode when the last reference goes
589  * away.
590  */
591 void
592 hammer2_inode_drop(hammer2_inode_t *ip)
593 {
594 	hammer2_pfs_t *pmp;
595 	u_int refs;
596 
597 	while (ip) {
598 		if (hammer2_debug & 0x80000) {
599 			kprintf("INODE-1 %p (%d->%d)\n",
600 				ip, ip->refs, ip->refs - 1);
601 			print_backtrace(8);
602 		}
603 		refs = ip->refs;
604 		cpu_ccfence();
605 		if (refs == 1) {
606 			/*
607 			 * Transition to zero, must interlock with
608 			 * the inode number lookup hash (if applicable).
609 			 * It should not be possible for anyone to race
610 			 * the transition to 0.
611 			 */
612 			hammer2_inum_hash_t *hash;
613 			hammer2_inode_t **xipp;
614 
615 			pmp = ip->pmp;
616 			KKASSERT(pmp);
617 			hash = inumhash(pmp, ip->meta.inum);
618 
619 			hammer2_spin_ex(&hash->spin);
620 			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
621 				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
622 				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
623 					xipp = &hash->base;
624 					while (*xipp != ip)
625 						xipp = &(*xipp)->next;
626 					*xipp = ip->next;
627 					ip->next = NULL;
628 					atomic_add_long(&pmp->inum_count, -1);
629 					atomic_clear_int(&ip->flags,
630 						     HAMMER2_INODE_ONRBTREE);
631 				}
632 				hammer2_spin_unex(&hash->spin);
633 
634 				ip->pmp = NULL;
635 
636 				/*
637 				 * Cleaning out ip->cluster isn't entirely
638 				 * trivial.
639 				 */
640 				hammer2_inode_repoint(ip, NULL);
641 
642 				kfree_obj(ip, pmp->minode);
643 				atomic_add_long(&pmp->inmem_inodes, -1);
644 				ip = NULL;	/* will terminate loop */
645 			} else {
646 				hammer2_spin_unex(&hash->spin);
647 			}
648 		} else {
649 			/*
650 			 * Non-zero transition
651 			 */
652 			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
653 				break;
654 		}
655 	}
656 }
657 
658 /*
659  * Get the vnode associated with the given inode, allocating the vnode if
660  * necessary.  The vnode will be returned exclusively locked.
661  *
662  * *errorp is set to a UNIX error, not a HAMMER2 error.
663  *
664  * The caller must lock the inode (shared or exclusive).
665  *
666  * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
667  * races.
668  */
669 struct vnode *
670 hammer2_igetv(hammer2_inode_t *ip, int *errorp)
671 {
672 	hammer2_pfs_t *pmp;
673 	struct vnode *vp;
674 
675 	pmp = ip->pmp;
676 	KKASSERT(pmp != NULL);
677 	*errorp = 0;
678 
679 	for (;;) {
680 		/*
681 		 * Attempt to reuse an existing vnode assignment.  It is
682 		 * possible to race a reclaim so the vget() may fail.  The
683 		 * inode must be unlocked during the vget() to avoid a
684 		 * deadlock against a reclaim.
685 		 */
686 		int wasexclusive;
687 
688 		vp = ip->vp;
689 		if (vp) {
690 			/*
691 			 * Inode must be unlocked during the vget() to avoid
692 			 * possible deadlocks, but leave the ip ref intact.
693 			 *
694 			 * vnode is held to prevent destruction during the
695 			 * vget().  The vget() can still fail if we lost
696 			 * a reclaim race on the vnode.
697 			 */
698 			hammer2_mtx_state_t ostate;
699 
700 			vhold(vp);
701 			ostate = hammer2_inode_lock_temp_release(ip);
702 			if (vget(vp, LK_EXCLUSIVE)) {
703 				vdrop(vp);
704 				hammer2_inode_lock_temp_restore(ip, ostate);
705 				continue;
706 			}
707 			hammer2_inode_lock_temp_restore(ip, ostate);
708 			vdrop(vp);
709 			/* vp still locked and ref from vget */
710 			if (ip->vp != vp) {
711 				kprintf("hammer2: igetv race %p/%p\n",
712 					ip->vp, vp);
713 				vput(vp);
714 				continue;
715 			}
716 			*errorp = 0;
717 			break;
718 		}
719 
720 		/*
721 		 * No vnode exists, allocate a new vnode.  Beware of
722 		 * allocation races.  This function will return an
723 		 * exclusively locked and referenced vnode.
724 		 */
725 		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
726 		if (*errorp) {
727 			kprintf("hammer2: igetv getnewvnode failed %d\n",
728 				*errorp);
729 			vp = NULL;
730 			break;
731 		}
732 
733 		/*
734 		 * Lock the inode and check for an allocation race.
735 		 */
736 		wasexclusive = hammer2_inode_lock_upgrade(ip);
737 		if (ip->vp != NULL) {
738 			vp->v_type = VBAD;
739 			vx_put(vp);
740 			hammer2_inode_lock_downgrade(ip, wasexclusive);
741 			continue;
742 		}
743 
744 		switch (ip->meta.type) {
745 		case HAMMER2_OBJTYPE_DIRECTORY:
746 			vp->v_type = VDIR;
747 			break;
748 		case HAMMER2_OBJTYPE_REGFILE:
749 			/*
750 			 * Regular file must use buffer cache I/O
751 			 * (VKVABIO cpu sync semantics supported)
752 			 */
753 			vp->v_type = VREG;
754 			vsetflags(vp, VKVABIO);
755 			vinitvmio(vp, ip->meta.size,
756 				  HAMMER2_LBUFSIZE,
757 				  (int)ip->meta.size & HAMMER2_LBUFMASK);
758 			break;
759 		case HAMMER2_OBJTYPE_SOFTLINK:
760 			/*
761 			 * XXX for now we are using the generic file_read
762 			 * and file_write code so we need a buffer cache
763 			 * association.
764 			 *
765 			 * (VKVABIO cpu sync semantics supported)
766 			 */
767 			vp->v_type = VLNK;
768 			vsetflags(vp, VKVABIO);
769 			vinitvmio(vp, ip->meta.size,
770 				  HAMMER2_LBUFSIZE,
771 				  (int)ip->meta.size & HAMMER2_LBUFMASK);
772 			break;
773 		case HAMMER2_OBJTYPE_CDEV:
774 			vp->v_type = VCHR;
775 			/* fall through */
776 		case HAMMER2_OBJTYPE_BDEV:
777 			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
778 			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
779 				vp->v_type = VBLK;
780 			addaliasu(vp,
781 				  ip->meta.rmajor,
782 				  ip->meta.rminor);
783 			break;
784 		case HAMMER2_OBJTYPE_FIFO:
785 			vp->v_type = VFIFO;
786 			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
787 			break;
788 		case HAMMER2_OBJTYPE_SOCKET:
789 			vp->v_type = VSOCK;
790 			break;
791 		default:
792 			panic("hammer2: unhandled objtype %d",
793 			      ip->meta.type);
794 			break;
795 		}
796 
797 		if (ip == pmp->iroot)
798 			vsetflags(vp, VROOT);
799 
800 		vp->v_data = ip;
801 		ip->vp = vp;
802 		hammer2_inode_ref(ip);		/* vp association */
803 		hammer2_inode_lock_downgrade(ip, wasexclusive);
804 		vx_downgrade(vp);
805 		break;
806 	}
807 
808 	/*
809 	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
810 	 */
811 	if (hammer2_debug & 0x0002) {
812 		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
813 			vp, vp->v_refcnt, vp->v_auxrefs);
814 	}
815 	return (vp);
816 }
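
/*
 * Usage sketch (illustrative only), assuming a resolve-style caller that
 * already holds a referenced inode; vp and error are hypothetical locals.
 * The returned vnode is exclusively locked and referenced:
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *	vp = hammer2_igetv(ip, &error);
 *	hammer2_inode_unlock(ip);
 */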
817 
818 /*
819  * XXX this API needs a rewrite.  It needs to be split into a
820  * hammer2_inode_alloc() and hammer2_inode_build() to allow us to get
821  * rid of the inode/chain lock reversal fudge.
822  *
823  * Returns the inode associated with the passed-in cluster, allocating a new
824  * hammer2_inode structure if necessary, then synchronizing it to the passed
825  * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
826  * is synchronized.  Otherwise the whole cluster is synchronized.  inum will
827  * be extracted from the passed-in xop and the inum argument will be ignored.
828  *
829  * If xop is passed as NULL then a new hammer2_inode is allocated with the
830  * specified inum, and returned.   For normal inodes, the inode will be
831  * indexed in memory and if it already exists the existing ip will be
832  * returned instead of allocating a new one.  The superroot and PFS inodes
833  * are not indexed in memory.
834  *
835  * The passed-in cluster must be locked and will remain locked on return.
836  * The returned inode will be locked and the caller may dispose of both
837  * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
838  * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
839  *
840  * The hammer2_inode structure regulates the interface between the high level
841  * kernel VNOPS API and the filesystem backend (the chains).
842  *
843  * On return the inode is locked with the supplied cluster.
844  */
845 hammer2_inode_t *
846 hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
847 		  hammer2_tid_t inum, int idx)
848 {
849 	hammer2_inode_t *nip;
850 	const hammer2_inode_data_t *iptmp;
851 	const hammer2_inode_data_t *nipdata;
852 
853 	KKASSERT(xop == NULL ||
854 		 hammer2_cluster_type(&xop->cluster) ==
855 		 HAMMER2_BREF_TYPE_INODE);
856 	KKASSERT(pmp);
857 
858 	/*
859 	 * Interlocked lookup/ref of the inode.  This code is only needed
860 	 * when looking up inodes with nlinks != 0 (TODO: optimize out
861 	 * otherwise and test for duplicates).
862 	 *
863 	 * Cluster can be NULL during the initial pfs allocation.
864 	 */
865 	if (xop) {
866 		iptmp = &hammer2_xop_gdata(xop)->ipdata;
867 		inum = iptmp->meta.inum;
868 		hammer2_xop_pdata(xop);
869 	}
870 again:
871 	nip = hammer2_inode_lookup(pmp, inum);
872 	if (nip) {
873 		/*
874 		 * We may have to unhold the cluster to avoid a deadlock
875 		 * against vnlru (and possibly other XOPs).
876 		 */
877 		if (xop) {
878 			if (hammer2_mtx_ex_try(&nip->lock) != 0) {
879 				hammer2_cluster_unhold(&xop->cluster);
880 				hammer2_mtx_ex(&nip->lock);
881 				hammer2_cluster_rehold(&xop->cluster);
882 			}
883 		} else {
884 			hammer2_mtx_ex(&nip->lock);
885 		}
886 
887 		/*
888 		 * Handle SMP race (not applicable to the super-root spmp
889 		 * which can't index inodes due to duplicative inode numbers).
890 		 */
891 		if (pmp->spmp_hmp == NULL &&
892 		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
893 			hammer2_mtx_unlock(&nip->lock);
894 			hammer2_inode_drop(nip);
895 			goto again;
896 		}
897 		if (xop) {
898 			if (idx >= 0)
899 				hammer2_inode_repoint_one(nip, &xop->cluster,
900 							  idx);
901 			else
902 				hammer2_inode_repoint(nip, &xop->cluster);
903 		}
904 		return nip;
905 	}
906 
907 	/*
908 	 * We couldn't find the inode number; create a new inode and try to
909 	 * insert it, handling insertion races.
910 	 */
911 	nip = kmalloc_obj(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
912 	hammer2_spin_init(&nip->cluster_spin, "h2clspin");
913 	atomic_add_long(&pmp->inmem_inodes, 1);
914 
915 	/*
916 	 * Initialize nip's cluster.  A cluster is provided for normal
917 	 * inodes but typically not for the super-root or PFS inodes.
918 	 */
919 	{
920 		hammer2_inode_t *nnip = nip;
921 		nip->ihash = (int)hammer2_icrc32(&nnip, sizeof(nnip));
922 	}
923 
924 	nip->cluster.refs = 1;
925 	nip->cluster.pmp = pmp;
926 	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
927 	if (xop) {
928 		nipdata = &hammer2_xop_gdata(xop)->ipdata;
929 		nip->meta = nipdata->meta;
930 		hammer2_xop_pdata(xop);
931 		hammer2_inode_repoint(nip, &xop->cluster);
932 	} else {
933 		nip->meta.inum = inum;		/* PFS inum is always 1 XXX */
934 		/* mtime will be updated when a cluster is available */
935 	}
936 
937 	nip->pmp = pmp;
938 
939 	/*
940 	 * The ref and lock on nip give it state compatible with that after a
941 	 * hammer2_inode_lock() call.
942 	 */
943 	nip->refs = 1;
944 	hammer2_mtx_init(&nip->lock, "h2inode");
945 	hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
946 	hammer2_mtx_ex(&nip->lock);
947 	TAILQ_INIT(&nip->depend_static.sideq);
948 	/* combination of thread lock and chain lock == inode lock */
949 
950 	/*
951 	 * Attempt to add the inode.  If it fails we raced another inode
952 	 * get.  Undo all the work and try again.
953 	 */
954 	if (pmp->spmp_hmp == NULL) {
955 		hammer2_inum_hash_t *hash;
956 		hammer2_inode_t *xip;
957 		hammer2_inode_t **xipp;
958 
959 		hash = inumhash(pmp, nip->meta.inum);
960 		hammer2_spin_ex(&hash->spin);
961 		for (xipp = &hash->base;
962 		     (xip = *xipp) != NULL;
963 		     xipp = &xip->next)
964 		{
965 			if (xip->meta.inum == nip->meta.inum) {
966 				hammer2_spin_unex(&hash->spin);
967 				hammer2_mtx_unlock(&nip->lock);
968 				hammer2_inode_drop(nip);
969 				goto again;
970 			}
971 		}
972 		nip->next = NULL;
973 		*xipp = nip;
974 		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
975 		atomic_add_long(&pmp->inum_count, 1);
976 		hammer2_spin_unex(&hash->spin);
977 	}
978 	return (nip);
979 }
980 
981 /*
982  * Create a PFS inode under the superroot.  This function will create the
983  * inode, its media chains, and also insert it into the media.
984  *
985  * Caller must be in a flush transaction because we are inserting the inode
986  * onto the media.
987  */
988 hammer2_inode_t *
989 hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
990 		     const char *name, size_t name_len,
991 		     int *errorp)
992 {
993 	hammer2_xop_create_t *xop;
994 	hammer2_inode_t *pip;
995 	hammer2_inode_t *nip;
996 	int error;
997 	uint8_t pip_comp_algo;
998 	uint8_t pip_check_algo;
999 	hammer2_tid_t pip_inum;
1000 	hammer2_key_t lhc;
1001 
1002 	pip = spmp->iroot;
1003 	nip = NULL;
1004 
1005 	lhc = hammer2_dirhash(name, name_len);
1006 	*errorp = 0;
1007 
1008 	/*
1009 	 * Locate the inode or indirect block to create the new
1010 	 * entry in.  At the same time check for key collisions
1011 	 * and iterate until we don't get one.
1012 	 *
1013 	 * Lock the directory exclusively for now to guarantee that
1014 	 * we can find an unused lhc for the name.  Due to collisions,
1015 	 * two different creates can end up with the same lhc so we
1016 	 * cannot depend on the OS to prevent the collision.
1017 	 */
1018 	hammer2_inode_lock(pip, 0);
1019 
1020 	pip_comp_algo = pip->meta.comp_algo;
1021 	pip_check_algo = pip->meta.check_algo;
1022 	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;
1023 
1024 	/*
1025 	 * Locate an unused key in the collision space.
1026 	 */
1027 	{
1028 		hammer2_xop_scanlhc_t *sxop;
1029 		hammer2_key_t lhcbase;
1030 
1031 		lhcbase = lhc;
1032 		sxop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
1033 		sxop->lhc = lhc;
1034 		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
1035 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
1036 			if (lhc != sxop->head.cluster.focus->bref.key)
1037 				break;
1038 			++lhc;
1039 		}
1040 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
1041 
1042 		if (error) {
1043 			if (error != HAMMER2_ERROR_ENOENT)
1044 				goto done2;
1045 			++lhc;
1046 			error = 0;
1047 		}
1048 		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
1049 			error = HAMMER2_ERROR_ENOSPC;
1050 			goto done2;
1051 		}
1052 	}
1053 
1054 	/*
1055 	 * Create the inode with the lhc as the key.
1056 	 */
1057 	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
1058 	xop->lhc = lhc;
1059 	xop->flags = HAMMER2_INSERT_PFSROOT;
1060 	bzero(&xop->meta, sizeof(xop->meta));
1061 
1062 	xop->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
1063 	xop->meta.inum = 1;
1064 	xop->meta.iparent = pip_inum;
1065 
1066 	/* Inherit parent's inode compression mode. */
1067 	xop->meta.comp_algo = pip_comp_algo;
1068 	xop->meta.check_algo = pip_check_algo;
1069 	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
1070 	hammer2_update_time(&xop->meta.ctime);
1071 	xop->meta.mtime = xop->meta.ctime;
1072 	xop->meta.mode = 0755;
1073 	xop->meta.nlinks = 1;
1074 
1075 	/*
1076 	 * Regular files and softlinks allow a small amount of data to be
1077 	 * directly embedded in the inode.  This flag will be cleared if
1078 	 * the size is extended past the embedded limit.
1079 	 */
1080 	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
1081 	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
1082 		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
1083 	}
1084 	hammer2_xop_setname(&xop->head, name, name_len);
1085 	xop->meta.name_len = name_len;
1086 	xop->meta.name_key = lhc;
1087 	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
1088 
1089 	hammer2_xop_start(&xop->head, &hammer2_inode_create_desc);
1090 
1091 	error = hammer2_xop_collect(&xop->head, 0);
1092 #if INODE_DEBUG
1093 	kprintf("CREATE INODE %*.*s\n",
1094 		(int)name_len, (int)name_len, name);
1095 #endif
1096 
1097 	if (error) {
1098 		*errorp = error;
1099 		goto done;
1100 	}
1101 
1102 	/*
1103 	 * Set up the new inode if not a hardlink pointer.
1104 	 *
1105 	 * NOTE: *_get() integrates chain's lock into the inode lock.
1106 	 *
1107 	 * NOTE: Only one new inode can currently be created per
1108 	 *	 transaction.  If the need arises we can adjust
1109 	 *	 hammer2_trans_init() to allow more.
1110 	 *
1111 	 * NOTE: nipdata will have chain's blockset data.
1112 	 */
1113 	nip = hammer2_inode_get(pip->pmp, &xop->head, -1, -1);
1114 	nip->comp_heuristic = 0;
1115 done:
1116 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1117 done2:
1118 	hammer2_inode_unlock(pip);
1119 
1120 	return (nip);
1121 }
1122 
1123 /*
1124  * Create a new, normal inode.  This function will create the inode,
1125  * and the media chains, but will not insert the chains onto the media topology
1126  * (doing so would require a flush transaction and cause long stalls).
1127  *
1128  * Caller must be in a normal transaction.
1129  */
1130 hammer2_inode_t *
1131 hammer2_inode_create_normal(hammer2_inode_t *pip,
1132 			    struct vattr *vap, struct ucred *cred,
1133 			    hammer2_key_t inum, int *errorp)
1134 {
1135 	hammer2_xop_create_t *xop;
1136 	hammer2_inode_t *dip;
1137 	hammer2_inode_t *nip;
1138 	int error;
1139 	uid_t xuid;
1140 	uuid_t pip_uid;
1141 	uuid_t pip_gid;
1142 	uint32_t pip_mode;
1143 	uint8_t pip_comp_algo;
1144 	uint8_t pip_check_algo;
1145 	hammer2_tid_t pip_inum;
1146 
1147 	dip = pip->pmp->iroot;
1148 	KKASSERT(dip != NULL);
1149 
1150 	*errorp = 0;
1151 
1152 	/*hammer2_inode_lock(dip, 0);*/
1153 
1154 	pip_uid = pip->meta.uid;
1155 	pip_gid = pip->meta.gid;
1156 	pip_mode = pip->meta.mode;
1157 	pip_comp_algo = pip->meta.comp_algo;
1158 	pip_check_algo = pip->meta.check_algo;
1159 	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;
1160 
1161 	/*
1162 	 * Create the in-memory hammer2_inode structure for the specified
1163 	 * inode.
1164 	 */
1165 	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
1166 	nip->comp_heuristic = 0;
1167 	KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
1168 		 nip->cluster.nchains == 0);
1169 	atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);
1170 
1171 	/*
1172 	 * Setup the inode meta-data
1173 	 */
1174 	nip->meta.type = hammer2_get_obj_type(vap->va_type);
1175 
1176 	switch (nip->meta.type) {
1177 	case HAMMER2_OBJTYPE_CDEV:
1178 	case HAMMER2_OBJTYPE_BDEV:
1179 		nip->meta.rmajor = vap->va_rmajor;
1180 		nip->meta.rminor = vap->va_rminor;
1181 		break;
1182 	default:
1183 		break;
1184 	}
1185 
1186 	KKASSERT(nip->meta.inum == inum);
1187 	nip->meta.iparent = pip_inum;
1188 
1189 	/* Inherit parent's inode compression mode. */
1190 	nip->meta.comp_algo = pip_comp_algo;
1191 	nip->meta.check_algo = pip_check_algo;
1192 	nip->meta.version = HAMMER2_INODE_VERSION_ONE;
1193 	hammer2_update_time(&nip->meta.ctime);
1194 	nip->meta.mtime = nip->meta.ctime;
1195 	nip->meta.mode = vap->va_mode;
1196 	nip->meta.nlinks = 1;
1197 
1198 	xuid = hammer2_to_unix_xid(&pip_uid);
1199 	xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
1200 				     xuid, cred,
1201 				     &vap->va_mode);
1202 	if (vap->va_vaflags & VA_UID_UUID_VALID)
1203 		nip->meta.uid = vap->va_uid_uuid;
1204 	else if (vap->va_uid != (uid_t)VNOVAL)
1205 		hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
1206 	else
1207 		hammer2_guid_to_uuid(&nip->meta.uid, xuid);
1208 
1209 	if (vap->va_vaflags & VA_GID_UUID_VALID)
1210 		nip->meta.gid = vap->va_gid_uuid;
1211 	else if (vap->va_gid != (gid_t)VNOVAL)
1212 		hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
1213 	else
1214 		nip->meta.gid = pip_gid;
1215 
1216 	/*
1217 	 * Regular files and softlinks allow a small amount of data to be
1218 	 * directly embedded in the inode.  This flag will be cleared if
1219 	 * the size is extended past the embedded limit.
1220 	 */
1221 	if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
1222 	    nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
1223 		nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
1224 	}
1225 
1226 	/*
1227 	 * Create the inode using (inum) as the key.  Pass pip for
1228 	 * method inheritance.
1229 	 */
1230 	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
1231 	xop->lhc = inum;
1232 	xop->flags = 0;
1233 	xop->meta = nip->meta;
1234 	KKASSERT(vap);
1235 
1236 	xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
1237 	xop->meta.name_key = inum;
1238 	nip->meta.name_len = xop->meta.name_len;
1239 	nip->meta.name_key = xop->meta.name_key;
1240 	hammer2_inode_modify(nip);
1241 
1242 	/*
1243 	 * Create the inode media chains but leave them detached.  We are
1244 	 * not in a flush transaction so we can't mess with media topology
1245 	 * above normal inodes (i.e. the index of the inodes themselves).
1246 	 *
1247 	 * We've already set the INODE_CREATING flag.  The inode's media
1248 	 * chains will be inserted onto the media topology on the next
1249 	 * filesystem sync.
1250 	 */
1251 	hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);
1252 
1253 	error = hammer2_xop_collect(&xop->head, 0);
1254 #if INODE_DEBUG
1255 	kprintf("create inode type %d error %d\n", nip->meta.type, error);
1256 #endif
1257 
1258 	if (error) {
1259 		*errorp = error;
1260 		goto done;
1261 	}
1262 
1263 	/*
1264 	 * Associate the media chains created by the backend with the
1265 	 * frontend inode.
1266 	 */
1267 	hammer2_inode_repoint(nip, &xop->head.cluster);
1268 done:
1269 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1270 	/*hammer2_inode_unlock(dip);*/
1271 
1272 	return (nip);
1273 }
1274 
1275 /*
1276  * Create a directory entry under dip with the specified name, inode number,
1277  * and OBJTYPE (type).
1278  *
1279  * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
1280  *
1281  * Caller must hold dip locked.
1282  */
1283 int
1284 hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
1285 		      hammer2_key_t inum, uint8_t type)
1286 {
1287 	hammer2_xop_mkdirent_t *xop;
1288 	hammer2_key_t lhc;
1289 	int error;
1290 
1291 	lhc = 0;
1292 	error = 0;
1293 
1294 	KKASSERT(name != NULL);
1295 	lhc = hammer2_dirhash(name, name_len);
1296 
1297 	/*
1298 	 * Locate the inode or indirect block to create the new
1299 	 * entry in.  At the same time check for key collisions
1300 	 * and iterate until we don't get one.
1301 	 *
1302 	 * Lock the directory exclusively for now to guarantee that
1303 	 * we can find an unused lhc for the name.  Due to collisions,
1304 	 * two different creates can end up with the same lhc so we
1305 	 * cannot depend on the OS to prevent the collision.
1306 	 */
1307 	hammer2_inode_modify(dip);
1308 
1309 	/*
1310 	 * Locate an unused key in the collision space for the name's dirhash
1311 	 * (a name is always supplied to this function).
1312 	 */
1313 	{
1314 		hammer2_xop_scanlhc_t *sxop;
1315 		hammer2_key_t lhcbase;
1316 
1317 		lhcbase = lhc;
1318 		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1319 		sxop->lhc = lhc;
1320 		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
1321 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
1322 			if (lhc != sxop->head.cluster.focus->bref.key)
1323 				break;
1324 			++lhc;
1325 		}
1326 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
1327 
1328 		if (error) {
1329 			if (error != HAMMER2_ERROR_ENOENT)
1330 				goto done2;
1331 			++lhc;
1332 			error = 0;
1333 		}
1334 		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
1335 			error = HAMMER2_ERROR_ENOSPC;
1336 			goto done2;
1337 		}
1338 	}
1339 
1340 	/*
1341 	 * Create the directory entry with the lhc as the key.
1342 	 */
1343 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1344 	xop->lhc = lhc;
1345 	bzero(&xop->dirent, sizeof(xop->dirent));
1346 	xop->dirent.inum = inum;
1347 	xop->dirent.type = type;
1348 	xop->dirent.namlen = name_len;
1349 
1350 	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
1351 	hammer2_xop_setname(&xop->head, name, name_len);
1352 
1353 	hammer2_xop_start(&xop->head, &hammer2_inode_mkdirent_desc);
1354 
1355 	error = hammer2_xop_collect(&xop->head, 0);
1356 
1357 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1358 done2:
1359 	error = hammer2_error_to_errno(error);
1360 
1361 	return error;
1362 }
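
/*
 * Usage sketch (illustrative only), assuming a create-style caller that
 * holds dip locked and has already allocated the new inode nip:
 *
 *	error = hammer2_dirent_create(dip, name, name_len,
 *				      nip->meta.inum, nip->meta.type);
 *	if (error == 0)
 *		hammer2_inode_depend(dip, nip);
 */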
1363 
1364 /*
1365  * Repoint ip->cluster's chains to cluster's chains and fixup the default
1366  * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
1367  * filters out invalid or non-matching elements.
1368  *
1369  * Caller must hold the inode exclusively locked.  The cluster, if not
1370  * NULL, must also be locked.
1371  *
1372  * Cluster may be NULL to clean out any chains in ip->cluster.
1373  */
1374 void
1375 hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_cluster_t *cluster)
1376 {
1377 	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
1378 	hammer2_chain_t *ochain;
1379 	hammer2_chain_t *nchain;
1380 	int i;
1381 
1382 	bzero(dropch, sizeof(dropch));
1383 
1384 	/*
1385 	 * Drop any cached (typically data) chains related to this inode
1386 	 */
1387 	hammer2_spin_ex(&ip->cluster_spin);
1388 	for (i = 0; i < ip->ccache_nchains; ++i) {
1389 		dropch[i] = ip->ccache[i].chain;
1390 		ip->ccache[i].flags = 0;
1391 		ip->ccache[i].chain = NULL;
1392 	}
1393 	ip->ccache_nchains = 0;
1394 	hammer2_spin_unex(&ip->cluster_spin);
1395 
1396 	while (--i >= 0) {
1397 		if (dropch[i]) {
1398 			hammer2_chain_drop(dropch[i]);
1399 			dropch[i] = NULL;
1400 		}
1401 	}
1402 
1403 	/*
1404 	 * Replace chains in ip->cluster with chains from cluster and
1405 	 * adjust the focus if necessary.
1406 	 *
1407 	 * NOTE: nchain and/or ochain can be NULL due to gaps
1408 	 *	 in the cluster arrays.
1409 	 */
1410 	hammer2_spin_ex(&ip->cluster_spin);
1411 	for (i = 0; cluster && i < cluster->nchains; ++i) {
1412 		/*
1413 		 * Do not replace elements which are the same.  Also handle
1414 		 * element count discrepancies.
1415 		 */
1416 		nchain = cluster->array[i].chain;
1417 		if (i < ip->cluster.nchains) {
1418 			ochain = ip->cluster.array[i].chain;
1419 			if (ochain == nchain)
1420 				continue;
1421 		} else {
1422 			ochain = NULL;
1423 		}
1424 
1425 		/*
1426 		 * Make adjustments
1427 		 */
1428 		ip->cluster.array[i].chain = nchain;
1429 		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
1430 		ip->cluster.array[i].flags |= cluster->array[i].flags &
1431 					      HAMMER2_CITEM_INVALID;
1432 		if (nchain)
1433 			hammer2_chain_ref(nchain);
1434 		dropch[i] = ochain;
1435 	}
1436 
1437 	/*
1438 	 * Release any left-over chains in ip->cluster.
1439 	 */
1440 	while (i < ip->cluster.nchains) {
1441 		nchain = ip->cluster.array[i].chain;
1442 		if (nchain) {
1443 			ip->cluster.array[i].chain = NULL;
1444 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
1445 		}
1446 		dropch[i] = nchain;
1447 		++i;
1448 	}
1449 
1450 	/*
1451 	 * Fixup fields.  Note that the inode-embedded cluster is never
1452 	 * directly locked.
1453 	 */
1454 	if (cluster) {
1455 		ip->cluster.nchains = cluster->nchains;
1456 		ip->cluster.focus = cluster->focus;
1457 		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
1458 	} else {
1459 		ip->cluster.nchains = 0;
1460 		ip->cluster.focus = NULL;
1461 		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
1462 	}
1463 
1464 	hammer2_spin_unex(&ip->cluster_spin);
1465 
1466 	/*
1467 	 * Cleanup outside of spinlock
1468 	 */
1469 	while (--i >= 0) {
1470 		if (dropch[i])
1471 			hammer2_chain_drop(dropch[i]);
1472 	}
1473 }
1474 
1475 /*
1476  * Repoint a single element from the cluster to the ip.  Used by the
1477  * synchronization threads to update inodes piecemeal.  Does not change
1478  * the focus and requires the inode to be re-locked to clean up flags (XXX).
1479  */
1480 void
1481 hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1482 			  int idx)
1483 {
1484 	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
1485 	hammer2_chain_t *ochain;
1486 	hammer2_chain_t *nchain;
1487 	int i;
1488 
1489 	/*
1490 	 * Drop any cached (typically data) chains related to this inode
1491 	 */
1492 	hammer2_spin_ex(&ip->cluster_spin);
1493 	for (i = 0; i < ip->ccache_nchains; ++i) {
1494 		dropch[i] = ip->ccache[i].chain;
1495 		ip->ccache[i].chain = NULL;
1496 	}
1497 	ip->ccache_nchains = 0;
1498 	hammer2_spin_unex(&ip->cluster_spin);
1499 
1500 	while (--i >= 0) {
1501 		if (dropch[i])
1502 			hammer2_chain_drop(dropch[i]);
1503 	}
1504 
1505 	/*
1506 	 * Replace inode chain at index
1507 	 */
1508 	hammer2_spin_ex(&ip->cluster_spin);
1509 	KKASSERT(idx < cluster->nchains);
1510 	if (idx < ip->cluster.nchains) {
1511 		ochain = ip->cluster.array[idx].chain;
1512 		nchain = cluster->array[idx].chain;
1513 	} else {
1514 		ochain = NULL;
1515 		nchain = cluster->array[idx].chain;
1516 		for (i = ip->cluster.nchains; i <= idx; ++i) {
1517 			bzero(&ip->cluster.array[i],
1518 			      sizeof(ip->cluster.array[i]));
1519 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
1520 		}
1521 		ip->cluster.nchains = idx + 1;
1522 	}
1523 	if (ochain != nchain) {
1524 		/*
1525 		 * Make adjustments.
1526 		 */
1527 		ip->cluster.array[idx].chain = nchain;
1528 		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
1529 		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
1530 						HAMMER2_CITEM_INVALID;
1531 	}
1532 	hammer2_spin_unex(&ip->cluster_spin);
1533 	if (ochain != nchain) {
1534 		if (nchain)
1535 			hammer2_chain_ref(nchain);
1536 		if (ochain)
1537 			hammer2_chain_drop(ochain);
1538 	}
1539 }
1540 
1541 hammer2_key_t
1542 hammer2_inode_data_count(const hammer2_inode_t *ip)
1543 {
1544 	hammer2_chain_t *chain;
1545 	hammer2_key_t count = 0;
1546 	int i;
1547 
1548 	for (i = 0; i < ip->cluster.nchains; ++i) {
1549 		if ((chain = ip->cluster.array[i].chain) != NULL) {
1550 			if (count < chain->bref.embed.stats.data_count)
1551 				count = chain->bref.embed.stats.data_count;
1552 		}
1553 	}
1554 	return count;
1555 }
1556 
1557 hammer2_key_t
1558 hammer2_inode_inode_count(const hammer2_inode_t *ip)
1559 {
1560 	hammer2_chain_t *chain;
1561 	hammer2_key_t count = 0;
1562 	int i;
1563 
1564 	for (i = 0; i < ip->cluster.nchains; ++i) {
1565 		if ((chain = ip->cluster.array[i].chain) != NULL) {
1566 			if (count < chain->bref.embed.stats.inode_count)
1567 				count = chain->bref.embed.stats.inode_count;
1568 		}
1569 	}
1570 	return count;
1571 }
1572 
1573 /*
1574  * Called with a locked inode to finish unlinking an inode after xop_unlink
1575  * had been run.  This function is responsible for decrementing nlinks.
1576  *
1577  * We don't bother decrementing nlinks if the file is not open and this was
1578  * the last link.
1579  *
1580  * If the inode is a hardlink target its chain has not yet been deleted,
1581  * otherwise its chain has been deleted.
1582  *
1583  * If the file is still open then any prior deletion was not permanent and
1584  * the inode is left intact with nlinks == 0.
1585  */
1586 int
1587 hammer2_inode_unlink_finisher(hammer2_inode_t *ip, struct vnode **vprecyclep)
1588 {
1589 	struct vnode *vp;
1590 
1591 	/*
1592 	 * Decrement nlinks.  Catch a bad nlinks count here too (e.g. 0 or
1593 	 * negative), and just assume a transition to 0.
1594 	 */
1595 	if ((int64_t)ip->meta.nlinks <= 1) {
1596 		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
1597 
1598 		/*
1599 		 * Scrap the vnode as quickly as possible.  The vp association
1600 		 * stays intact while we hold the inode locked.  However, vp
1601 		 * can be NULL here.
1602 		 */
1603 		vp = ip->vp;
1604 		cpu_ccfence();
1605 
1606 		/*
1607 		 * If no vp is associated there is no high-level state to
1608 		 * deal with and we can scrap the inode immediately.
1609 		 */
1610 		if (vp == NULL) {
1611 			if ((ip->flags & HAMMER2_INODE_DELETING) == 0) {
1612 				atomic_set_int(&ip->flags,
1613 					       HAMMER2_INODE_DELETING);
1614 				hammer2_inode_delayed_sideq(ip);
1615 			}
1616 			return 0;
1617 		}
1618 
1619 		/*
1620 		 * Because INODE_ISUNLINKED is set with the inode lock
1621 		 * held, the vnode cannot be ripped up from under us.
1622 		 * There may still be refs so knote anyone waiting for
1623 		 * a delete notification.
1624 		 *
1625 		 * The vnode is not necessarily ref'd due to the unlinking
1626 		 * itself, so we have to defer handling to the end of the
1627 		 * VOP, which will then call hammer2_inode_vprecycle().
1628 		 */
1629 		if (vprecyclep) {
1630 			vhold(vp);
1631 			*vprecyclep = vp;
1632 		}
1633 	}
1634 
1635 	/*
1636 	 * Adjust nlinks and retain the inode on the media for now
1637 	 */
1638 	hammer2_inode_modify(ip);
1639 	if ((int64_t)ip->meta.nlinks > 1)
1640 		--ip->meta.nlinks;
1641 	else
1642 		ip->meta.nlinks = 0;
1643 
1644 	return 0;
1645 }
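
/*
 * Usage sketch (illustrative only), assuming a remove-style VOP.  The
 * vnode handed back via vprecyclep is disposed of only after the inode
 * lock has been released:
 *
 *	struct vnode *vprecycle = NULL;
 *
 *	hammer2_inode_unlink_finisher(ip, &vprecycle);
 *	hammer2_inode_unlock(ip);
 *	if (vprecycle)
 *		hammer2_inode_vprecycle(vprecycle);
 */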
1646 
1647 /*
1648  * Called at the end of a VOP that removes a file with a vnode that
1649  * we want to try to dispose of quickly due to a file deletion.  If
1650  * we don't do this, the vnode can hang around with 0 refs for a very
1651  * long time and prevent reclamation of the underlying file and inode
1652  * (inode remains on-media with nlinks == 0 until the vnode is recycled
1653  * due to random system activity or a umount).
1654  */
1655 void
1656 hammer2_inode_vprecycle(struct vnode *vp)
1657 {
1658 	if (vget(vp, LK_EXCLUSIVE) == 0) {
1659 		vfinalize(vp);
1660 		hammer2_knote(vp, NOTE_DELETE);
1661 		vdrop(vp);
1662 		vput(vp);
1663 	} else {
1664 		vdrop(vp);
1665 	}
1666 }
1667 
1668 
1669 /*
1670  * Mark an inode as being modified, meaning that the caller will modify
1671  * ip->meta.
1672  *
1673  * If a vnode is present we set the vnode dirty and the nominal filesystem
1674  * sync will also handle synchronizing the inode meta-data.  Unless NOSIDEQ
1675  * is set we must ensure that the inode is on pmp->sideq.
1676  *
1677  * NOTE: We must always queue the inode to the sideq.  This allows H2 to
1678  *	 shortcut vsyncscan() and flush inodes and their related vnodes
1679  *	 in two stages.  H2 still calls vfsync() for each vnode.
1680  *
1681  * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
1682  *	 only modifying the in-memory inode.  A modify_tid is synchronized
1683  *	 later when the inode gets flushed.
1684  *
1685  * NOTE: As an exception to the general rule, the inode MAY be locked
1686  *	 shared for this particular call.
1687  */
1688 void
1689 hammer2_inode_modify(hammer2_inode_t *ip)
1690 {
1691 	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1692 	if (ip->vp)
1693 		vsetisdirty(ip->vp);
1694 	if (ip->pmp && (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0)
1695 		hammer2_inode_delayed_sideq(ip);
1696 }
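
/*
 * Usage sketch (illustrative only): the usual pattern for updating
 * in-memory meta-data under the inode lock; mtime is a hypothetical
 * caller-supplied value:
 *
 *	hammer2_inode_lock(ip, 0);
 *	hammer2_inode_modify(ip);
 *	ip->meta.mtime = mtime;
 *	hammer2_inode_unlock(ip);
 */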
1697 
1698 /*
1699  * Synchronize the inode's frontend state with the chain state prior
1700  * to any explicit flush of the inode or any strategy write call.  This
1701  * does not flush the inode's chain or its sub-topology to media (higher
1702  * level layers are responsible for doing that).
1703  *
1704  * Called with a locked inode inside a normal transaction.
1705  *
1706  * inode must be locked.
1707  */
1708 int
1709 hammer2_inode_chain_sync(hammer2_inode_t *ip)
1710 {
1711 	int error;
1712 
1713 	error = 0;
1714 	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
1715 		hammer2_xop_fsync_t *xop;
1716 
1717 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1718 		xop->clear_directdata = 0;
1719 		if (ip->flags & HAMMER2_INODE_RESIZED) {
1720 			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
1721 			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
1722 				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
1723 				xop->clear_directdata = 1;
1724 			}
1725 			xop->osize = ip->osize;
1726 		} else {
1727 			xop->osize = ip->meta.size;	/* safety */
1728 		}
1729 		xop->ipflags = ip->flags;
1730 		xop->meta = ip->meta;
1731 
1732 		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
1733 					     HAMMER2_INODE_MODIFIED);
1734 		hammer2_xop_start(&xop->head, &hammer2_inode_chain_sync_desc);
1735 		error = hammer2_xop_collect(&xop->head, 0);
1736 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1737 		if (error == HAMMER2_ERROR_ENOENT)
1738 			error = 0;
1739 		if (error) {
1740 			kprintf("hammer2: unable to fsync inode %p\n", ip);
1741 			/*
1742 			atomic_set_int(&ip->flags,
1743 				       xop->ipflags & (HAMMER2_INODE_RESIZED |
1744 						       HAMMER2_INODE_MODIFIED));
1745 			*/
1746 			/* XXX return error somehow? */
1747 		}
1748 	}
1749 	return error;
1750 }
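
/*
 * Usage sketch (illustrative only), assuming an fsync-like path with the
 * inode locked and a transaction open.  Real callers pass sync-specific
 * XOP flags to the flush; 0 is used here only to keep the sketch short:
 *
 *	hammer2_inode_chain_sync(ip);
 *	hammer2_inode_chain_flush(ip, 0);
 */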
1751 
1752 /*
1753  * When an inode is flagged INODE_CREATING its chains have not actually
1754  * been inserted into the on-media tree yet.
1755  */
1756 int
1757 hammer2_inode_chain_ins(hammer2_inode_t *ip)
1758 {
1759 	int error;
1760 
1761 	error = 0;
1762 	if (ip->flags & HAMMER2_INODE_CREATING) {
1763 		hammer2_xop_create_t *xop;
1764 
1765 		atomic_clear_int(&ip->flags, HAMMER2_INODE_CREATING);
1766 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1767 		xop->lhc = ip->meta.inum;
1768 		xop->flags = 0;
1769 		hammer2_xop_start(&xop->head, &hammer2_inode_create_ins_desc);
1770 		error = hammer2_xop_collect(&xop->head, 0);
1771 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1772 		if (error == HAMMER2_ERROR_ENOENT)
1773 			error = 0;
1774 		if (error) {
1775 			kprintf("hammer2: backend unable to "
1776 				"insert inode %p %ld\n", ip, (long)ip->meta.inum);
1777 			/* XXX return error somehow? */
1778 		}
1779 	}
1780 	return error;
1781 }
1782 
1783 /*
1784  * When an inode is flagged INODE_DELETING it has been deleted (no directory
1785  * entry or open refs are left, though as an optimization H2 might leave
1786  * nlinks == 1 to avoid unnecessary block updates).  The backend flush then
1787  * needs to actually remove it from the topology.
1788  *
1789  * NOTE: backend flush must still sync and flush the deleted inode to clean
1790  *	 out related chains.
1791  *
1792  * NOTE: We must clear not only INODE_DELETING, but also INODE_ISUNLINKED
1793  *	 to prevent the vnode reclaim code from trying to delete it twice.
1794  */
1795 int
1796 hammer2_inode_chain_des(hammer2_inode_t *ip)
1797 {
1798 	int error;
1799 
1800 	error = 0;
1801 	if (ip->flags & HAMMER2_INODE_DELETING) {
1802 		hammer2_xop_destroy_t *xop;
1803 
1804 		atomic_clear_int(&ip->flags, HAMMER2_INODE_DELETING |
1805 					     HAMMER2_INODE_ISUNLINKED);
1806 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1807 		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
1808 		error = hammer2_xop_collect(&xop->head, 0);
1809 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1810 
1811 		if (error == HAMMER2_ERROR_ENOENT)
1812 			error = 0;
1813 		if (error) {
1814 			kprintf("hammer2: backend unable to "
1815 				"delete inode %p %ld\n", ip, (long)ip->meta.inum);
1816 			/* XXX return error somehow? */
1817 		}
1818 	}
1819 	return error;
1820 }
1821 
1822 /*
1823  * Flushes the inode's chain and its sub-topology to media.  Interlocks
1824  * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
1825  * function creating or modifying a chain under this inode will re-set the
1826  * flag.
1827  *
1828  * inode must be locked.
1829  */
1830 int
1831 hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags)
1832 {
1833 	hammer2_xop_flush_t *xop;
1834 	int error;
1835 
1836 	atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
1837 	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | flags);
1838 	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
1839 	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
1840 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1841 	if (error == HAMMER2_ERROR_ENOENT)
1842 		error = 0;
1843 
1844 	return error;
1845 }
1846