xref: /dragonfly/sys/vfs/hammer2/hammer2_inode.c (revision f984587a)
1 /*
2  * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 #include <sys/cdefs.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/types.h>
39 #include <sys/lock.h>
40 #include <sys/uuid.h>
41 #include <sys/vnode.h>
42 
43 #include "hammer2.h"
44 
45 #define INODE_DEBUG	0
46 
47 RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
48 	     hammer2_tid_t, meta.inum);
49 
50 int
51 hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
52 {
53 	if (ip1->meta.inum < ip2->meta.inum)
54 		return(-1);
55 	if (ip1->meta.inum > ip2->meta.inum)
56 		return(1);
57 	return(0);
58 }
59 
60 /*
61  * Caller holds pmp->list_spin and the inode should be locked.  Merge ip
62  * with the specified depend.
63  *
64  * If the ip is on SYNCQ it stays there and (void *)-1 is returned, indicating
65  * that successive calls must ensure the ip is on a pass2 depend (or they are
66  * all SYNCQ).  If the passed-in depend is not NULL and not (void *)-1 then
67  * we can set pass2 on it and return.
68  *
69  * If the ip is not on SYNCQ it is merged with the passed-in depend, creating
70  * a self-depend if necessary, and depend->pass2 is set according
71  * to the PASS2 flag.  SIDEQ is set.
72  */
73 static __noinline
74 hammer2_depend_t *
75 hammer2_inode_setdepend_locked(hammer2_inode_t *ip, hammer2_depend_t *depend)
76 {
77 	hammer2_pfs_t *pmp = ip->pmp;
78 	hammer2_depend_t *dtmp;
79 	hammer2_inode_t *iptmp;
80 
81 	/*
82 	 * If ip is SYNCQ its entry is used for the syncq list and it will
83 	 * no longer be associated with a dependency.  Merging this status
84 	 * with a passed-in depend implies PASS2.
85 	 */
86 	if (ip->flags & HAMMER2_INODE_SYNCQ) {
87 		if (depend == (void *)-1 ||
88 		    depend == NULL) {
89 			return ((void *)-1);
90 		}
91 		depend->pass2 = 1;
92 		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);
93 
94 		return depend;
95 	}
96 
97 	/*
98 	 * If ip is already SIDEQ, merge ip->depend into the passed-in depend.
99 	 * If it is not, associate the ip with the passed-in depend, creating
100 	 * a single-entry dependency using depend_static if necessary.
101 	 *
102 	 * NOTE: The use of ip->depend_static always requires that the
103 	 *	 specific ip containing the structure is part of that
104 	 *	 particular depend_static's dependency group.
105 	 */
106 	if (ip->flags & HAMMER2_INODE_SIDEQ) {
107 		/*
108 		 * Merge ip->depend with the passed-in depend.  If the
109 		 * passed-in depend is not a special case, all ips associated
110 		 * with ip->depend (including the original ip) must be moved
111 		 * to the passed-in depend.
112 		 */
113 		if (depend == NULL) {
114 			depend = ip->depend;
115 		} else if (depend == (void *)-1) {
116 			depend = ip->depend;
117 			depend->pass2 = 1;
118 		} else if (depend != ip->depend) {
119 #ifdef INVARIANTS
120 			int sanitychk = 0;
121 #endif
122 			dtmp = ip->depend;
123 			while ((iptmp = TAILQ_FIRST(&dtmp->sideq)) != NULL) {
124 #ifdef INVARIANTS
125 				if (iptmp == ip)
126 					sanitychk = 1;
127 #endif
128 				TAILQ_REMOVE(&dtmp->sideq, iptmp, entry);
129 				TAILQ_INSERT_TAIL(&depend->sideq, iptmp, entry);
130 				iptmp->depend = depend;
131 			}
132 			KKASSERT(sanitychk == 1);
133 			depend->count += dtmp->count;
134 			depend->pass2 |= dtmp->pass2;
135 			TAILQ_REMOVE(&pmp->depq, dtmp, entry);
136 			dtmp->count = 0;
137 			dtmp->pass2 = 0;
138 		}
139 	} else {
140 		/*
141 		 * Add ip to the sideq, creating a self-dependency if
142 		 * necessary.
143 		 */
144 		hammer2_inode_ref(ip);
145 		atomic_set_int(&ip->flags, HAMMER2_INODE_SIDEQ);
146 		if (depend == NULL) {
147 			depend = &ip->depend_static;
148 			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
149 		} else if (depend == (void *)-1) {
150 			depend = &ip->depend_static;
151 			depend->pass2 = 1;
152 			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
153 		} /* else add ip to passed-in depend */
154 		TAILQ_INSERT_TAIL(&depend->sideq, ip, entry);
155 		ip->depend = depend;
156 		++depend->count;
157 		++pmp->sideq_count;
158 	}
159 
160 	if (ip->flags & HAMMER2_INODE_SYNCQ_PASS2)
161 		depend->pass2 = 1;
162 	if (depend->pass2)
163 		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);
164 
165 	return depend;
166 }
167 
168 /*
169  * Put a solo inode on the SIDEQ (meaning that it is dirty).  This can also
170  * occur from inode_lock4() and inode_depend().
171  *
172  * Caller must pass in a locked inode.
173  */
174 void
175 hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
176 {
177 	hammer2_pfs_t *pmp = ip->pmp;
178 
179 	/*
180 	 * Optimize case to avoid pmp spinlock.
181 	 */
182 	if ((ip->flags & (HAMMER2_INODE_SYNCQ | HAMMER2_INODE_SIDEQ)) == 0) {
183 		hammer2_spin_ex(&pmp->list_spin);
184 		hammer2_inode_setdepend_locked(ip, NULL);
185 		hammer2_spin_unex(&pmp->list_spin);
186 	}
187 }
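
/*
 * Illustrative sketch (hypothetical caller): hammer2_inode_delayed_sideq()
 * is typically reached indirectly via hammer2_inode_modify(), or called
 * directly after flagging an inode for deferred work, as in
 * hammer2_inode_unlink_finisher() below:
 *
 *	atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
 *	hammer2_inode_delayed_sideq(ip);
 */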
188 
189 /*
190  * Lock an inode, with SYNCQ semantics.
191  *
192  * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
193  * flags for options:
194  *
195  *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.
196  *	  shared locks are not subject to SYNCQ semantics, exclusive locks
197  *	  are.
198  *
199  *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
200  *	  Most front-end inode locks do.
201  *
202  *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
203  *	  the inode data be resolved.  This is used by the syncthr because
204  *	  it can run on an unresolved/out-of-sync cluster, and also by the
205  *	  vnode reclamation code to avoid unnecessary I/O (particularly when
206  *	  disposing of hundreds of thousands of cached vnodes).
207  *
208  * This function, along with lock4, has SYNCQ semantics.  If the inode being
209  * locked is on the SYNCQ, that is, it has been staged by the syncer, we must
210  * block until the operation is complete (even if we can lock the inode).  In
211  * order to reduce the stall time, we re-order the inode to the front of the
212  * pmp->syncq prior to blocking.  This reordering VERY significantly improves
213  * performance.
214  *
215  * The inode locking function locks the inode itself, resolves any stale
216  * chains in the inode's cluster, and allocates a fresh copy of the
217  * cluster with 1 ref and all the underlying chains locked.
218  *
219  * ip->cluster will be stable while the inode is locked.
220  *
221  * NOTE: We don't combine the inode/chain lock because putting away an
222  *       inode would otherwise confuse multiple lock holders of the inode.
223  */
224 void
225 hammer2_inode_lock(hammer2_inode_t *ip, int how)
226 {
227 	hammer2_pfs_t *pmp;
228 
229 	hammer2_inode_ref(ip);
230 	pmp = ip->pmp;
231 
232 	/*
233 	 * Inode structure mutex - Shared lock
234 	 */
235 	if (how & HAMMER2_RESOLVE_SHARED) {
236 		hammer2_mtx_sh(&ip->lock);
237 		return;
238 	}
239 
240 	/*
241 	 * Inode structure mutex - Exclusive lock
242 	 *
243 	 * An exclusive lock (if not recursive) must wait for inodes on
244 	 * SYNCQ to flush first, to ensure that meta-data dependencies such
245 	 * as the nlink count and related directory entries are not split
246 	 * across flushes.
247 	 *
248 	 * If the vnode is locked by the current thread it must be unlocked
249 	 * across the tsleep() to avoid a deadlock.
250 	 */
251 	hammer2_mtx_ex(&ip->lock);
252 	if (hammer2_mtx_refs(&ip->lock) > 1)
253 		return;
254 	while ((ip->flags & HAMMER2_INODE_SYNCQ) && pmp) {
255 		hammer2_spin_ex(&pmp->list_spin);
256 		if (ip->flags & HAMMER2_INODE_SYNCQ) {
257 			tsleep_interlock(&ip->flags, 0);
258 			atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
259 			TAILQ_REMOVE(&pmp->syncq, ip, entry);
260 			TAILQ_INSERT_HEAD(&pmp->syncq, ip, entry);
261 			hammer2_spin_unex(&pmp->list_spin);
262 			hammer2_mtx_unlock(&ip->lock);
263 			tsleep(&ip->flags, PINTERLOCKED, "h2sync", 0);
264 			hammer2_mtx_ex(&ip->lock);
265 			continue;
266 		}
267 		hammer2_spin_unex(&pmp->list_spin);
268 		break;
269 	}
270 }
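
/*
 * Minimal usage sketch (hypothetical caller).  Each hammer2_inode_lock()
 * adds an inode ref which hammer2_inode_unlock() drops again:
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *	... read-only access to ip->meta, no SYNCQ stall ...
 *	hammer2_inode_unlock(ip);
 *
 *	hammer2_inode_lock(ip, 0);
 *	... exclusive access, subject to SYNCQ semantics ...
 *	hammer2_inode_unlock(ip);
 */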
271 
272 /*
273  * Exclusively lock up to four inodes, in order, with SYNCQ semantics.
274  * ip1 and ip2 must not be NULL.  ip3 and ip4 may be NULL, but if ip3 is
275  * NULL then ip4 must also be NULL.
276  *
277  * This creates a dependency between up to four inodes.
278  */
279 void
280 hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
281 		    hammer2_inode_t *ip3, hammer2_inode_t *ip4)
282 {
283 	hammer2_inode_t *ips[4];
284 	hammer2_inode_t *iptmp;
285 	hammer2_inode_t *ipslp;
286 	hammer2_depend_t *depend;
287 	hammer2_pfs_t *pmp;
288 	size_t count;
289 	size_t i;
290 
291 	pmp = ip1->pmp;			/* may be NULL */
292 	KKASSERT(pmp == ip2->pmp);
293 
294 	ips[0] = ip1;
295 	ips[1] = ip2;
296 	if (ip3 == NULL) {
297 		count = 2;
298 	} else if (ip4 == NULL) {
299 		count = 3;
300 		ips[2] = ip3;
301 		KKASSERT(pmp == ip3->pmp);
302 	} else {
303 		count = 4;
304 		ips[2] = ip3;
305 		ips[3] = ip4;
306 		KKASSERT(pmp == ip3->pmp);
307 		KKASSERT(pmp == ip4->pmp);
308 	}
309 
310 	for (i = 0; i < count; ++i)
311 		hammer2_inode_ref(ips[i]);
312 
313 restart:
314 	/*
315 	 * Lock the inodes in order
316 	 */
317 	for (i = 0; i < count; ++i) {
318 		hammer2_mtx_ex(&ips[i]->lock);
319 	}
320 
321 	/*
322 	 * Associate dependencies and record the first inode found on SYNCQ
323 	 * (the operation is allowed to proceed for inodes on PASS2) for our
324 	 * sleep operation; this inode is theoretically the last one sync'd
325 	 * in the sequence.
326 	 *
327 	 * All inodes found on SYNCQ are moved to the head of the syncq
328 	 * to reduce stalls.
329 	 */
330 	hammer2_spin_ex(&pmp->list_spin);
331 	depend = NULL;
332 	ipslp = NULL;
333 	for (i = 0; i < count; ++i) {
334 		iptmp = ips[i];
335 		depend = hammer2_inode_setdepend_locked(iptmp, depend);
336 		if (iptmp->flags & HAMMER2_INODE_SYNCQ) {
337 			TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
338 			TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
339 			if (ipslp == NULL)
340 				ipslp = iptmp;
341 		}
342 	}
343 	hammer2_spin_unex(&pmp->list_spin);
344 
345 	/*
346 	 * Block and retry if any of the inodes are on SYNCQ.  It is
347 	 * important that we allow the operation to proceed in the
348 	 * PASS2 case, to avoid deadlocking against the vnode.
349 	 */
350 	if (ipslp) {
351 		for (i = 0; i < count; ++i)
352 			hammer2_mtx_unlock(&ips[i]->lock);
353 		tsleep(&ipslp->flags, 0, "h2sync", 2);
354 		goto restart;
355 	}
356 }
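
/*
 * Usage sketch (hypothetical caller): lock a directory inode and a file
 * inode together so they become a single flush dependency.  Unused slots
 * are passed as NULL and each inode is released individually:
 *
 *	hammer2_inode_lock4(dip, ip, NULL, NULL);
 *	... operate on dip and ip ...
 *	hammer2_inode_unlock(ip);
 *	hammer2_inode_unlock(dip);
 */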
357 
358 /*
359  * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
360  * we wake it up.
361  */
362 void
363 hammer2_inode_unlock(hammer2_inode_t *ip)
364 {
365 	if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) {
366 		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
367 		hammer2_mtx_unlock(&ip->lock);
368 		wakeup(&ip->flags);
369 	} else {
370 		hammer2_mtx_unlock(&ip->lock);
371 	}
372 	hammer2_inode_drop(ip);
373 }
374 
375 /*
376  * If either ip1 or ip2 has been tapped by the syncer, make sure that both
377  * are.  This ensures that dependencies (e.g. dirent-v-inode) are synced
378  * together.  For dirent-v-inode depends, pass the dirent as ip1.
379  *
380  * If neither ip1 nor ip2 has been tapped by the syncer, merge them into a
381  * single dependency.  Dependencies are entered into pmp->depq.  This
382  * effectively flags the inodes SIDEQ.
383  *
384  * Both ip1 and ip2 must be locked by the caller.  This also ensures
385  * that we can't race the end of the syncer's queue run.
386  */
387 void
388 hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
389 {
390 	hammer2_pfs_t *pmp;
391 	hammer2_depend_t *depend;
392 
393 	pmp = ip1->pmp;
394 	hammer2_spin_ex(&pmp->list_spin);
395 	depend = hammer2_inode_setdepend_locked(ip1, NULL);
396 	depend = hammer2_inode_setdepend_locked(ip2, depend);
397 	hammer2_spin_unex(&pmp->list_spin);
398 }
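
/*
 * Sketch of a typical use (hypothetical caller): after creating a directory
 * entry under dip that refers to ip, flag the two locked inodes as a single
 * dependency so the syncer flushes them together:
 *
 *	hammer2_dirent_create(dip, name, name_len, ip->meta.inum, type);
 *	hammer2_inode_depend(dip, ip);
 */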
399 
400 /*
401  * Select a chain out of an inode's cluster and lock it.
402  *
403  * The inode does not have to be locked.
404  */
405 hammer2_chain_t *
406 hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
407 {
408 	hammer2_chain_t *chain;
409 	hammer2_cluster_t *cluster;
410 
411 	hammer2_spin_sh(&ip->cluster_spin);
412 	cluster = &ip->cluster;
413 	if (clindex >= cluster->nchains)
414 		chain = NULL;
415 	else
416 		chain = cluster->array[clindex].chain;
417 	if (chain) {
418 		hammer2_chain_ref(chain);
419 		hammer2_spin_unsh(&ip->cluster_spin);
420 		hammer2_chain_lock(chain, how);
421 	} else {
422 		hammer2_spin_unsh(&ip->cluster_spin);
423 	}
424 	return chain;
425 }
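
/*
 * Usage sketch (hypothetical caller): the returned chain, if not NULL, is
 * referenced and locked and must be unlocked and dropped when done:
 *
 *	chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
 *	if (chain) {
 *		... inspect the locked chain ...
 *		hammer2_chain_unlock(chain);
 *		hammer2_chain_drop(chain);
 *	}
 */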
426 
427 hammer2_chain_t *
428 hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
429 			       hammer2_chain_t **parentp, int how)
430 {
431 	hammer2_chain_t *chain;
432 	hammer2_chain_t *parent;
433 
434 	for (;;) {
435 		hammer2_spin_sh(&ip->cluster_spin);
436 		if (clindex >= ip->cluster.nchains)
437 			chain = NULL;
438 		else
439 			chain = ip->cluster.array[clindex].chain;
440 		if (chain) {
441 			hammer2_chain_ref(chain);
442 			hammer2_spin_unsh(&ip->cluster_spin);
443 			hammer2_chain_lock(chain, how);
444 		} else {
445 			hammer2_spin_unsh(&ip->cluster_spin);
446 		}
447 
448 		/*
449 		 * Get parent, lock order must be (parent, chain).
450 		 */
451 		parent = chain->parent;
452 		if (parent) {
453 			hammer2_chain_ref(parent);
454 			hammer2_chain_unlock(chain);
455 			hammer2_chain_lock(parent, how);
456 			hammer2_chain_lock(chain, how);
457 		}
458 		if (ip->cluster.array[clindex].chain == chain &&
459 		    chain->parent == parent) {
460 			break;
461 		}
462 
463 		/*
464 		 * Retry
465 		 */
466 		hammer2_chain_unlock(chain);
467 		hammer2_chain_drop(chain);
468 		if (parent) {
469 			hammer2_chain_unlock(parent);
470 			hammer2_chain_drop(parent);
471 		}
472 	}
473 	*parentp = parent;
474 
475 	return chain;
476 }
477 
478 /*
479  * Temporarily release a lock held shared or exclusive.  Caller must
480  * hold the lock shared or exclusive on call and the lock will be released
481  * on return.
482  *
483  * Restore a lock that was temporarily released.
484  */
485 hammer2_mtx_state_t
486 hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
487 {
488 	return hammer2_mtx_temp_release(&ip->lock);
489 }
490 
491 void
492 hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
493 {
494 	hammer2_mtx_temp_restore(&ip->lock, ostate);
495 }
496 
497 /*
498  * Upgrade a shared inode lock to exclusive and return.  If the inode lock
499  * is already held exclusively this is a NOP.
500  *
501  * The caller MUST hold the inode lock either shared or exclusive on call
502  * and will own the lock exclusively on return.
503  *
504  * Returns non-zero if the lock was already exclusive prior to the upgrade.
505  */
506 int
507 hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
508 {
509 	int wasexclusive;
510 
511 	if (mtx_islocked_ex(&ip->lock)) {
512 		wasexclusive = 1;
513 	} else {
514 		hammer2_mtx_unlock(&ip->lock);
515 		hammer2_mtx_ex(&ip->lock);
516 		wasexclusive = 0;
517 	}
518 	return wasexclusive;
519 }
520 
521 /*
522  * Downgrade an inode lock from exclusive to shared only if the inode
523  * lock was previously shared.  If the inode lock was previously exclusive,
524  * this is a NOP.
525  */
526 void
527 hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
528 {
529 	if (wasexclusive == 0)
530 		hammer2_mtx_downgrade(&ip->lock);
531 }
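
/*
 * The upgrade/downgrade pair is used by hammer2_igetv() below.  A minimal
 * sketch of the pattern:
 *
 *	wasexclusive = hammer2_inode_lock_upgrade(ip);
 *	... code that requires the exclusive inode lock ...
 *	hammer2_inode_lock_downgrade(ip, wasexclusive);
 */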
532 
533 /*
534  * Lookup an inode by inode number
535  */
536 hammer2_inode_t *
537 hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
538 {
539 	hammer2_inode_t *ip;
540 
541 	KKASSERT(pmp);
542 	if (pmp->spmp_hmp) {
543 		ip = NULL;
544 	} else {
545 		hammer2_spin_ex(&pmp->inum_spin);
546 		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
547 		if (ip)
548 			hammer2_inode_ref(ip);
549 		hammer2_spin_unex(&pmp->inum_spin);
550 	}
551 	return(ip);
552 }
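
/*
 * Usage sketch (hypothetical caller): a successful lookup returns a
 * referenced but unlocked inode which must eventually be dropped:
 *
 *	ip = hammer2_inode_lookup(pmp, inum);
 *	if (ip) {
 *		hammer2_inode_lock(ip, 0);
 *		... use ip ...
 *		hammer2_inode_unlock(ip);
 *		hammer2_inode_drop(ip);
 *	}
 */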
553 
554 /*
555  * Adding a ref to an inode is only legal if the inode already has at least
556  * one ref.
557  *
558  * (can be called with spinlock held)
559  */
560 void
561 hammer2_inode_ref(hammer2_inode_t *ip)
562 {
563 	atomic_add_int(&ip->refs, 1);
564 	if (hammer2_debug & 0x80000) {
565 		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
566 		print_backtrace(8);
567 	}
568 }
569 
570 /*
571  * Drop an inode reference, freeing the inode when the last reference goes
572  * away.
573  */
574 void
575 hammer2_inode_drop(hammer2_inode_t *ip)
576 {
577 	hammer2_pfs_t *pmp;
578 	u_int refs;
579 
580 	while (ip) {
581 		if (hammer2_debug & 0x80000) {
582 			kprintf("INODE-1 %p (%d->%d)\n",
583 				ip, ip->refs, ip->refs - 1);
584 			print_backtrace(8);
585 		}
586 		refs = ip->refs;
587 		cpu_ccfence();
588 		if (refs == 1) {
589 			/*
590 			 * Transition to zero, must interlock with
591 			 * the inode inumber lookup tree (if applicable).
592 			 * It should not be possible for anyone to race
593 			 * the transition to 0.
594 			 */
595 			pmp = ip->pmp;
596 			KKASSERT(pmp);
597 			hammer2_spin_ex(&pmp->inum_spin);
598 
599 			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
600 				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
601 				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
602 					atomic_clear_int(&ip->flags,
603 						     HAMMER2_INODE_ONRBTREE);
604 					RB_REMOVE(hammer2_inode_tree,
605 						  &pmp->inum_tree, ip);
606 					--pmp->inum_count;
607 				}
608 				hammer2_spin_unex(&pmp->inum_spin);
609 
610 				ip->pmp = NULL;
611 
612 				/*
613 				 * Cleaning out ip->cluster isn't entirely
614 				 * trivial.
615 				 */
616 				hammer2_inode_repoint(ip, NULL);
617 
618 				kfree_obj(ip, pmp->minode);
619 				atomic_add_long(&pmp->inmem_inodes, -1);
620 				ip = NULL;	/* will terminate loop */
621 			} else {
622 				hammer2_spin_unex(&ip->pmp->inum_spin);
623 			}
624 		} else {
625 			/*
626 			 * Non zero transition
627 			 */
628 			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
629 				break;
630 		}
631 	}
632 }
633 
634 /*
635  * Get the vnode associated with the given inode, allocating the vnode if
636  * necessary.  The vnode will be returned exclusively locked.
637  *
638  * *errorp is set to a UNIX error, not a HAMMER2 error.
639  *
640  * The caller must lock the inode (shared or exclusive).
641  *
642  * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
643  * races.
644  */
645 struct vnode *
646 hammer2_igetv(hammer2_inode_t *ip, int *errorp)
647 {
648 	hammer2_pfs_t *pmp;
649 	struct vnode *vp;
650 
651 	pmp = ip->pmp;
652 	KKASSERT(pmp != NULL);
653 	*errorp = 0;
654 
655 	for (;;) {
656 		/*
657 		 * Attempt to reuse an existing vnode assignment.  It is
658 		 * possible to race a reclaim so the vget() may fail.  The
659 		 * inode must be unlocked during the vget() to avoid a
660 		 * deadlock against a reclaim.
661 		 */
662 		int wasexclusive;
663 
664 		vp = ip->vp;
665 		if (vp) {
666 			/*
667 			 * Inode must be unlocked during the vget() to avoid
668 			 * possible deadlocks, but leave the ip ref intact.
669 			 *
670 			 * vnode is held to prevent destruction during the
671 			 * vget().  The vget() can still fail if we lost
672 			 * a reclaim race on the vnode.
673 			 */
674 			hammer2_mtx_state_t ostate;
675 
676 			vhold(vp);
677 			ostate = hammer2_inode_lock_temp_release(ip);
678 			if (vget(vp, LK_EXCLUSIVE)) {
679 				vdrop(vp);
680 				hammer2_inode_lock_temp_restore(ip, ostate);
681 				continue;
682 			}
683 			hammer2_inode_lock_temp_restore(ip, ostate);
684 			vdrop(vp);
685 			/* vp still locked and ref from vget */
686 			if (ip->vp != vp) {
687 				kprintf("hammer2: igetv race %p/%p\n",
688 					ip->vp, vp);
689 				vput(vp);
690 				continue;
691 			}
692 			*errorp = 0;
693 			break;
694 		}
695 
696 		/*
697 		 * No vnode exists, allocate a new vnode.  Beware of
698 		 * allocation races.  This function will return an
699 		 * exclusively locked and referenced vnode.
700 		 */
701 		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
702 		if (*errorp) {
703 			kprintf("hammer2: igetv getnewvnode failed %d\n",
704 				*errorp);
705 			vp = NULL;
706 			break;
707 		}
708 
709 		/*
710 		 * Lock the inode and check for an allocation race.
711 		 */
712 		wasexclusive = hammer2_inode_lock_upgrade(ip);
713 		if (ip->vp != NULL) {
714 			vp->v_type = VBAD;
715 			vx_put(vp);
716 			hammer2_inode_lock_downgrade(ip, wasexclusive);
717 			continue;
718 		}
719 
720 		switch (ip->meta.type) {
721 		case HAMMER2_OBJTYPE_DIRECTORY:
722 			vp->v_type = VDIR;
723 			break;
724 		case HAMMER2_OBJTYPE_REGFILE:
725 			/*
726 			 * Regular file must use buffer cache I/O
727 			 * (VKVABIO cpu sync semantics supported)
728 			 */
729 			vp->v_type = VREG;
730 			vsetflags(vp, VKVABIO);
731 			vinitvmio(vp, ip->meta.size,
732 				  HAMMER2_LBUFSIZE,
733 				  (int)ip->meta.size & HAMMER2_LBUFMASK);
734 			break;
735 		case HAMMER2_OBJTYPE_SOFTLINK:
736 			/*
737 			 * XXX for now we are using the generic file_read
738 			 * and file_write code so we need a buffer cache
739 			 * association.
740 			 *
741 			 * (VKVABIO cpu sync semantics supported)
742 			 */
743 			vp->v_type = VLNK;
744 			vsetflags(vp, VKVABIO);
745 			vinitvmio(vp, ip->meta.size,
746 				  HAMMER2_LBUFSIZE,
747 				  (int)ip->meta.size & HAMMER2_LBUFMASK);
748 			break;
749 		case HAMMER2_OBJTYPE_CDEV:
750 			vp->v_type = VCHR;
751 			/* fall through */
752 		case HAMMER2_OBJTYPE_BDEV:
753 			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
754 			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
755 				vp->v_type = VBLK;
756 			addaliasu(vp,
757 				  ip->meta.rmajor,
758 				  ip->meta.rminor);
759 			break;
760 		case HAMMER2_OBJTYPE_FIFO:
761 			vp->v_type = VFIFO;
762 			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
763 			break;
764 		case HAMMER2_OBJTYPE_SOCKET:
765 			vp->v_type = VSOCK;
766 			break;
767 		default:
768 			panic("hammer2: unhandled objtype %d",
769 			      ip->meta.type);
770 			break;
771 		}
772 
773 		if (ip == pmp->iroot)
774 			vsetflags(vp, VROOT);
775 
776 		vp->v_data = ip;
777 		ip->vp = vp;
778 		hammer2_inode_ref(ip);		/* vp association */
779 		hammer2_inode_lock_downgrade(ip, wasexclusive);
780 		vx_downgrade(vp);
781 		break;
782 	}
783 
784 	/*
785 	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
786 	 */
787 	if (hammer2_debug & 0x0002) {
788 		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
789 			vp, vp->v_refcnt, vp->v_auxrefs);
790 	}
791 	return (vp);
792 }
793 
794 /*
795  * XXX this API needs a rewrite.  It needs to be split into a
796  * hammer2_inode_alloc() and hammer2_inode_build() to allow us to get
797  * rid of the inode/chain lock reversal fudge.
798  *
799  * Returns the inode associated with the passed-in cluster, allocating a new
800  * hammer2_inode structure if necessary, then synchronizing it to the passed
801  * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
802  * is synchronized.  Otherwise the whole cluster is synchronized.  inum will
803  * be extracted from the passed-in xop and the inum argument will be ignored.
804  *
805  * If xop is passed as NULL then a new hammer2_inode is allocated with the
806  * specified inum, and returned.   For normal inodes, the inode will be
807  * indexed in memory and if it already exists the existing ip will be
808  * returned instead of allocating a new one.  The superroot and PFS inodes
809  * are not indexed in memory.
810  *
811  * The passed-in cluster must be locked and will remain locked on return.
812  * The returned inode will be locked and the caller may dispose of both
813  * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
814  * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
815  *
816  * The hammer2_inode structure regulates the interface between the high level
817  * kernel VNOPS API and the filesystem backend (the chains).
818  *
819  * On return the inode is locked with the supplied cluster.
820  */
821 hammer2_inode_t *
822 hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
823 		  hammer2_tid_t inum, int idx)
824 {
825 	hammer2_inode_t *nip;
826 	const hammer2_inode_data_t *iptmp;
827 	const hammer2_inode_data_t *nipdata;
828 
829 	KKASSERT(xop == NULL ||
830 		 hammer2_cluster_type(&xop->cluster) ==
831 		 HAMMER2_BREF_TYPE_INODE);
832 	KKASSERT(pmp);
833 
834 	/*
835 	 * Interlocked lookup/ref of the inode.  This code is only needed
836 	 * when looking up inodes with nlinks != 0 (TODO: optimize out
837 	 * otherwise and test for duplicates).
838 	 *
839 	 * Cluster can be NULL during the initial pfs allocation.
840 	 */
841 	if (xop) {
842 		iptmp = &hammer2_xop_gdata(xop)->ipdata;
843 		inum = iptmp->meta.inum;
844 		hammer2_xop_pdata(xop);
845 	}
846 again:
847 	nip = hammer2_inode_lookup(pmp, inum);
848 	if (nip) {
849 		/*
850 		 * We may have to unhold the cluster to avoid a deadlock
851 		 * against vnlru (and possibly other XOPs).
852 		 */
853 		if (xop) {
854 			if (hammer2_mtx_ex_try(&nip->lock) != 0) {
855 				hammer2_cluster_unhold(&xop->cluster);
856 				hammer2_mtx_ex(&nip->lock);
857 				hammer2_cluster_rehold(&xop->cluster);
858 			}
859 		} else {
860 			hammer2_mtx_ex(&nip->lock);
861 		}
862 
863 		/*
864 		 * Handle SMP race (not applicable to the super-root spmp
865 		 * which can't index inodes due to duplicative inode numbers).
866 		 */
867 		if (pmp->spmp_hmp == NULL &&
868 		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
869 			hammer2_mtx_unlock(&nip->lock);
870 			hammer2_inode_drop(nip);
871 			goto again;
872 		}
873 		if (xop) {
874 			if (idx >= 0)
875 				hammer2_inode_repoint_one(nip, &xop->cluster,
876 							  idx);
877 			else
878 				hammer2_inode_repoint(nip, &xop->cluster);
879 		}
880 		return nip;
881 	}
882 
883 	/*
884 	 * We couldn't find the inode number.  Create a new inode, try to
885 	 * insert it, and handle insertion races.
886 	 */
887 	nip = kmalloc_obj(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
888 	hammer2_spin_init(&nip->cluster_spin, "h2clspin");
889 	atomic_add_long(&pmp->inmem_inodes, 1);
890 
891 	/*
892 	 * Initialize nip's cluster.  A cluster is provided for normal
893 	 * inodes but typically not for the super-root or PFS inodes.
894 	 */
895 	{
896 		hammer2_inode_t *nnip = nip;
897 		nip->ihash = (int)hammer2_icrc32(&nnip, sizeof(nnip));
898 	}
899 
900 	nip->cluster.refs = 1;
901 	nip->cluster.pmp = pmp;
902 	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
903 	if (xop) {
904 		nipdata = &hammer2_xop_gdata(xop)->ipdata;
905 		nip->meta = nipdata->meta;
906 		hammer2_xop_pdata(xop);
907 		hammer2_inode_repoint(nip, &xop->cluster);
908 	} else {
909 		nip->meta.inum = inum;		/* PFS inum is always 1 XXX */
910 		/* mtime will be updated when a cluster is available */
911 	}
912 
913 	nip->pmp = pmp;
914 
915 	/*
916 	 * ref and lock on nip gives it state compatible to after a
917 	 * hammer2_inode_lock() call.
918 	 */
919 	nip->refs = 1;
920 	hammer2_mtx_init(&nip->lock, "h2inode");
921 	hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
922 	hammer2_mtx_ex(&nip->lock);
923 	TAILQ_INIT(&nip->depend_static.sideq);
924 	/* combination of thread lock and chain lock == inode lock */
925 
926 	/*
927 	 * Attempt to add the inode.  If it fails we raced another inode
928 	 * get.  Undo all the work and try again.
929 	 */
930 	if (pmp->spmp_hmp == NULL) {
931 		hammer2_spin_ex(&pmp->inum_spin);
932 		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
933 			hammer2_spin_unex(&pmp->inum_spin);
934 			hammer2_mtx_unlock(&nip->lock);
935 			hammer2_inode_drop(nip);
936 			goto again;
937 		}
938 		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
939 		++pmp->inum_count;
940 		hammer2_spin_unex(&pmp->inum_spin);
941 	}
942 	return (nip);
943 }
944 
945 /*
946  * Create a PFS inode under the superroot.  This function will create the
947  * inode, its media chains, and also insert it into the media.
948  *
949  * Caller must be in a flush transaction because we are inserting the inode
950  * onto the media.
951  */
952 hammer2_inode_t *
953 hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
954 		     const char *name, size_t name_len,
955 		     int *errorp)
956 {
957 	hammer2_xop_create_t *xop;
958 	hammer2_inode_t *pip;
959 	hammer2_inode_t *nip;
960 	int error;
961 	uint8_t pip_comp_algo;
962 	uint8_t pip_check_algo;
963 	hammer2_tid_t pip_inum;
964 	hammer2_key_t lhc;
965 
966 	pip = spmp->iroot;
967 	nip = NULL;
968 
969 	lhc = hammer2_dirhash(name, name_len);
970 	*errorp = 0;
971 
972 	/*
973 	 * Locate the inode or indirect block to create the new
974 	 * entry in.  At the same time check for key collisions
975 	 * and iterate until we don't get one.
976 	 *
977 	 * Lock the directory exclusively for now to guarantee that
978 	 * we can find an unused lhc for the name.  Due to collisions,
979 	 * two different creates can end up with the same lhc so we
980 	 * cannot depend on the OS to prevent the collision.
981 	 */
982 	hammer2_inode_lock(pip, 0);
983 
984 	pip_comp_algo = pip->meta.comp_algo;
985 	pip_check_algo = pip->meta.check_algo;
986 	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;
987 
988 	/*
989 	 * Locate an unused key in the collision space.
990 	 */
991 	{
992 		hammer2_xop_scanlhc_t *sxop;
993 		hammer2_key_t lhcbase;
994 
995 		lhcbase = lhc;
996 		sxop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
997 		sxop->lhc = lhc;
998 		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
999 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
1000 			if (lhc != sxop->head.cluster.focus->bref.key)
1001 				break;
1002 			++lhc;
1003 		}
1004 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
1005 
1006 		if (error) {
1007 			if (error != HAMMER2_ERROR_ENOENT)
1008 				goto done2;
1009 			++lhc;
1010 			error = 0;
1011 		}
1012 		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
1013 			error = HAMMER2_ERROR_ENOSPC;
1014 			goto done2;
1015 		}
1016 	}
1017 
1018 	/*
1019 	 * Create the inode with the lhc as the key.
1020 	 */
1021 	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
1022 	xop->lhc = lhc;
1023 	xop->flags = HAMMER2_INSERT_PFSROOT;
1024 	bzero(&xop->meta, sizeof(xop->meta));
1025 
1026 	xop->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
1027 	xop->meta.inum = 1;
1028 	xop->meta.iparent = pip_inum;
1029 
1030 	/* Inherit parent's inode compression mode. */
1031 	xop->meta.comp_algo = pip_comp_algo;
1032 	xop->meta.check_algo = pip_check_algo;
1033 	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
1034 	hammer2_update_time(&xop->meta.ctime);
1035 	xop->meta.mtime = xop->meta.ctime;
1036 	xop->meta.mode = 0755;
1037 	xop->meta.nlinks = 1;
1038 
1039 	/*
1040 	 * Regular files and softlinks allow a small amount of data to be
1041 	 * directly embedded in the inode.  This flag will be cleared if
1042 	 * the size is extended past the embedded limit.
1043 	 */
1044 	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
1045 	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
1046 		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
1047 	}
1048 	hammer2_xop_setname(&xop->head, name, name_len);
1049 	xop->meta.name_len = name_len;
1050 	xop->meta.name_key = lhc;
1051 	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
1052 
1053 	hammer2_xop_start(&xop->head, &hammer2_inode_create_desc);
1054 
1055 	error = hammer2_xop_collect(&xop->head, 0);
1056 #if INODE_DEBUG
1057 	kprintf("CREATE INODE %*.*s\n",
1058 		(int)name_len, (int)name_len, name);
1059 #endif
1060 
1061 	if (error) {
1062 		*errorp = error;
1063 		goto done;
1064 	}
1065 
1066 	/*
1067 	 * Set up the new inode if not a hardlink pointer.
1068 	 *
1069 	 * NOTE: *_get() integrates chain's lock into the inode lock.
1070 	 *
1071 	 * NOTE: Only one new inode can currently be created per
1072 	 *	 transaction.  If the need arises we can adjust
1073 	 *	 hammer2_trans_init() to allow more.
1074 	 *
1075 	 * NOTE: nipdata will have chain's blockset data.
1076 	 */
1077 	nip = hammer2_inode_get(pip->pmp, &xop->head, -1, -1);
1078 	nip->comp_heuristic = 0;
1079 done:
1080 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1081 done2:
1082 	hammer2_inode_unlock(pip);
1083 
1084 	return (nip);
1085 }
1086 
1087 /*
1088  * Create a new, normal inode.  This function will create the inode,
1089  * the media chains, but will not insert the chains onto the media topology
1090  * (doing so would require a flush transaction and cause long stalls).
1091  *
1092  * Caller must be in a normal transaction.
1093  */
1094 hammer2_inode_t *
1095 hammer2_inode_create_normal(hammer2_inode_t *pip,
1096 			    struct vattr *vap, struct ucred *cred,
1097 			    hammer2_key_t inum, int *errorp)
1098 {
1099 	hammer2_xop_create_t *xop;
1100 	hammer2_inode_t *dip;
1101 	hammer2_inode_t *nip;
1102 	int error;
1103 	uid_t xuid;
1104 	uuid_t pip_uid;
1105 	uuid_t pip_gid;
1106 	uint32_t pip_mode;
1107 	uint8_t pip_comp_algo;
1108 	uint8_t pip_check_algo;
1109 	hammer2_tid_t pip_inum;
1110 
1111 	dip = pip->pmp->iroot;
1112 	KKASSERT(dip != NULL);
1113 
1114 	*errorp = 0;
1115 
1116 	/*hammer2_inode_lock(dip, 0);*/
1117 
1118 	pip_uid = pip->meta.uid;
1119 	pip_gid = pip->meta.gid;
1120 	pip_mode = pip->meta.mode;
1121 	pip_comp_algo = pip->meta.comp_algo;
1122 	pip_check_algo = pip->meta.check_algo;
1123 	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;
1124 
1125 	/*
1126 	 * Create the in-memory hammer2_inode structure for the specified
1127 	 * inode.
1128 	 */
1129 	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
1130 	nip->comp_heuristic = 0;
1131 	KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
1132 		 nip->cluster.nchains == 0);
1133 	atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);
1134 
1135 	/*
1136 	 * Setup the inode meta-data
1137 	 */
1138 	nip->meta.type = hammer2_get_obj_type(vap->va_type);
1139 
1140 	switch (nip->meta.type) {
1141 	case HAMMER2_OBJTYPE_CDEV:
1142 	case HAMMER2_OBJTYPE_BDEV:
1143 		nip->meta.rmajor = vap->va_rmajor;
1144 		nip->meta.rminor = vap->va_rminor;
1145 		break;
1146 	default:
1147 		break;
1148 	}
1149 
1150 	KKASSERT(nip->meta.inum == inum);
1151 	nip->meta.iparent = pip_inum;
1152 
1153 	/* Inherit parent's inode compression mode. */
1154 	nip->meta.comp_algo = pip_comp_algo;
1155 	nip->meta.check_algo = pip_check_algo;
1156 	nip->meta.version = HAMMER2_INODE_VERSION_ONE;
1157 	hammer2_update_time(&nip->meta.ctime);
1158 	nip->meta.mtime = nip->meta.ctime;
1159 	nip->meta.mode = vap->va_mode;
1160 	nip->meta.nlinks = 1;
1161 
1162 	xuid = hammer2_to_unix_xid(&pip_uid);
1163 	xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
1164 				     xuid, cred,
1165 				     &vap->va_mode);
1166 	if (vap->va_vaflags & VA_UID_UUID_VALID)
1167 		nip->meta.uid = vap->va_uid_uuid;
1168 	else if (vap->va_uid != (uid_t)VNOVAL)
1169 		hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
1170 	else
1171 		hammer2_guid_to_uuid(&nip->meta.uid, xuid);
1172 
1173 	if (vap->va_vaflags & VA_GID_UUID_VALID)
1174 		nip->meta.gid = vap->va_gid_uuid;
1175 	else if (vap->va_gid != (gid_t)VNOVAL)
1176 		hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
1177 	else
1178 		nip->meta.gid = pip_gid;
1179 
1180 	/*
1181 	 * Regular files and softlinks allow a small amount of data to be
1182 	 * directly embedded in the inode.  This flag will be cleared if
1183 	 * the size is extended past the embedded limit.
1184 	 */
1185 	if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
1186 	    nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
1187 		nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
1188 	}
1189 
1190 	/*
1191 	 * Create the inode using (inum) as the key.  Pass pip for
1192 	 * method inheritance.
1193 	 */
1194 	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
1195 	xop->lhc = inum;
1196 	xop->flags = 0;
1197 	xop->meta = nip->meta;
1198 	KKASSERT(vap);
1199 
1200 	xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
1201 	xop->meta.name_key = inum;
1202 	nip->meta.name_len = xop->meta.name_len;
1203 	nip->meta.name_key = xop->meta.name_key;
1204 	hammer2_inode_modify(nip);
1205 
1206 	/*
1207 	 * Create the inode media chains but leave them detached.  We are
1208 	 * not in a flush transaction so we can't mess with media topology
1209 	 * above normal inodes (i.e. the index of the inodes themselves).
1210 	 *
1211 	 * We've already set the INODE_CREATING flag.  The inode's media
1212 	 * chains will be inserted onto the media topology on the next
1213 	 * filesystem sync.
1214 	 */
1215 	hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);
1216 
1217 	error = hammer2_xop_collect(&xop->head, 0);
1218 #if INODE_DEBUG
1219 	kprintf("create inode type %d error %d\n", nip->meta.type, error);
1220 #endif
1221 
1222 	if (error) {
1223 		*errorp = error;
1224 		goto done;
1225 	}
1226 
1227 	/*
1228 	 * Associate the media chains created by the backend with the
1229 	 * frontend inode.
1230 	 */
1231 	hammer2_inode_repoint(nip, &xop->head.cluster);
1232 done:
1233 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1234 	/*hammer2_inode_unlock(dip);*/
1235 
1236 	return (nip);
1237 }
1238 
1239 /*
1240  * Create a directory entry under dip with the specified name, inode number,
1241  * and OBJTYPE (type).
1242  *
1243  * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
1244  *
1245  * Caller must hold dip locked.
1246  */
1247 int
1248 hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
1249 		      hammer2_key_t inum, uint8_t type)
1250 {
1251 	hammer2_xop_mkdirent_t *xop;
1252 	hammer2_key_t lhc;
1253 	int error;
1254 
1255 	lhc = 0;
1256 	error = 0;
1257 
1258 	KKASSERT(name != NULL);
1259 	lhc = hammer2_dirhash(name, name_len);
1260 
1261 	/*
1262 	 * Locate the inode or indirect block to create the new
1263 	 * entry in.  At the same time check for key collisions
1264 	 * and iterate until we don't get one.
1265 	 *
1266 	 * Lock the directory exclusively for now to guarantee that
1267 	 * we can find an unused lhc for the name.  Due to collisions,
1268 	 * two different creates can end up with the same lhc so we
1269 	 * cannot depend on the OS to prevent the collision.
1270 	 */
1271 	hammer2_inode_modify(dip);
1272 
1273 	/*
1274 	 * Locate an unused key in the collision space.  The name is always
1275 	 * specified here (see the KKASSERT above), so lhc is derived from it.
1276 	 */
1277 	{
1278 		hammer2_xop_scanlhc_t *sxop;
1279 		hammer2_key_t lhcbase;
1280 
1281 		lhcbase = lhc;
1282 		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1283 		sxop->lhc = lhc;
1284 		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
1285 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
1286 			if (lhc != sxop->head.cluster.focus->bref.key)
1287 				break;
1288 			++lhc;
1289 		}
1290 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
1291 
1292 		if (error) {
1293 			if (error != HAMMER2_ERROR_ENOENT)
1294 				goto done2;
1295 			++lhc;
1296 			error = 0;
1297 		}
1298 		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
1299 			error = HAMMER2_ERROR_ENOSPC;
1300 			goto done2;
1301 		}
1302 	}
1303 
1304 	/*
1305 	 * Create the directory entry with the lhc as the key.
1306 	 */
1307 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1308 	xop->lhc = lhc;
1309 	bzero(&xop->dirent, sizeof(xop->dirent));
1310 	xop->dirent.inum = inum;
1311 	xop->dirent.type = type;
1312 	xop->dirent.namlen = name_len;
1313 
1314 	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
1315 	hammer2_xop_setname(&xop->head, name, name_len);
1316 
1317 	hammer2_xop_start(&xop->head, &hammer2_inode_mkdirent_desc);
1318 
1319 	error = hammer2_xop_collect(&xop->head, 0);
1320 
1321 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1322 done2:
1323 	error = hammer2_error_to_errno(error);
1324 
1325 	return error;
1326 }
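
/*
 * Usage sketch (hypothetical caller): create a directory entry for an
 * existing inode under a locked directory.  The return value is a UNIX
 * errno, not a HAMMER2_ERROR_* code:
 *
 *	hammer2_inode_lock(dip, 0);
 *	error = hammer2_dirent_create(dip, name, name_len,
 *				      ip->meta.inum, ip->meta.type);
 *	hammer2_inode_unlock(dip);
 */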
1327 
1328 /*
1329  * Repoint ip->cluster's chains to cluster's chains and fixup the default
1330  * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
1331  * filters out invalid or non-matching elements.
1332  *
1333  * Caller must hold the inode exclusively locked.  The cluster, if not
1334  * NULL, must also be locked.
1335  *
1336  * Cluster may be NULL to clean out any chains in ip->cluster.
1337  */
1338 void
1339 hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_cluster_t *cluster)
1340 {
1341 	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
1342 	hammer2_chain_t *ochain;
1343 	hammer2_chain_t *nchain;
1344 	int i;
1345 
1346 	bzero(dropch, sizeof(dropch));
1347 
1348 	/*
1349 	 * Replace chains in ip->cluster with chains from cluster and
1350 	 * adjust the focus if necessary.
1351 	 *
1352 	 * NOTE: nchain and/or ochain can be NULL due to gaps
1353 	 *	 in the cluster arrays.
1354 	 */
1355 	hammer2_spin_ex(&ip->cluster_spin);
1356 	for (i = 0; cluster && i < cluster->nchains; ++i) {
1357 		/*
1358 		 * Do not replace elements which are the same.  Also handle
1359 		 * element count discrepancies.
1360 		 */
1361 		nchain = cluster->array[i].chain;
1362 		if (i < ip->cluster.nchains) {
1363 			ochain = ip->cluster.array[i].chain;
1364 			if (ochain == nchain)
1365 				continue;
1366 		} else {
1367 			ochain = NULL;
1368 		}
1369 
1370 		/*
1371 		 * Make adjustments
1372 		 */
1373 		ip->cluster.array[i].chain = nchain;
1374 		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
1375 		ip->cluster.array[i].flags |= cluster->array[i].flags &
1376 					      HAMMER2_CITEM_INVALID;
1377 		if (nchain)
1378 			hammer2_chain_ref(nchain);
1379 		dropch[i] = ochain;
1380 	}
1381 
1382 	/*
1383 	 * Release any left-over chains in ip->cluster.
1384 	 */
1385 	while (i < ip->cluster.nchains) {
1386 		nchain = ip->cluster.array[i].chain;
1387 		if (nchain) {
1388 			ip->cluster.array[i].chain = NULL;
1389 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
1390 		}
1391 		dropch[i] = nchain;
1392 		++i;
1393 	}
1394 
1395 	/*
1396 	 * Fixup fields.  Note that the inode-embedded cluster is never
1397 	 * directly locked.
1398 	 */
1399 	if (cluster) {
1400 		ip->cluster.nchains = cluster->nchains;
1401 		ip->cluster.focus = cluster->focus;
1402 		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
1403 	} else {
1404 		ip->cluster.nchains = 0;
1405 		ip->cluster.focus = NULL;
1406 		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
1407 	}
1408 
1409 	hammer2_spin_unex(&ip->cluster_spin);
1410 
1411 	/*
1412 	 * Cleanup outside of spinlock
1413 	 */
1414 	while (--i >= 0) {
1415 		if (dropch[i])
1416 			hammer2_chain_drop(dropch[i]);
1417 	}
1418 }
1419 
1420 /*
1421  * Repoint a single element from the cluster to the ip.  Used by the
1422  * synchronization threads to update inodes piecemeal.  Does not change
1423  * focus and requires inode to be re-locked to clean-up flags (XXX).
1424  */
1425 void
1426 hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1427 			  int idx)
1428 {
1429 	hammer2_chain_t *ochain;
1430 	hammer2_chain_t *nchain;
1431 	int i;
1432 
1433 	hammer2_spin_ex(&ip->cluster_spin);
1434 	KKASSERT(idx < cluster->nchains);
1435 	if (idx < ip->cluster.nchains) {
1436 		ochain = ip->cluster.array[idx].chain;
1437 		nchain = cluster->array[idx].chain;
1438 	} else {
1439 		ochain = NULL;
1440 		nchain = cluster->array[idx].chain;
1441 		for (i = ip->cluster.nchains; i <= idx; ++i) {
1442 			bzero(&ip->cluster.array[i],
1443 			      sizeof(ip->cluster.array[i]));
1444 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
1445 		}
1446 		ip->cluster.nchains = idx + 1;
1447 	}
1448 	if (ochain != nchain) {
1449 		/*
1450 		 * Make adjustments.
1451 		 */
1452 		ip->cluster.array[idx].chain = nchain;
1453 		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
1454 		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
1455 						HAMMER2_CITEM_INVALID;
1456 	}
1457 	hammer2_spin_unex(&ip->cluster_spin);
1458 	if (ochain != nchain) {
1459 		if (nchain)
1460 			hammer2_chain_ref(nchain);
1461 		if (ochain)
1462 			hammer2_chain_drop(ochain);
1463 	}
1464 }
1465 
1466 hammer2_key_t
1467 hammer2_inode_data_count(const hammer2_inode_t *ip)
1468 {
1469 	hammer2_chain_t *chain;
1470 	hammer2_key_t count = 0;
1471 	int i;
1472 
1473 	for (i = 0; i < ip->cluster.nchains; ++i) {
1474 		if ((chain = ip->cluster.array[i].chain) != NULL) {
1475 			if (count < chain->bref.embed.stats.data_count)
1476 				count = chain->bref.embed.stats.data_count;
1477 		}
1478 	}
1479 	return count;
1480 }
1481 
1482 hammer2_key_t
1483 hammer2_inode_inode_count(const hammer2_inode_t *ip)
1484 {
1485 	hammer2_chain_t *chain;
1486 	hammer2_key_t count = 0;
1487 	int i;
1488 
1489 	for (i = 0; i < ip->cluster.nchains; ++i) {
1490 		if ((chain = ip->cluster.array[i].chain) != NULL) {
1491 			if (count < chain->bref.embed.stats.inode_count)
1492 				count = chain->bref.embed.stats.inode_count;
1493 		}
1494 	}
1495 	return count;
1496 }
1497 
1498 /*
1499  * Called with a locked inode to finish unlinking an inode after xop_unlink
1500  * has been run.  This function is responsible for decrementing nlinks.
1501  *
1502  * We don't bother decrementing nlinks if the file is not open and this was
1503  * the last link.
1504  *
1505  * If the inode is a hardlink target its chain has not yet been deleted;
1506  * otherwise its chain has been deleted.
1507  *
1508  * If the file is still open then any prior deletion was not permanent and
1509  * the inode is left intact with nlinks == 0.
1510  */
1511 int
1512 hammer2_inode_unlink_finisher(hammer2_inode_t *ip, struct vnode **vprecyclep)
1513 {
1514 	struct vnode *vp;
1515 
1516 	/*
1517 	 * Decrement nlinks.  Catch a bad nlinks count here too (e.g. 0 or
1518 	 * negative), and just assume a transition to 0.
1519 	 */
1520 	if ((int64_t)ip->meta.nlinks <= 1) {
1521 		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
1522 
1523 		/*
1524 		 * Scrap the vnode as quickly as possible.  The vp association
1525 		 * stays intact while we hold the inode locked.  However, vp
1526 		 * can be NULL here.
1527 		 */
1528 		vp = ip->vp;
1529 		cpu_ccfence();
1530 
1531 		/*
1532 		 * If no vp is associated there is no high-level state to
1533 		 * deal with and we can scrap the inode immediately.
1534 		 */
1535 		if (vp == NULL) {
1536 			if ((ip->flags & HAMMER2_INODE_DELETING) == 0) {
1537 				atomic_set_int(&ip->flags,
1538 					       HAMMER2_INODE_DELETING);
1539 				hammer2_inode_delayed_sideq(ip);
1540 			}
1541 			return 0;
1542 		}
1543 
1544 		/*
1545 		 * Because INODE_ISUNLINKED is set with the inode lock
1546 		 * held, the vnode cannot be ripped up from under us.
1547 		 * There may still be refs so knote anyone waiting for
1548 		 * a delete notification.
1549 		 *
1550 		 * The vnode is not necessarily ref'd due to the unlinking
1551 		 * itself, so we have to defer handling to the end of the
1552 		 * VOP, which will then call hammer2_inode_vprecycle().
1553 		 */
1554 		if (vprecyclep) {
1555 			vhold(vp);
1556 			*vprecyclep = vp;
1557 		}
1558 	}
1559 
1560 	/*
1561 	 * Adjust nlinks and retain the inode on the media for now
1562 	 */
1563 	hammer2_inode_modify(ip);
1564 	if ((int64_t)ip->meta.nlinks > 1)
1565 		--ip->meta.nlinks;
1566 	else
1567 		ip->meta.nlinks = 0;
1568 
1569 	return 0;
1570 }
1571 
1572 /*
1573  * Called at the end of a VOP that removes a file with a vnode that
1574  * we want to try to dispose of quickly due to a file deletion.  If
1575  * we don't do this, the vnode can hang around with 0 refs for a very
1576  * long time and prevent reclamation of the underlying file and inode
1577  * (inode remains on-media with nlinks == 0 until the vnode is recycled
1578  * due to random system activity or a umount).
1579  */
1580 void
1581 hammer2_inode_vprecycle(struct vnode *vp)
1582 {
1583 	if (vget(vp, LK_EXCLUSIVE) == 0) {
1584 		vfinalize(vp);
1585 		hammer2_knote(vp, NOTE_DELETE);
1586 		vdrop(vp);
1587 		vput(vp);
1588 	} else {
1589 		vdrop(vp);
1590 	}
1591 }
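
/*
 * Sketch of the VOP-tail pattern described above (hypothetical caller):
 * the finisher hands back a held vnode via *vprecyclep and the VOP calls
 * hammer2_inode_vprecycle() only after releasing its own locks:
 *
 *	struct vnode *vprecycle = NULL;
 *
 *	hammer2_inode_unlink_finisher(ip, &vprecycle);
 *	hammer2_inode_unlock(ip);
 *	if (vprecycle)
 *		hammer2_inode_vprecycle(vprecycle);
 */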
1592 
1593 
1594 /*
1595  * Mark an inode as being modified, meaning that the caller will modify
1596  * ip->meta.
1597  *
1598  * If a vnode is present we set the vnode dirty and the nominal filesystem
1599  * sync will also handle synchronizing the inode meta-data.  Unless NOSIDEQ
1600  * we must ensure that the inode is on pmp->sideq.
1601  *
1602  * NOTE: We must always queue the inode to the sideq.  This allows H2 to
1603  *	 shortcut vsyncscan() and flush inodes and their related vnodes
1604  *	 in two stages.  H2 still calls vfsync() for each vnode.
1605  *
1606  * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
1607  *	 only modifying the in-memory inode.  A modify_tid is synchronized
1608  *	 later when the inode gets flushed.
1609  *
1610  * NOTE: As an exception to the general rule, the inode MAY be locked
1611  *	 shared for this particular call.
1612  */
1613 void
1614 hammer2_inode_modify(hammer2_inode_t *ip)
1615 {
1616 	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1617 	if (ip->vp)
1618 		vsetisdirty(ip->vp);
1619 	if (ip->pmp && (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0)
1620 		hammer2_inode_delayed_sideq(ip);
1621 }
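
/*
 * Minimal sketch of the modify pattern (hypothetical caller): flag the
 * locked inode first, then update the in-memory meta-data.  The changes
 * are pushed to the chains later by hammer2_inode_chain_sync():
 *
 *	hammer2_inode_modify(ip);
 *	ip->meta.mtime = mtime;		(hypothetical field update)
 */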
1622 
1623 /*
1624  * Synchronize the inode's frontend state with the chain state prior
1625  * to any explicit flush of the inode or any strategy write call.  This
1626  * does not flush the inode's chain or its sub-topology to media (higher
1627  * level layers are responsible for doing that).
1628  *
1629  * Called with a locked inode inside a normal transaction.
1630  *
1631  * inode must be locked.
1632  */
1633 int
1634 hammer2_inode_chain_sync(hammer2_inode_t *ip)
1635 {
1636 	int error;
1637 
1638 	error = 0;
1639 	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
1640 		hammer2_xop_fsync_t *xop;
1641 
1642 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1643 		xop->clear_directdata = 0;
1644 		if (ip->flags & HAMMER2_INODE_RESIZED) {
1645 			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
1646 			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
1647 				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
1648 				xop->clear_directdata = 1;
1649 			}
1650 			xop->osize = ip->osize;
1651 		} else {
1652 			xop->osize = ip->meta.size;	/* safety */
1653 		}
1654 		xop->ipflags = ip->flags;
1655 		xop->meta = ip->meta;
1656 
1657 		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
1658 					     HAMMER2_INODE_MODIFIED);
1659 		hammer2_xop_start(&xop->head, &hammer2_inode_chain_sync_desc);
1660 		error = hammer2_xop_collect(&xop->head, 0);
1661 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1662 		if (error == HAMMER2_ERROR_ENOENT)
1663 			error = 0;
1664 		if (error) {
1665 			kprintf("hammer2: unable to fsync inode %p\n", ip);
1666 			/*
1667 			atomic_set_int(&ip->flags,
1668 				       xop->ipflags & (HAMMER2_INODE_RESIZED |
1669 						       HAMMER2_INODE_MODIFIED));
1670 			*/
1671 			/* XXX return error somehow? */
1672 		}
1673 	}
1674 	return error;
1675 }
1676 
1677 /*
1678  * When an inode is flagged INODE_CREATING its chains have not actually
1679  * been inserted into the on-media tree yet.
1680  */
1681 int
1682 hammer2_inode_chain_ins(hammer2_inode_t *ip)
1683 {
1684 	int error;
1685 
1686 	error = 0;
1687 	if (ip->flags & HAMMER2_INODE_CREATING) {
1688 		hammer2_xop_create_t *xop;
1689 
1690 		atomic_clear_int(&ip->flags, HAMMER2_INODE_CREATING);
1691 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1692 		xop->lhc = ip->meta.inum;
1693 		xop->flags = 0;
1694 		hammer2_xop_start(&xop->head, &hammer2_inode_create_ins_desc);
1695 		error = hammer2_xop_collect(&xop->head, 0);
1696 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1697 		if (error == HAMMER2_ERROR_ENOENT)
1698 			error = 0;
1699 		if (error) {
1700 			kprintf("hammer2: backend unable to "
1701 				"insert inode %p %ld\n", ip, (long)ip->meta.inum);
1702 			/* XXX return error somehow? */
1703 		}
1704 	}
1705 	return error;
1706 }
1707 
1708 /*
1709  * When an inode is flagged INODE_DELETING it has been deleted (no directory
1710  * entry or open refs are left, though as an optimization H2 might leave
1711  * nlinks == 1 to avoid unnecessary block updates).  The backend flush then
1712  * needs to actually remove it from the topology.
1713  *
1714  * NOTE: backend flush must still sync and flush the deleted inode to clean
1715  *	 out related chains.
1716  *
1717  * NOTE: We must clear not only INODE_DELETING, but also INODE_ISUNLINKED
1718  *	 to prevent the vnode reclaim code from trying to delete it twice.
1719  */
1720 int
1721 hammer2_inode_chain_des(hammer2_inode_t *ip)
1722 {
1723 	int error;
1724 
1725 	error = 0;
1726 	if (ip->flags & HAMMER2_INODE_DELETING) {
1727 		hammer2_xop_destroy_t *xop;
1728 
1729 		atomic_clear_int(&ip->flags, HAMMER2_INODE_DELETING |
1730 					     HAMMER2_INODE_ISUNLINKED);
1731 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1732 		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
1733 		error = hammer2_xop_collect(&xop->head, 0);
1734 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1735 
1736 		if (error == HAMMER2_ERROR_ENOENT)
1737 			error = 0;
1738 		if (error) {
1739 			kprintf("hammer2: backend unable to "
1740 				"delete inode %p %ld\n", ip, (long)ip->meta.inum);
1741 			/* XXX return error somehow? */
1742 		}
1743 	}
1744 	return error;
1745 }
1746 
1747 /*
1748  * Flushes the inode's chain and its sub-topology to media.  Interlocks
1749  * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
1750  * function creating or modifying a chain under this inode will re-set the
1751  * flag.
1752  *
1753  * inode must be locked.
1754  */
1755 int
1756 hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags)
1757 {
1758 	hammer2_xop_fsync_t *xop;
1759 	int error;
1760 
1761 	atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
1762 	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | flags);
1763 	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
1764 	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
1765 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1766 	if (error == HAMMER2_ERROR_ENOENT)
1767 		error = 0;
1768 
1769 	return error;
1770 }
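
/*
 * Sketch of how the preceding helpers compose (hypothetical, simplified):
 * the frontend meta-data is synchronized to the chains first, then the
 * chain topology is flushed to media.
 *
 *	hammer2_inode_chain_sync(ip);
 *	hammer2_inode_chain_flush(ip, flags);	(flags as required by caller)
 */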
1771