xref: /dragonfly/sys/vfs/hammer2/hammer2_inode.c (revision b8c93cad)
1 /*
2  * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 #include <sys/cdefs.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/types.h>
39 #include <sys/lock.h>
40 #include <sys/uuid.h>
41 
42 #include "hammer2.h"
43 
44 #define INODE_DEBUG	0
45 
46 RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
47 	     hammer2_tid_t, meta.inum);
48 
49 int
50 hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
51 {
52 	if (ip1->meta.inum < ip2->meta.inum)
53 		return(-1);
54 	if (ip1->meta.inum > ip2->meta.inum)
55 		return(1);
56 	return(0);
57 }
58 
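/*
 * Place the inode on the PFS side queue (pmp->sideq) for deferred
 * meta-data synchronization by hammer2_inode_run_sideq().  The unlocked
 * ONSIDEQ test is repeated under list_spin, so a racing enqueue at worst
 * allocates one extra ipul which is immediately freed.
 */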
59 static
60 void
61 hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
62 {
63 	hammer2_inode_sideq_t *ipul;
64 	hammer2_pfs_t *pmp = ip->pmp;
65 
66 	if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
67 		ipul = kmalloc(sizeof(*ipul), pmp->minode,
68 			       M_WAITOK | M_ZERO);
69 		ipul->ip = ip;
70 		hammer2_spin_ex(&pmp->list_spin);
71 		if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
72 			hammer2_inode_ref(ip);
73 			atomic_set_int(&ip->flags,
74 				       HAMMER2_INODE_ONSIDEQ);
75 			TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
76 			hammer2_spin_unex(&pmp->list_spin);
77 		} else {
78 			hammer2_spin_unex(&pmp->list_spin);
79 			kfree(ipul, pmp->minode);
80 		}
81 	}
82 }
83 
84 /*
85  * HAMMER2 inode locks
86  *
87  * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
88  * flags for options:
89  *
90  *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
91  *	  inode locking function will automatically set the RDONLY flag.
92  *
93  *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
94  *	  Most front-end inode locks do.
95  *
96  *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
97  *	  the inode data be resolved.  This is used by the syncthr because
98  *	  it can run on an unresolved/out-of-sync cluster, and also by the
99  *	  vnode reclamation code to avoid unnecessary I/O (particularly when
100  *	  disposing of hundreds of thousands of cached vnodes).
101  *
102  * The inode locking function locks the inode itself, resolves any stale
103  * chains in the inode's cluster, and allocates a fresh copy of the
104  * cluster with 1 ref and all the underlying chains locked.
105  *
106  * ip->cluster will be stable while the inode is locked.
107  *
108  * NOTE: We don't combine the inode/chain lock because putting away an
109  *       inode would otherwise confuse multiple lock holders of the inode.
110  *
111  * NOTE: In-memory inodes always point to hardlink targets (the actual file),
112  *	 and never point to a hardlink pointer.
113  *
114  * NOTE: If caller passes HAMMER2_RESOLVE_RDONLY the exclusive locking code
115  *	 will feel free to reduce the chain set in the cluster as an
116  *	 optimization.  It will still be validated against the quorum if
117  *	 appropriate, but the optimization might be able to reduce data
118  *	 accesses to one node.  This flag is automatically set if the inode
119  *	 is locked with HAMMER2_RESOLVE_SHARED.
120  */
121 void
122 hammer2_inode_lock(hammer2_inode_t *ip, int how)
123 {
124 	hammer2_inode_ref(ip);
125 
126 	/*
127 	 * Inode structure mutex
128 	 */
129 	if (how & HAMMER2_RESOLVE_SHARED) {
130 		/*how |= HAMMER2_RESOLVE_RDONLY; not used */
131 		hammer2_mtx_sh(&ip->lock);
132 	} else {
133 		hammer2_mtx_ex(&ip->lock);
134 	}
135 }
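
/*
 * Illustrative usage sketch (not compiled): front-end access to ip->meta
 * using the flag rules documented above.  The function and flag names are
 * real; the surrounding example is hypothetical.
 */
#if 0
static void
example_read_meta(hammer2_inode_t *ip, hammer2_tid_t *inump)
{
	/* a shared lock suffices for reading ip->meta */
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED |
			       HAMMER2_RESOLVE_ALWAYS);
	*inump = ip->meta.inum;
	hammer2_inode_unlock(ip);
}
#endif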
136 
137 /*
138  * Select a chain out of an inode's cluster and lock it.
139  *
140  * The inode does not have to be locked.
141  */
142 hammer2_chain_t *
143 hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
144 {
145 	hammer2_chain_t *chain;
146 	hammer2_cluster_t *cluster;
147 
148 	hammer2_spin_sh(&ip->cluster_spin);
149 #if 0
150 	cluster = ip->cluster_cache;
151 	if (cluster) {
152 		if (clindex >= cluster->nchains)
153 			chain = NULL;
154 		else
155 			chain = cluster->array[clindex].chain;
156 		if (chain) {
157 			hammer2_chain_ref(chain);
158 			hammer2_spin_unsh(&ip->cluster_spin);
159 			hammer2_chain_lock(chain, how);
160 			return chain;
161 		}
162 	}
163 #endif
164 
165 	cluster = &ip->cluster;
166 	if (clindex >= cluster->nchains)
167 		chain = NULL;
168 	else
169 		chain = cluster->array[clindex].chain;
170 	if (chain) {
171 		hammer2_chain_ref(chain);
172 		hammer2_spin_unsh(&ip->cluster_spin);
173 		hammer2_chain_lock(chain, how);
174 	} else {
175 		hammer2_spin_unsh(&ip->cluster_spin);
176 	}
177 	return chain;
178 }
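
/*
 * Illustrative usage sketch (not compiled): the chain returned above is
 * both referenced and locked, so callers pair it with an unlock + drop,
 * as the backend xop functions later in this file do.
 */
#if 0
	chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
	if (chain) {
		/* ... inspect chain->data->ipdata ... */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
#endif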
179 
180 hammer2_chain_t *
181 hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
182 			       hammer2_chain_t **parentp, int how)
183 {
184 	hammer2_chain_t *chain;
185 	hammer2_chain_t *parent;
186 
187 	for (;;) {
188 		hammer2_spin_sh(&ip->cluster_spin);
189 		if (clindex >= ip->cluster.nchains)
190 			chain = NULL;
191 		else
192 			chain = ip->cluster.array[clindex].chain;
193 		if (chain) {
194 			hammer2_chain_ref(chain);
195 			hammer2_spin_unsh(&ip->cluster_spin);
196 			hammer2_chain_lock(chain, how);
197 		} else {
198 			hammer2_spin_unsh(&ip->cluster_spin);
199 		}
200 
201 		/*
202 		 * Get parent, lock order must be (parent, chain).
203 		 */
204 		parent = chain->parent;
205 		if (parent) {
206 			hammer2_chain_ref(parent);
207 			hammer2_chain_unlock(chain);
208 			hammer2_chain_lock(parent, how);
209 			hammer2_chain_lock(chain, how);
210 		}
211 		if (ip->cluster.array[clindex].chain == chain &&
212 		    chain->parent == parent) {
213 			break;
214 		}
215 
216 		/*
217 		 * Retry
218 		 */
219 		hammer2_chain_unlock(chain);
220 		hammer2_chain_drop(chain);
221 		if (parent) {
222 			hammer2_chain_unlock(parent);
223 			hammer2_chain_drop(parent);
224 		}
225 	}
226 	*parentp = parent;
227 
228 	return chain;
229 }
230 
231 void
232 hammer2_inode_unlock(hammer2_inode_t *ip)
233 {
234 	hammer2_mtx_unlock(&ip->lock);
235 	hammer2_inode_drop(ip);
236 }
237 
238 /*
239  * Temporarily release a lock held shared or exclusive.  The caller must
240  * hold the lock shared or exclusive on call; the lock will be released
241  * on return.
242  *
243  * The _temp_restore() variant restores a lock that was temporarily released.
244  */
245 hammer2_mtx_state_t
246 hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
247 {
248 	return hammer2_mtx_temp_release(&ip->lock);
249 }
250 
251 void
252 hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
253 {
254 	hammer2_mtx_temp_restore(&ip->lock, ostate);
255 }
256 
257 /*
258  * Upgrade a shared inode lock to exclusive and return.  If the inode lock
259  * is already held exclusively this is a NOP.
260  *
261  * The caller MUST hold the inode lock either shared or exclusive on call
262  * and will own the lock exclusively on return.
263  *
264  * Returns non-zero if the lock was already exclusive prior to the upgrade.
265  */
266 int
267 hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
268 {
269 	int wasexclusive;
270 
271 	if (mtx_islocked_ex(&ip->lock)) {
272 		wasexclusive = 1;
273 	} else {
274 		hammer2_mtx_unlock(&ip->lock);
275 		hammer2_mtx_ex(&ip->lock);
276 		wasexclusive = 0;
277 	}
278 	return wasexclusive;
279 }
280 
281 /*
282  * Downgrade an inode lock from exclusive to shared only if the inode
283  * lock was previously shared.  If the inode lock was previously exclusive,
284  * this is a NOP.
285  */
286 void
287 hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
288 {
289 	if (wasexclusive == 0)
290 		mtx_downgrade(&ip->lock);
291 }
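
/*
 * Illustrative usage sketch (not compiled): the upgrade/downgrade pair
 * brackets a section requiring exclusivity while preserving the caller's
 * original lock state (see hammer2_igetv() below for a real use).
 */
#if 0
	wasexclusive = hammer2_inode_lock_upgrade(ip);
	/* ... modify fields requiring the exclusive lock, e.g. ip->vp ... */
	hammer2_inode_lock_downgrade(ip, wasexclusive);
#endif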
292 
293 /*
294  * Lookup an inode by inode number
295  */
296 hammer2_inode_t *
297 hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
298 {
299 	hammer2_inode_t *ip;
300 
301 	KKASSERT(pmp);
302 	if (pmp->spmp_hmp) {
303 		ip = NULL;
304 	} else {
305 		hammer2_spin_ex(&pmp->inum_spin);
306 		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
307 		if (ip)
308 			hammer2_inode_ref(ip);
309 		hammer2_spin_unex(&pmp->inum_spin);
310 	}
311 	return(ip);
312 }
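
/*
 * Illustrative usage sketch (not compiled): a successful lookup returns
 * the inode with a ref which the caller must drop when done.
 */
#if 0
	ip = hammer2_inode_lookup(pmp, inum);
	if (ip) {
		hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
		/* ... read ip->meta ... */
		hammer2_inode_unlock(ip);	/* releases the lock's ref */
		hammer2_inode_drop(ip);		/* releases the lookup ref */
	}
#endif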
313 
314 /*
315  * Adding a ref to an inode is only legal if the inode already has at least
316  * one ref.
317  *
318  * (can be called with spinlock held)
319  */
320 void
321 hammer2_inode_ref(hammer2_inode_t *ip)
322 {
323 	atomic_add_int(&ip->refs, 1);
324 	if (hammer2_debug & 0x80000) {
325 		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
326 		print_backtrace(8);
327 	}
328 }
329 
330 /*
331  * Drop an inode reference, freeing the inode when the last reference goes
332  * away.
333  */
334 void
335 hammer2_inode_drop(hammer2_inode_t *ip)
336 {
337 	hammer2_pfs_t *pmp;
338 	u_int refs;
339 
340 	while (ip) {
341 		if (hammer2_debug & 0x80000) {
342 			kprintf("INODE-1 %p (%d->%d)\n",
343 				ip, ip->refs, ip->refs - 1);
344 			print_backtrace(8);
345 		}
346 		refs = ip->refs;
347 		cpu_ccfence();
348 		if (refs == 1) {
349 			/*
350 			 * Transition to zero, must interlock with
351 			 * the inode inumber lookup tree (if applicable).
352 			 * It should not be possible for anyone to race
353 			 * the transition to 0.
354 			 */
355 			pmp = ip->pmp;
356 			KKASSERT(pmp);
357 			hammer2_spin_ex(&pmp->inum_spin);
358 
359 			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
360 				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
361 				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
362 					atomic_clear_int(&ip->flags,
363 						     HAMMER2_INODE_ONRBTREE);
364 					RB_REMOVE(hammer2_inode_tree,
365 						  &pmp->inum_tree, ip);
366 				}
367 				hammer2_spin_unex(&pmp->inum_spin);
368 
369 				ip->pmp = NULL;
370 
371 #if 0
372 				/*
373 				 * Clean out the cluster cache
374 				 */
375 				hammer2_cluster_t *tmpclu;
376 				tmpclu = ip->cluster_cache;
377 				if (tmpclu) {
378 					ip->cluster_cache = NULL;
379 					hammer2_cluster_drop(tmpclu);
380 				}
381 #endif
382 
383 				/*
384 				 * Cleaning out ip->cluster isn't entirely
385 				 * trivial.
386 				 */
387 				hammer2_inode_repoint(ip, NULL, NULL);
388 
389 				kfree(ip, pmp->minode);
390 				atomic_add_long(&pmp->inmem_inodes, -1);
391 				ip = NULL;	/* will terminate loop */
392 			} else {
393 				hammer2_spin_unex(&ip->pmp->inum_spin);
394 			}
395 		} else {
396 			/*
397 			 * Non-zero transition
398 			 */
399 			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
400 				break;
401 		}
402 	}
403 }
404 
405 /*
406  * Get the vnode associated with the given inode, allocating the vnode if
407  * necessary.  The vnode will be returned exclusively locked.
408  *
409  * The caller must lock the inode (shared or exclusive).
410  *
411  * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
412  * races.
413  */
414 struct vnode *
415 hammer2_igetv(hammer2_inode_t *ip, int *errorp)
416 {
417 	hammer2_pfs_t *pmp;
418 	struct vnode *vp;
419 
420 	pmp = ip->pmp;
421 	KKASSERT(pmp != NULL);
422 	*errorp = 0;
423 
424 	for (;;) {
425 		/*
426 		 * Attempt to reuse an existing vnode assignment.  It is
427 		 * possible to race a reclaim so the vget() may fail.  The
428 		 * inode must be unlocked during the vget() to avoid a
429 		 * deadlock against a reclaim.
430 		 */
431 		int wasexclusive;
432 
433 		vp = ip->vp;
434 		if (vp) {
435 			/*
436 			 * Inode must be unlocked during the vget() to avoid
437 			 * possible deadlocks, but leave the ip ref intact.
438 			 *
439 			 * vnode is held to prevent destruction during the
440 			 * vget().  The vget() can still fail if we lost
441 			 * a reclaim race on the vnode.
442 			 */
443 			hammer2_mtx_state_t ostate;
444 
445 			vhold(vp);
446 			ostate = hammer2_inode_lock_temp_release(ip);
447 			if (vget(vp, LK_EXCLUSIVE)) {
448 				vdrop(vp);
449 				hammer2_inode_lock_temp_restore(ip, ostate);
450 				continue;
451 			}
452 			hammer2_inode_lock_temp_restore(ip, ostate);
453 			vdrop(vp);
454 			/* vp still locked and ref from vget */
455 			if (ip->vp != vp) {
456 				kprintf("hammer2: igetv race %p/%p\n",
457 					ip->vp, vp);
458 				vput(vp);
459 				continue;
460 			}
461 			*errorp = 0;
462 			break;
463 		}
464 
465 		/*
466 		 * No vnode exists, allocate a new vnode.  Beware of
467 		 * allocation races.  This function will return an
468 		 * exclusively locked and referenced vnode.
469 		 */
470 		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
471 		if (*errorp) {
472 			kprintf("hammer2: igetv getnewvnode failed %d\n",
473 				*errorp);
474 			vp = NULL;
475 			break;
476 		}
477 
478 		/*
479 		 * Lock the inode and check for an allocation race.
480 		 */
481 		wasexclusive = hammer2_inode_lock_upgrade(ip);
482 		if (ip->vp != NULL) {
483 			vp->v_type = VBAD;
484 			vx_put(vp);
485 			hammer2_inode_lock_downgrade(ip, wasexclusive);
486 			continue;
487 		}
488 
489 		switch (ip->meta.type) {
490 		case HAMMER2_OBJTYPE_DIRECTORY:
491 			vp->v_type = VDIR;
492 			break;
493 		case HAMMER2_OBJTYPE_REGFILE:
494 			vp->v_type = VREG;
495 			vinitvmio(vp, ip->meta.size,
496 				  HAMMER2_LBUFSIZE,
497 				  (int)ip->meta.size & HAMMER2_LBUFMASK);
498 			break;
499 		case HAMMER2_OBJTYPE_SOFTLINK:
500 			/*
501 			 * XXX for now we are using the generic file_read
502 			 * and file_write code so we need a buffer cache
503 			 * association.
504 			 */
505 			vp->v_type = VLNK;
506 			vinitvmio(vp, ip->meta.size,
507 				  HAMMER2_LBUFSIZE,
508 				  (int)ip->meta.size & HAMMER2_LBUFMASK);
509 			break;
510 		case HAMMER2_OBJTYPE_CDEV:
511 			vp->v_type = VCHR;
512 			/* fall through */
513 		case HAMMER2_OBJTYPE_BDEV:
514 			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
515 			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
516 				vp->v_type = VBLK;
517 			addaliasu(vp,
518 				  ip->meta.rmajor,
519 				  ip->meta.rminor);
520 			break;
521 		case HAMMER2_OBJTYPE_FIFO:
522 			vp->v_type = VFIFO;
523 			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
524 			break;
525 		case HAMMER2_OBJTYPE_SOCKET:
526 			vp->v_type = VSOCK;
527 			break;
528 		default:
529 			panic("hammer2: unhandled objtype %d",
530 			      ip->meta.type);
531 			break;
532 		}
533 
534 		if (ip == pmp->iroot)
535 			vsetflags(vp, VROOT);
536 
537 		vp->v_data = ip;
538 		ip->vp = vp;
539 		hammer2_inode_ref(ip);		/* vp association */
540 		hammer2_inode_lock_downgrade(ip, wasexclusive);
541 		break;
542 	}
543 
544 	/*
545 	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
546 	 */
547 	if (hammer2_debug & 0x0002) {
548 		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
549 			vp, vp->v_refcnt, vp->v_auxrefs);
550 	}
551 	return (vp);
552 }
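
/*
 * Illustrative usage sketch (not compiled): hammer2_igetv() is called
 * with the inode locked and returns an exclusively locked, referenced
 * vnode (or NULL with *errorp set).
 */
#if 0
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	vp = hammer2_igetv(ip, &error);
	hammer2_inode_unlock(ip);
	if (vp) {
		/* hand vp to the VFS, or vput(vp) when done */
	}
#endif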
553 
554 /*
555  * Returns the inode associated with the passed-in cluster, creating the
556  * inode if necessary, otherwise synchronizing it to the passed-in cluster.
557  * When synchronizing, if idx >= 0, only cluster index (idx) is synchronized.
558  * Otherwise the whole cluster is synchronized.
559  *
560  * The passed-in cluster must be locked and will remain locked on return.
561  * The returned inode will be locked and the caller may dispose of both
562  * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
563  * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
564  *
565  * The hammer2_inode structure regulates the interface between the high level
566  * kernel VNOPS API and the filesystem backend (the chains).
567  *
568  * On return the inode is locked with the supplied cluster.
569  */
570 hammer2_inode_t *
571 hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
572 		  hammer2_cluster_t *cluster, int idx)
573 {
574 	hammer2_inode_t *nip;
575 	const hammer2_inode_data_t *iptmp;
576 	const hammer2_inode_data_t *nipdata;
577 
578 	KKASSERT(cluster == NULL ||
579 		 hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);
580 	KKASSERT(pmp);
581 
582 	/*
583 	 * Interlocked lookup/ref of the inode.  This code is only needed
584 	 * when looking up inodes with nlinks != 0 (TODO: optimize out
585 	 * otherwise and test for duplicates).
586 	 *
587 	 * Cluster can be NULL during the initial pfs allocation.
588 	 */
589 again:
590 	while (cluster) {
591 		iptmp = &hammer2_cluster_rdata(cluster)->ipdata;
592 		nip = hammer2_inode_lookup(pmp, iptmp->meta.inum);
593 		if (nip == NULL)
594 			break;
595 
596 		hammer2_mtx_ex(&nip->lock);
597 
598 		/*
599 		 * Handle SMP race (not applicable to the super-root spmp
600 		 * which can't index inodes due to duplicative inode numbers).
601 		 */
602 		if (pmp->spmp_hmp == NULL &&
603 		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
604 			hammer2_mtx_unlock(&nip->lock);
605 			hammer2_inode_drop(nip);
606 			continue;
607 		}
608 		if (idx >= 0)
609 			hammer2_inode_repoint_one(nip, cluster, idx);
610 		else
611 			hammer2_inode_repoint(nip, NULL, cluster);
612 
613 		return nip;
614 	}
615 
616 	/*
617 	 * We couldn't find the inode number, create a new inode.
618 	 */
619 	nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
620 	spin_init(&nip->cluster_spin, "h2clspin");
621 	atomic_add_long(&pmp->inmem_inodes, 1);
622 	hammer2_pfs_memory_inc(pmp);
623 	hammer2_pfs_memory_wakeup(pmp);
624 	if (pmp->spmp_hmp)
625 		nip->flags = HAMMER2_INODE_SROOT;
626 
627 	/*
628 	 * Initialize nip's cluster.  A cluster is provided for normal
629 	 * inodes but typically not for the super-root or PFS inodes.
630 	 */
631 	nip->cluster.refs = 1;
632 	nip->cluster.pmp = pmp;
633 	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
634 	if (cluster) {
635 		nipdata = &hammer2_cluster_rdata(cluster)->ipdata;
636 		nip->meta = nipdata->meta;
637 		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
638 		hammer2_inode_repoint(nip, NULL, cluster);
639 	} else {
640 		nip->meta.inum = 1;		/* PFS inum is always 1 XXX */
641 		/* mtime will be updated when a cluster is available */
642 		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);/*XXX*/
643 	}
644 
645 	nip->pmp = pmp;
646 
647 	/*
648 	 * ref and lock on nip gives it state compatible to after a
649 	 * hammer2_inode_lock() call.
650 	 */
651 	nip->refs = 1;
652 	hammer2_mtx_init(&nip->lock, "h2inode");
653 	hammer2_mtx_ex(&nip->lock);
654 	/* combination of thread lock and chain lock == inode lock */
655 
656 	/*
657 	 * Attempt to add the inode.  If it fails we raced another inode
658 	 * get.  Undo all the work and try again.
659 	 */
660 	if (pmp->spmp_hmp == NULL) {
661 		hammer2_spin_ex(&pmp->inum_spin);
662 		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
663 			hammer2_spin_unex(&pmp->inum_spin);
664 			hammer2_mtx_unlock(&nip->lock);
665 			hammer2_inode_drop(nip);
666 			goto again;
667 		}
668 		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
669 		hammer2_spin_unex(&pmp->inum_spin);
670 	}
671 
672 	return (nip);
673 }
674 
675 /*
676  * Create a new inode in the specified directory using the vattr to
677  * figure out the type.  A non-zero type field overrides vattr.
678  *
679  * If no error occurs the new inode with its cluster locked is returned.
680  * However, when creating an OBJTYPE_HARDLINK, the caller can assume
681  * that NULL will be returned (that is, the caller already has the inode
682  * in-hand and is creating a hardlink to it, we do not need to return a
683  * representative ip).
684  *
685  * If vap and/or cred are NULL the related fields are not set and the
686  * inode type defaults to a directory.  This is used when creating PFSs
687  * under the super-root, so the inode number is set to 1 in this case.
688  *
689  * dip is not locked on entry.
690  *
691  * NOTE: This function is used to create all manner of inodes, including
692  *	 super-root entries for snapshots and PFSs.  When used to create a
693  *	 snapshot the inode will be temporarily associated with the spmp.
694  *
695  * NOTE: When creating a normal file or directory the caller must call this
696  *	 function twice, once to create the actual inode and once to create
697  *	 the hardlink representing the directory entry.  This function is
698  *	 only called once for a softlink, which is its own directory entry.
699  *
700  * NOTE: When creating a hardlink target (a real inode), name/name_len is
701  *	 passed as NULL/0, and caller should pass lhc as inum.
702  */
703 hammer2_inode_t *
704 hammer2_inode_create(hammer2_inode_t *dip, hammer2_inode_t *pip,
705 		     struct vattr *vap, struct ucred *cred,
706 		     const uint8_t *name, size_t name_len, hammer2_key_t lhc,
707 		     hammer2_key_t inum,
708 		     uint8_t type, uint8_t target_type,
709 		     int flags, int *errorp)
710 {
711 	hammer2_xop_create_t *xop;
712 	hammer2_inode_t *nip;
713 	int error;
714 	uid_t xuid;
715 	uuid_t pip_uid;
716 	uuid_t pip_gid;
717 	uint32_t pip_mode;
718 	uint8_t pip_comp_algo;
719 	uint8_t pip_check_algo;
720 	hammer2_tid_t pip_inum;
721 
722 	if (name)
723 		lhc = hammer2_dirhash(name, name_len);
724 	*errorp = 0;
725 	nip = NULL;
726 
727 	/*
728 	 * Locate the inode or indirect block to create the new
729 	 * entry in.  At the same time check for key collisions
730 	 * and iterate until we don't get one.
731 	 *
732 	 * Lock the directory exclusively for now to guarantee that
733 	 * we can find an unused lhc for the name.  Due to collisions,
734 	 * two different creates can end up with the same lhc so we
735 	 * cannot depend on the OS to prevent the collision.
736 	 */
737 	hammer2_inode_lock(dip, 0);
738 
739 	pip_uid = pip->meta.uid;
740 	pip_gid = pip->meta.gid;
741 	pip_mode = pip->meta.mode;
742 	pip_comp_algo = pip->meta.comp_algo;
743 	pip_check_algo = pip->meta.check_algo;
744 	pip_inum = (pip == pip->pmp->iroot) ? 0 : pip->meta.inum;
745 
746 	/*
747 	 * If name specified, locate an unused key in the collision space.
748 	 * Otherwise use the passed-in lhc directly.
749 	 */
750 	if (name) {
751 		hammer2_xop_scanlhc_t *sxop;
752 		hammer2_key_t lhcbase;
753 
754 		lhcbase = lhc;
755 		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
756 		sxop->lhc = lhc;
757 		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
758 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
759 			if (lhc != sxop->head.cluster.focus->bref.key)
760 				break;
761 			++lhc;
762 		}
763 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
764 
765 		if (error) {
766 			if (error != ENOENT)
767 				goto done2;
768 			++lhc;
769 			error = 0;
770 		}
771 		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
772 			error = ENOSPC;
773 			goto done2;
774 		}
775 	}
776 
777 	/*
778 	 * Create the inode with the lhc as the key.
779 	 */
780 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
781 	xop->lhc = lhc;
782 	xop->flags = flags;
783 	bzero(&xop->meta, sizeof(xop->meta));
784 
785 	if (vap) {
786 		xop->meta.type = hammer2_get_obj_type(vap->va_type);
787 
788 		switch (xop->meta.type) {
789 		case HAMMER2_OBJTYPE_CDEV:
790 		case HAMMER2_OBJTYPE_BDEV:
791 			xop->meta.rmajor = vap->va_rmajor;
792 			xop->meta.rminor = vap->va_rminor;
793 			break;
794 		default:
795 			break;
796 		}
797 		type = xop->meta.type;
798 	} else {
799 		xop->meta.type = type;
800 		xop->meta.target_type = target_type;
801 	}
802 	xop->meta.inum = inum;
803 	xop->meta.iparent = pip_inum;
804 
805 	/* Inherit parent's inode compression mode. */
806 	xop->meta.comp_algo = pip_comp_algo;
807 	xop->meta.check_algo = pip_check_algo;
808 	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
809 	hammer2_update_time(&xop->meta.ctime);
810 	xop->meta.mtime = xop->meta.ctime;
811 	if (vap)
812 		xop->meta.mode = vap->va_mode;
813 	xop->meta.nlinks = 1;
814 	if (vap) {
815 		if (dip->pmp) {
816 			xuid = hammer2_to_unix_xid(&pip_uid);
817 			xuid = vop_helper_create_uid(dip->pmp->mp,
818 						     pip_mode,
819 						     xuid,
820 						     cred,
821 						     &vap->va_mode);
822 		} else {
823 			/* super-root has no dip and/or pmp */
824 			xuid = 0;
825 		}
826 		if (vap->va_vaflags & VA_UID_UUID_VALID)
827 			xop->meta.uid = vap->va_uid_uuid;
828 		else if (vap->va_uid != (uid_t)VNOVAL)
829 			hammer2_guid_to_uuid(&xop->meta.uid, vap->va_uid);
830 		else
831 			hammer2_guid_to_uuid(&xop->meta.uid, xuid);
832 
833 		if (vap->va_vaflags & VA_GID_UUID_VALID)
834 			xop->meta.gid = vap->va_gid_uuid;
835 		else if (vap->va_gid != (gid_t)VNOVAL)
836 			hammer2_guid_to_uuid(&xop->meta.gid, vap->va_gid);
837 		else
838 			xop->meta.gid = pip_gid;
839 	}
840 
841 	/*
842 	 * Regular files and softlinks allow a small amount of data to be
843 	 * directly embedded in the inode.  This flag will be cleared if
844 	 * the size is extended past the embedded limit.
845 	 */
846 	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
847 	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK ||
848 	    xop->meta.type == HAMMER2_OBJTYPE_HARDLINK) {
849 		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
850 	}
851 	if (name) {
852 		hammer2_xop_setname(&xop->head, name, name_len);
853 	} else {
854 		name_len = hammer2_xop_setname_inum(&xop->head, inum);
855 		KKASSERT(lhc == inum);
856 	}
857 	xop->meta.name_len = name_len;
858 	xop->meta.name_key = lhc;
859 	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
860 
861 	hammer2_xop_start(&xop->head, hammer2_inode_xop_create);
862 
863 	error = hammer2_xop_collect(&xop->head, 0);
864 #if INODE_DEBUG
865 	kprintf("CREATE INODE %*.*s\n",
866 		(int)name_len, (int)name_len, name);
867 #endif
868 
869 	if (error) {
870 		*errorp = error;
871 		goto done;
872 	}
873 
874 	/*
875 	 * Set up the new inode if not a hardlink pointer.
876 	 *
877 	 * NOTE: *_get() integrates chain's lock into the inode lock.
878 	 *
879 	 * NOTE: Only one new inode can currently be created per
880 	 *	 transaction.  If the need arises we can adjust
881 	 *	 hammer2_trans_init() to allow more.
882 	 *
883 	 * NOTE: nipdata will have chain's blockset data.
884 	 */
885 	if (type != HAMMER2_OBJTYPE_HARDLINK) {
886 		nip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
887 		nip->comp_heuristic = 0;
888 	} else {
889 		nip = NULL;
890 	}
891 
892 done:
893 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
894 done2:
895 	hammer2_inode_unlock(dip);
896 
897 	return (nip);
898 }
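
/*
 * Illustrative usage sketch (not compiled, details hypothetical): creating
 * a PFS inode under the super-root per the NOTEs above, with NULL vap/cred
 * and inode number 1.  Real callers may also pass insertion flags.
 */
#if 0
	nip = hammer2_inode_create(spmp->iroot, spmp->iroot, NULL, NULL,
				   pfs_name, name_len, 0,
				   1,		/* PFS inum is always 1 */
				   HAMMER2_OBJTYPE_DIRECTORY, 0,
				   0, &error);
#endif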
899 
900 /*
901  * Repoint ip->cluster's chains to cluster's chains and fixup the default
902  * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
903  * filters out invalid or non-matching elements.
904  *
905  * Caller must hold the inode exclusively locked.  The cluster, if not
906  * NULL, must also be locked.
907  *
908  * Cluster may be NULL to clean out any chains in ip->cluster.
909  */
910 void
911 hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
912 		      hammer2_cluster_t *cluster)
913 {
914 	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
915 	hammer2_chain_t *ochain;
916 	hammer2_chain_t *nchain;
917 	int i;
918 
919 	bzero(dropch, sizeof(dropch));
920 
921 	/*
922 	 * Replace chains in ip->cluster with chains from cluster and
923 	 * adjust the focus if necessary.
924 	 *
925 	 * NOTE: nchain and/or ochain can be NULL due to gaps
926 	 *	 in the cluster arrays.
927 	 */
928 	hammer2_spin_ex(&ip->cluster_spin);
929 	for (i = 0; cluster && i < cluster->nchains; ++i) {
930 		/*
931 		 * Do not replace elements which are the same.  Also handle
932 		 * element count discrepancies.
933 		 */
934 		nchain = cluster->array[i].chain;
935 		if (i < ip->cluster.nchains) {
936 			ochain = ip->cluster.array[i].chain;
937 			if (ochain == nchain)
938 				continue;
939 		} else {
940 			ochain = NULL;
941 		}
942 
943 		/*
944 		 * Make adjustments
945 		 */
946 		ip->cluster.array[i].chain = nchain;
947 		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
948 		ip->cluster.array[i].flags |= cluster->array[i].flags &
949 					      HAMMER2_CITEM_INVALID;
950 		if (nchain)
951 			hammer2_chain_ref(nchain);
952 		dropch[i] = ochain;
953 	}
954 
955 	/*
956 	 * Release any left-over chains in ip->cluster.
957 	 */
958 	while (i < ip->cluster.nchains) {
959 		nchain = ip->cluster.array[i].chain;
960 		if (nchain) {
961 			ip->cluster.array[i].chain = NULL;
962 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
963 		}
964 		dropch[i] = nchain;
965 		++i;
966 	}
967 
968 	/*
969 	 * Fixup fields.  Note that the inode-embedded cluster is never
970 	 * directly locked.
971 	 */
972 	if (cluster) {
973 		ip->cluster.nchains = cluster->nchains;
974 		ip->cluster.focus = cluster->focus;
975 		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
976 	} else {
977 		ip->cluster.nchains = 0;
978 		ip->cluster.focus = NULL;
979 		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
980 	}
981 
982 	hammer2_spin_unex(&ip->cluster_spin);
983 
984 	/*
985 	 * Cleanup outside of spinlock
986 	 */
987 	while (--i >= 0) {
988 		if (dropch[i])
989 			hammer2_chain_drop(dropch[i]);
990 	}
991 }
992 
993 /*
994  * Repoint a single element from the cluster to the ip.  Used by the
995  * synchronization threads to piecemeal update inodes.  Does not change
996  * focus and requires inode to be re-locked to clean-up flags (XXX).
997  */
998 void
999 hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1000 			  int idx)
1001 {
1002 	hammer2_chain_t *ochain;
1003 	hammer2_chain_t *nchain;
1004 	int i;
1005 
1006 	hammer2_spin_ex(&ip->cluster_spin);
1007 	KKASSERT(idx < cluster->nchains);
1008 	if (idx < ip->cluster.nchains) {
1009 		ochain = ip->cluster.array[idx].chain;
1010 		nchain = cluster->array[idx].chain;
1011 	} else {
1012 		ochain = NULL;
1013 		nchain = cluster->array[idx].chain;
1014 		for (i = ip->cluster.nchains; i <= idx; ++i) {
1015 			bzero(&ip->cluster.array[i],
1016 			      sizeof(ip->cluster.array[i]));
1017 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
1018 		}
1019 		ip->cluster.nchains = idx + 1;
1020 	}
1021 	if (ochain != nchain) {
1022 		/*
1023 		 * Make adjustments.
1024 		 */
1025 		ip->cluster.array[idx].chain = nchain;
1026 		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
1027 		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
1028 						HAMMER2_CITEM_INVALID;
1029 	}
1030 	hammer2_spin_unex(&ip->cluster_spin);
1031 	if (ochain != nchain) {
1032 		if (nchain)
1033 			hammer2_chain_ref(nchain);
1034 		if (ochain)
1035 			hammer2_chain_drop(ochain);
1036 	}
1037 }
1038 
1039 /*
1040  * Called with a locked inode to finish unlinking an inode after xop_unlink
1041  * had been run.  This function is responsible for decrementing nlinks.
1042  *
1043  * We don't bother decrementing nlinks if the file is not open and this was
1044  * the last link.
1045  *
1046  * If the inode is a hardlink target, its chain has not yet been deleted;
1047  * otherwise its chain has been deleted.
1048  *
1049  * If isopen is set then any prior deletion was not permanent and the
1050  * inode is left intact with nlinks == 0.
1051  */
1052 int
1053 hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
1054 {
1055 	hammer2_pfs_t *pmp;
1056 	int error;
1057 
1058 	pmp = ip->pmp;
1059 
1060 	/*
1061 	 * Decrement nlinks.  If this is the last link and the file is
1062 	 * not open we can just delete the inode and not bother dropping
1063 	 * nlinks to 0 (avoiding unnecessary block updates).
1064 	 */
1065 	if (ip->meta.nlinks == 1) {
1066 		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
1067 		if (isopen == 0)
1068 			goto killit;
1069 	}
1070 
1071 	hammer2_inode_modify(ip);
1072 	--ip->meta.nlinks;
1073 	if ((int64_t)ip->meta.nlinks < 0)
1074 		ip->meta.nlinks = 0;	/* safety */
1075 
1076 	/*
1077 	 * If nlinks is not zero we are done.  However, this should only be
1078  * possible with a hardlink target.  If the inode is an embedded
1079  * hardlink, nlinks should have dropped to zero; warn and proceed
1080 	 * with the next step.
1081 	 */
1082 	if (ip->meta.nlinks) {
1083 		if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
1084 			return 0;
1085 		kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
1086 			(intmax_t)ip->meta.nlinks);
1087 		return 0;
1088 	}
1089 
1090 	/*
1091 	 * nlinks is now zero, delete the inode if not open.
1092 	 */
1093 	if (isopen == 0) {
1094 		hammer2_xop_destroy_t *xop;
1095 
1096 killit:
1097 		atomic_set_int(&ip->flags, HAMMER2_INODE_ISDELETED);
1098 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1099 		hammer2_xop_start(&xop->head, hammer2_inode_xop_destroy);
1100 		error = hammer2_xop_collect(&xop->head, 0);
1101 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1102 	}
1103 	error = 0;
1104 	return error;
1105 }
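
/*
 * Illustrative usage sketch (not compiled, caller details hypothetical):
 * after the unlink xop removes the directory entry, the finisher is run
 * with the inode locked; isopen reflects whether the file is still open.
 */
#if 0
	hammer2_inode_lock(ip, 0);
	error = hammer2_inode_unlink_finisher(ip, isopen);
	hammer2_inode_unlock(ip);
#endif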
1106 
1107 /*
1108  * Mark an inode as being modified, meaning that the caller will modify
1109  * ip->meta.
1110  *
1111  * If a vnode is present we set the vnode dirty and the nominal filesystem
1112  * sync will also handle synchronizing the inode meta-data.  If no vnode
1113  * is present we must ensure that the inode is on pmp->sideq.
1114  *
1115  * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
1116  *	 only modifying the in-memory inode.  A modify_tid is synchronized
1117  *	 later when the inode gets flushed.
1118  */
1119 void
1120 hammer2_inode_modify(hammer2_inode_t *ip)
1121 {
1122 	hammer2_pfs_t *pmp;
1123 
1124 	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1125 	if (ip->vp) {
1126 		vsetisdirty(ip->vp);
1127 	} else if ((pmp = ip->pmp) != NULL) {
1128 		hammer2_inode_delayed_sideq(ip);
1129 	}
1130 }
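
/*
 * Illustrative usage sketch (not compiled): ip->meta updates are
 * bracketed by the inode lock and preceded by hammer2_inode_modify()
 * so the change is picked up by the vnode sync or the sideq.
 */
#if 0
	hammer2_inode_lock(ip, 0);
	hammer2_inode_modify(ip);
	hammer2_update_time(&ip->meta.mtime);
	hammer2_inode_unlock(ip);
#endif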
1131 
1132 /*
1133  * Synchronize the inode's frontend state with the chain state prior
1134  * to any explicit flush of the inode or any strategy write call.
1135  *
1136  * Called with a locked inode inside a transaction.
1137  */
1138 void
1139 hammer2_inode_chain_sync(hammer2_inode_t *ip)
1140 {
1141 	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
1142 		hammer2_xop_fsync_t *xop;
1143 		int error;
1144 
1145 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1146 		xop->clear_directdata = 0;
1147 		if (ip->flags & HAMMER2_INODE_RESIZED) {
1148 			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
1149 			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
1150 				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
1151 				xop->clear_directdata = 1;
1152 			}
1153 			xop->osize = ip->osize;
1154 		} else {
1155 			xop->osize = ip->meta.size;	/* safety */
1156 		}
1157 		xop->ipflags = ip->flags;
1158 		xop->meta = ip->meta;
1159 
1160 		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
1161 					     HAMMER2_INODE_MODIFIED);
1162 		hammer2_xop_start(&xop->head, hammer2_inode_xop_chain_sync);
1163 		error = hammer2_xop_collect(&xop->head, 0);
1164 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1165 		if (error == ENOENT)
1166 			error = 0;
1167 		if (error) {
1168 			kprintf("hammer2: unable to fsync inode %p\n", ip);
1169 			/*
1170 			atomic_set_int(&ip->flags,
1171 				       xop->ipflags & (HAMMER2_INODE_RESIZED |
1172 						       HAMMER2_INODE_MODIFIED));
1173 			*/
1174 			/* XXX return error somehow? */
1175 		}
1176 	}
1177 }
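
/*
 * Illustrative usage sketch (not compiled): a minimal explicit flush,
 * assuming the transaction helpers hammer2_trans_init()/_done() declared
 * in hammer2.h.
 */
#if 0
	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);
	hammer2_inode_chain_sync(ip);
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);
#endif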
1178 
1179 /*
1180  * The normal filesystem sync no longer has visibility to an inode structure
1181  * after its vnode has been reclaimed.  In this situation an unlinked-but-open
1182  * inode or a dirty inode may require additional processing to synchronize
1183  * ip->meta to its underlying cluster nodes.
1184  *
1185  * In particular, reclaims can occur in almost any state (for example, when
1186  * doing operations on unrelated vnodes) and flushing the reclaimed inode
1187  * in the reclaim path itself is a non-starter.
1188  *
1189  * Caller must be in a transaction.
1190  */
1191 void
1192 hammer2_inode_run_sideq(hammer2_pfs_t *pmp)
1193 {
1194 	hammer2_xop_destroy_t *xop;
1195 	hammer2_inode_sideq_t *ipul;
1196 	hammer2_inode_t *ip;
1197 	int error;
1198 
1199 	if (TAILQ_EMPTY(&pmp->sideq))
1200 		return;
1201 
1202 	LOCKSTART;
1203 	hammer2_spin_ex(&pmp->list_spin);
1204 	while ((ipul = TAILQ_FIRST(&pmp->sideq)) != NULL) {
1205 		TAILQ_REMOVE(&pmp->sideq, ipul, entry);
1206 		ip = ipul->ip;
1207 		KKASSERT(ip->flags & HAMMER2_INODE_ONSIDEQ);
1208 		atomic_clear_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
1209 		hammer2_spin_unex(&pmp->list_spin);
1210 		kfree(ipul, pmp->minode);
1211 
1212 		hammer2_inode_lock(ip, 0);
1213 		if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
1214 			/*
1215 			 * The inode was unlinked while open.  The inode must
1216 			 * be deleted and destroyed.
1217 			 */
1218 			xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1219 			hammer2_xop_start(&xop->head,
1220 					  hammer2_inode_xop_destroy);
1221 			error = hammer2_xop_collect(&xop->head, 0);
1222 			hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1223 		} else {
1224 			/*
1225 			 * The inode was dirty as-of the reclaim, requiring
1226 			 * synchronization of ip->meta with its underlying
1227 			 * chains.
1228 			 */
1229 			hammer2_inode_chain_sync(ip);
1230 		}
1231 
1232 		hammer2_inode_unlock(ip);
1233 		hammer2_inode_drop(ip);			/* ipul ref */
1234 
1235 		hammer2_spin_ex(&pmp->list_spin);
1236 	}
1237 	hammer2_spin_unex(&pmp->list_spin);
1238 	LOCKSTOP;
1239 }
1240 
1241 /*
1242  * Inode create helper (threaded, backend)
1243  *
1244  * Used by ncreate, nmknod, nsymlink, nmkdir.
1245  * Used by nlink and rename to create HARDLINK pointers.
1246  *
1247  * Frontend holds the parent directory ip locked exclusively.  We
1248  * create the inode and feed the exclusively locked chain to the
1249  * frontend.
1250  */
1251 void
1252 hammer2_inode_xop_create(hammer2_thread_t *thr, hammer2_xop_t *arg)
1253 {
1254 	hammer2_xop_create_t *xop = &arg->xop_create;
1255 	hammer2_chain_t *parent;
1256 	hammer2_chain_t *chain;
1257 	hammer2_key_t key_next;
1258 	int cache_index = -1;
1259 	int error;
1260 
1261 	if (hammer2_debug & 0x0001)
1262 		kprintf("inode_create lhc %016jx clindex %d\n",
1263 			xop->lhc, thr->clindex);
1264 
1265 	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
1266 				     HAMMER2_RESOLVE_ALWAYS);
1267 	if (parent == NULL) {
1268 		error = EIO;
1269 		chain = NULL;
1270 		goto fail;
1271 	}
1272 	chain = hammer2_chain_lookup(&parent, &key_next,
1273 				     xop->lhc, xop->lhc,
1274 				     &cache_index, 0);
1275 	if (chain) {
1276 		error = EEXIST;
1277 		goto fail;
1278 	}
1279 
1280 	error = hammer2_chain_create(&parent, &chain,
1281 				     xop->head.ip1->pmp, HAMMER2_METH_DEFAULT,
1282 				     xop->lhc, 0,
1283 				     HAMMER2_BREF_TYPE_INODE,
1284 				     HAMMER2_INODE_BYTES,
1285 				     xop->head.mtid, 0, xop->flags);
1286 	if (error == 0) {
1287 		hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
1288 		chain->data->ipdata.meta = xop->meta;
1289 		if (xop->head.name1) {
1290 			bcopy(xop->head.name1,
1291 			      chain->data->ipdata.filename,
1292 			      xop->head.name1_len);
1293 			chain->data->ipdata.meta.name_len = xop->head.name1_len;
1294 		}
1295 		chain->data->ipdata.meta.name_key = xop->lhc;
1296 	}
1297 fail:
1298 	if (parent) {
1299 		hammer2_chain_unlock(parent);
1300 		hammer2_chain_drop(parent);
1301 	}
1302 	hammer2_xop_feed(&xop->head, chain, thr->clindex, error);
1303 	if (chain) {
1304 		hammer2_chain_unlock(chain);
1305 		hammer2_chain_drop(chain);
1306 	}
1307 }
1308 
1309 /*
1310  * Inode delete helper (backend, threaded)
1311  *
1312  * Generally used by hammer2_run_sideq()
1313  */
1314 void
1315 hammer2_inode_xop_destroy(hammer2_thread_t *thr, hammer2_xop_t *arg)
1316 {
1317 	hammer2_xop_destroy_t *xop = &arg->xop_destroy;
1318 	hammer2_pfs_t *pmp;
1319 	hammer2_chain_t *parent;
1320 	hammer2_chain_t *chain;
1321 	hammer2_inode_t *ip;
1322 	int error;
1323 
1324 	/*
1325 	 * We need the precise parent chain to issue the deletion.
1326 	 */
1327 	ip = xop->head.ip1;
1328 	pmp = ip->pmp;
1329 	chain = NULL;
1330 
1331 again:
1332 	parent = hammer2_inode_chain(ip, thr->clindex, HAMMER2_RESOLVE_ALWAYS);
1333 	if (parent)
1334 		hammer2_chain_getparent(&parent, HAMMER2_RESOLVE_ALWAYS);
1335 	if (parent == NULL) {
1336 		error = EIO;
1337 		goto done;
1338 	}
1339 	chain = hammer2_inode_chain(ip, thr->clindex, HAMMER2_RESOLVE_ALWAYS);
1340 	if (chain == NULL) {
1341 		error = EIO;
1342 		goto done;
1343 	}
1344 	if (chain->parent != parent) {
1345 		kprintf("hammer2_inode_xop_destroy: "
1346 			"parent changed %p->(%p,%p)\n",
1347 			chain, parent, chain->parent);
1348 		hammer2_chain_unlock(parent);
1349 		hammer2_chain_drop(parent);
1350 		hammer2_chain_unlock(chain);
1351 		hammer2_chain_drop(chain);
1352 		goto again;
1353 	}
1354 
1355 	hammer2_chain_delete(parent, chain, xop->head.mtid, 0);
1356 	error = 0;
1357 done:
1358 	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
1359 	if (parent) {
1360 		hammer2_chain_unlock(parent);
1361 		hammer2_chain_drop(parent);
1362 	}
1363 	if (chain) {
1364 		hammer2_chain_unlock(chain);
1365 		hammer2_chain_drop(chain);
1366 	}
1367 }
1368 
1369 void
1370 hammer2_inode_xop_unlinkall(hammer2_thread_t *thr, hammer2_xop_t *arg)
1371 {
1372 	hammer2_xop_unlinkall_t *xop = &arg->xop_unlinkall;
1373 	hammer2_chain_t *parent;
1374 	hammer2_chain_t *chain;
1375 	hammer2_key_t key_next;
1376 	int cache_index = -1;
1377 
1378 	/*
1379 	 * We need the precise parent chain to issue the deletion.
1380 	 */
1381 	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
1382 				     HAMMER2_RESOLVE_ALWAYS);
1383 	chain = NULL;
1384 	if (parent == NULL) {
1385 		/* XXX error */
1386 		goto done;
1387 	}
1388 	chain = hammer2_chain_lookup(&parent, &key_next,
1389 				     xop->key_beg, xop->key_end,
1390 				     &cache_index,
1391 				     HAMMER2_LOOKUP_ALWAYS);
1392 	while (chain) {
1393 		hammer2_chain_delete(parent, chain,
1394 				     xop->head.mtid, HAMMER2_DELETE_PERMANENT);
1395 		hammer2_xop_feed(&xop->head, chain, thr->clindex, chain->error);
1396 		/* depend on function to unlock the shared lock */
1397 		chain = hammer2_chain_next(&parent, chain, &key_next,
1398 					   key_next, xop->key_end,
1399 					   &cache_index,
1400 					   HAMMER2_LOOKUP_ALWAYS);
1401 	}
1402 done:
1403 	hammer2_xop_feed(&xop->head, NULL, thr->clindex, ENOENT);
1404 	if (parent) {
1405 		hammer2_chain_unlock(parent);
1406 		hammer2_chain_drop(parent);
1407 	}
1408 	if (chain) {
1409 		hammer2_chain_unlock(chain);
1410 		hammer2_chain_drop(chain);
1411 	}
1412 }
1413 
1414 void
1415 hammer2_inode_xop_connect(hammer2_thread_t *thr, hammer2_xop_t *arg)
1416 {
1417 	hammer2_xop_connect_t *xop = &arg->xop_connect;
1418 	hammer2_inode_data_t *wipdata;
1419 	hammer2_chain_t *parent;
1420 	hammer2_chain_t *chain;
1421 	hammer2_pfs_t *pmp;
1422 	hammer2_key_t key_dummy;
1423 	int cache_index = -1;
1424 	int error;
1425 
1426 	/*
1427 	 * Get directory, then issue a lookup to prime the parent chain
1428 	 * for the create.  The lookup is expected to fail.
1429 	 */
1430 	pmp = xop->head.ip1->pmp;
1431 	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
1432 				     HAMMER2_RESOLVE_ALWAYS);
1433 	if (parent == NULL) {
1434 		chain = NULL;
1435 		error = EIO;
1436 		goto fail;
1437 	}
1438 	chain = hammer2_chain_lookup(&parent, &key_dummy,
1439 				     xop->lhc, xop->lhc,
1440 				     &cache_index, 0);
1441 	if (chain) {
1442 		hammer2_chain_unlock(chain);
1443 		hammer2_chain_drop(chain);
1444 		chain = NULL;
1445 		error = EEXIST;
1446 		goto fail;
1447 	}
1448 
1449 	/*
1450 	 * Adjust the filename in the inode, set the name key.
1451 	 *
1452 	 * NOTE: Frontend must also adjust ip2->meta on success, we can't
1453 	 *	 do it here.
1454 	 */
1455 	chain = hammer2_inode_chain(xop->head.ip2, thr->clindex,
1456 				    HAMMER2_RESOLVE_ALWAYS);
1457 	hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
1458 	wipdata = &chain->data->ipdata;
1459 
1460 	hammer2_inode_modify(xop->head.ip2);
1461 	if (xop->head.name1) {
1462 		bzero(wipdata->filename, sizeof(wipdata->filename));
1463 		bcopy(xop->head.name1, wipdata->filename, xop->head.name1_len);
1464 		wipdata->meta.name_len = xop->head.name1_len;
1465 	}
1466 	wipdata->meta.name_key = xop->lhc;
1467 
1468 	/*
1469 	 * Reconnect the chain to the new parent directory
1470 	 */
1471 	error = hammer2_chain_create(&parent, &chain,
1472 				     pmp, HAMMER2_METH_DEFAULT,
1473 				     xop->lhc, 0,
1474 				     HAMMER2_BREF_TYPE_INODE,
1475 				     HAMMER2_INODE_BYTES,
1476 				     xop->head.mtid, 0, 0);
1477 
1478 	/*
1479 	 * Feed result back.
1480 	 */
1481 fail:
1482 	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
1483 	if (parent) {
1484 		hammer2_chain_unlock(parent);
1485 		hammer2_chain_drop(parent);
1486 	}
1487 	if (chain) {
1488 		hammer2_chain_unlock(chain);
1489 		hammer2_chain_drop(chain);
1490 	}
1491 }
1492 
1493 /*
1494  * Synchronize the in-memory inode with the chain.
1495  */
1496 void
1497 hammer2_inode_xop_chain_sync(hammer2_thread_t *thr, hammer2_xop_t *arg)
1498 {
1499 	hammer2_xop_fsync_t *xop = &arg->xop_fsync;
1500 	hammer2_chain_t	*parent;
1501 	hammer2_chain_t	*chain;
1502 	int error;
1503 
1504 	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
1505 				     HAMMER2_RESOLVE_ALWAYS);
1506 	chain = NULL;
1507 	if (parent == NULL) {
1508 		error = EIO;
1509 		goto done;
1510 	}
1511 	if (parent->error) {
1512 		error = parent->error;
1513 		goto done;
1514 	}
1515 
1516 	error = 0;
1517 
1518 	if ((xop->ipflags & HAMMER2_INODE_RESIZED) == 0) {
1519 		/* osize must be ignored */
1520 	} else if (xop->meta.size < xop->osize) {
1521 		/*
1522 		 * We must delete any chains beyond the EOF.  The chain
1523 		 * straddling the EOF will be pending in the bioq.
1524 		 */
1525 		hammer2_key_t lbase;
1526 		hammer2_key_t key_next;
1527 		int cache_index = -1;
1528 
1529 		lbase = (xop->meta.size + HAMMER2_PBUFMASK64) &
1530 			~HAMMER2_PBUFMASK64;
1531 		chain = hammer2_chain_lookup(&parent, &key_next,
1532 					     lbase, HAMMER2_KEY_MAX,
1533 					     &cache_index,
1534 					     HAMMER2_LOOKUP_NODATA |
1535 					     HAMMER2_LOOKUP_NODIRECT);
1536 		while (chain) {
1537 			/*
1538 			 * Degenerate embedded case, nothing to loop on
1539 			 */
1540 			switch (chain->bref.type) {
1541 			case HAMMER2_BREF_TYPE_INODE:
1542 				KKASSERT(0);
1543 				break;
1544 			case HAMMER2_BREF_TYPE_DATA:
1545 				hammer2_chain_delete(parent, chain,
1546 						     xop->head.mtid,
1547 						     HAMMER2_DELETE_PERMANENT);
1548 				break;
1549 			}
1550 			chain = hammer2_chain_next(&parent, chain, &key_next,
1551 						   key_next, HAMMER2_KEY_MAX,
1552 						   &cache_index,
1553 						   HAMMER2_LOOKUP_NODATA |
1554 						   HAMMER2_LOOKUP_NODIRECT);
1555 		}
1556 
1557 		/*
1558 		 * Reset to point at inode for following code, if necessary.
1559 		 */
1560 		if (parent->bref.type != HAMMER2_BREF_TYPE_INODE) {
1561 			hammer2_chain_unlock(parent);
1562 			hammer2_chain_drop(parent);
1563 			parent = hammer2_inode_chain(xop->head.ip1,
1564 						     thr->clindex,
1565 						     HAMMER2_RESOLVE_ALWAYS);
1566 			kprintf("hammer2: TRUNCATE RESET on '%s'\n",
1567 				parent->data->ipdata.filename);
1568 		}
1569 	}
1570 
1571 	/*
1572 	 * Sync the inode meta-data, potentially clear the blockset area
1573 	 * of direct data so it can be used for blockrefs.
1574 	 */
1575 	hammer2_chain_modify(parent, xop->head.mtid, 0, 0);
1576 	parent->data->ipdata.meta = xop->meta;
1577 	if (xop->clear_directdata) {
1578 		bzero(&parent->data->ipdata.u.blockset,
1579 		      sizeof(parent->data->ipdata.u.blockset));
1580 	}
1581 done:
1582 	if (chain) {
1583 		hammer2_chain_unlock(chain);
1584 		hammer2_chain_drop(chain);
1585 	}
1586 	if (parent) {
1587 		hammer2_chain_unlock(parent);
1588 		hammer2_chain_drop(parent);
1589 	}
1590 	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
1591 }
1592 
1593