xref: /dragonfly/sys/vfs/hammer2/hammer2_inode.c (revision 745703c7)
/*
 * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define INODE_DEBUG	0

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
	     hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	if (ip1->meta.inum < ip2->meta.inum)
		return(-1);
	if (ip1->meta.inum > ip2->meta.inum)
		return(1);
	return(0);
}

/*
 * HAMMER2 inode locks
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
 *	  inode locking function will automatically set the RDONLY flag.
 *
 *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *	  Most front-end inode locks do.
 *
 *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *	  the inode data be resolved.  This is used by the syncthr because
 *	  it can run on an unresolved/out-of-sync cluster, and also by the
 *	  vnode reclamation code to avoid unnecessary I/O (particularly when
 *	  disposing of hundreds of thousands of cached vnodes).
 *
 * The inode locking function locks the inode itself.  It does not lock
 * the underlying chains; those are selected and locked separately via
 * hammer2_inode_chain() and related functions.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *       inode would otherwise confuse multiple lock holders of the inode.
 *
 * NOTE: In-memory inodes always point to hardlink targets (the actual file),
 *	 and never point to a hardlink pointer.
 *
 * NOTE: If caller passes HAMMER2_RESOLVE_RDONLY the exclusive locking code
 *	 will feel free to reduce the chain set in the cluster as an
 *	 optimization.  It will still be validated against the quorum if
 *	 appropriate, but the optimization might be able to reduce data
 *	 accesses to one node.  This flag is automatically set if the inode
 *	 is locked with HAMMER2_RESOLVE_SHARED.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
	hammer2_inode_ref(ip);

	/*
	 * Inode structure mutex
	 */
	if (how & HAMMER2_RESOLVE_SHARED) {
		/*how |= HAMMER2_RESOLVE_RDONLY; not used */
		hammer2_mtx_sh(&ip->lock);
	} else {
		hammer2_mtx_ex(&ip->lock);
	}
}
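
/*
 * Typical front-end usage sketch (illustrative only; error handling
 * elided).  A shared lock suffices for read-only access to ip->meta:
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *	...read ip->meta...
 *	hammer2_inode_unlock(ip);
 *
 * Modifications require the default exclusive lock (how == 0).
 */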

/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
	hammer2_chain_t *chain;

	hammer2_spin_sh(&ip->cluster_spin);
	if (clindex >= ip->cluster.nchains)
		chain = NULL;
	else
		chain = ip->cluster.array[clindex].chain;
	if (chain) {
		hammer2_chain_ref(chain);
		hammer2_spin_unsh(&ip->cluster_spin);
		hammer2_chain_lock(chain, how);
	} else {
		hammer2_spin_unsh(&ip->cluster_spin);
	}
	return chain;
}
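
/*
 * Usage sketch (illustrative only): backend threads typically select
 * the chain for their cluster index and dispose of both the lock and
 * the ref when done:
 *
 *	chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
 *	if (chain) {
 *		...operate on chain...
 *		hammer2_chain_unlock(chain);
 *		hammer2_chain_drop(chain);
 *	}
 */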

hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
			       hammer2_chain_t **parentp, int how)
{
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;

	for (;;) {
		hammer2_spin_sh(&ip->cluster_spin);
		if (clindex >= ip->cluster.nchains)
			chain = NULL;
		else
			chain = ip->cluster.array[clindex].chain;
		if (chain) {
			hammer2_chain_ref(chain);
			hammer2_spin_unsh(&ip->cluster_spin);
			hammer2_chain_lock(chain, how);
		} else {
			hammer2_spin_unsh(&ip->cluster_spin);
		}

		/*
		 * Callers must pass a valid cluster index, a NULL chain
		 * would otherwise be dereferenced below.
		 */
		KKASSERT(chain != NULL);

		/*
		 * Get parent, lock order must be (parent, chain).
		 */
		parent = chain->parent;
		hammer2_chain_ref(parent);
		hammer2_chain_unlock(chain);
		hammer2_chain_lock(parent, how);
		hammer2_chain_lock(chain, how);
		if (ip->cluster.array[clindex].chain == chain &&
		    chain->parent == parent) {
			break;
		}

		/*
		 * Retry
		 */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	*parentp = parent;

	return chain;
}

void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
	hammer2_mtx_unlock(&ip->lock);
	hammer2_inode_drop(ip);
}

/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and lock will be released
 * on return.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
	return hammer2_mtx_temp_release(&ip->lock);
}

/*
 * Restore a lock that was temporarily released.
 */
void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
	hammer2_mtx_temp_restore(&ip->lock, ostate);
}

/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
	int wasexclusive;

	if (mtx_islocked_ex(&ip->lock)) {
		wasexclusive = 1;
	} else {
		hammer2_mtx_unlock(&ip->lock);
		hammer2_mtx_ex(&ip->lock);
		wasexclusive = 0;
	}
	return wasexclusive;
}

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
	if (wasexclusive == 0)
		mtx_downgrade(&ip->lock);
}
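
/*
 * The upgrade/downgrade pair brackets a code section which temporarily
 * requires the exclusive lock (illustrative sketch, mirroring the
 * hammer2_igetv() allocation path below):
 *
 *	wasexclusive = hammer2_inode_lock_upgrade(ip);
 *	...modify state requiring the exclusive lock...
 *	hammer2_inode_lock_downgrade(ip, wasexclusive);
 */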

/*
 * Lookup an inode by inode number.  Returns a referenced but unlocked
 * inode on success, or NULL (the super-root spmp does not index inodes).
 * The caller is responsible for dropping the ref when done.
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	KKASSERT(pmp);
	if (pmp->spmp_hmp) {
		ip = NULL;
	} else {
		hammer2_spin_ex(&pmp->inum_spin);
		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
		if (ip)
			hammer2_inode_ref(ip);
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return(ip);
}
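
/*
 * Usage sketch (illustrative only): a typical consumer locks the
 * returned inode and later disposes of both the lock and the lookup
 * ref:
 *
 *	ip = hammer2_inode_lookup(pmp, inum);
 *	if (ip) {
 *		hammer2_inode_lock(ip, 0);
 *		...
 *		hammer2_inode_unlock(ip);	(lock and its ref)
 *		hammer2_inode_drop(ip);		(lookup ref)
 *	}
 */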

/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
	atomic_add_int(&ip->refs, 1);
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *pip;
	u_int refs;

	while (ip) {
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			pmp = ip->pmp;
			KKASSERT(pmp);
			hammer2_spin_ex(&pmp->inum_spin);

			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
					atomic_clear_int(&ip->flags,
						     HAMMER2_INODE_ONRBTREE);
					RB_REMOVE(hammer2_inode_tree,
						  &pmp->inum_tree, ip);
				}
				hammer2_spin_unex(&pmp->inum_spin);

				pip = ip->pip;
				ip->pip = NULL;
				ip->pmp = NULL;

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL, NULL);

				/*
				 * We have to drop pip (if non-NULL) to
				 * dispose of our implied reference from
				 * ip->pip.  We can simply loop on it.
				 */
				kfree(ip, pmp->minode);
				atomic_add_long(&pmp->inmem_inodes, -1);
				ip = pip;
				/* continue with pip (can be NULL) */
			} else {
				hammer2_spin_unex(&ip->pmp->inum_spin);
			}
		} else {
			/*
			 * Non-zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}

/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	pmp = ip->pmp;
	KKASSERT(pmp != NULL);
	*errorp = 0;

	for (;;) {
		/*
		 * Attempt to reuse an existing vnode assignment.  It is
		 * possible to race a reclaim so the vget() may fail.  The
		 * inode must be unlocked during the vget() to avoid a
		 * deadlock against a reclaim.
		 */
		int wasexclusive;

		vp = ip->vp;
		if (vp) {
			/*
			 * Inode must be unlocked during the vget() to avoid
			 * possible deadlocks, but leave the ip ref intact.
			 *
			 * vnode is held to prevent destruction during the
			 * vget().  The vget() can still fail if we lost
			 * a reclaim race on the vnode.
			 */
			hammer2_mtx_state_t ostate;

			vhold(vp);
			ostate = hammer2_inode_lock_temp_release(ip);
			if (vget(vp, LK_EXCLUSIVE)) {
				vdrop(vp);
				hammer2_inode_lock_temp_restore(ip, ostate);
				continue;
			}
			hammer2_inode_lock_temp_restore(ip, ostate);
			vdrop(vp);
			/* vp still locked and ref from vget */
			if (ip->vp != vp) {
				kprintf("hammer2: igetv race %p/%p\n",
					ip->vp, vp);
				vput(vp);
				continue;
			}
			*errorp = 0;
			break;
		}

		/*
		 * No vnode exists, allocate a new vnode.  Beware of
		 * allocation races.  This function will return an
		 * exclusively locked and referenced vnode.
		 */
		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
		if (*errorp) {
			kprintf("hammer2: igetv getnewvnode failed %d\n",
				*errorp);
			vp = NULL;
			break;
		}

		/*
		 * Lock the inode and check for an allocation race.
		 */
		wasexclusive = hammer2_inode_lock_upgrade(ip);
		if (ip->vp != NULL) {
			vp->v_type = VBAD;
			vx_put(vp);
			hammer2_inode_lock_downgrade(ip, wasexclusive);
			continue;
		}

		switch (ip->meta.type) {
		case HAMMER2_OBJTYPE_DIRECTORY:
			vp->v_type = VDIR;
			break;
		case HAMMER2_OBJTYPE_REGFILE:
			vp->v_type = VREG;
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_SOFTLINK:
			/*
			 * XXX for now we are using the generic file_read
			 * and file_write code so we need a buffer cache
			 * association.
			 */
			vp->v_type = VLNK;
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_CDEV:
			vp->v_type = VCHR;
			/* fall through */
		case HAMMER2_OBJTYPE_BDEV:
			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
				vp->v_type = VBLK;
			addaliasu(vp,
				  ip->meta.rmajor,
				  ip->meta.rminor);
			break;
		case HAMMER2_OBJTYPE_FIFO:
			vp->v_type = VFIFO;
			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
			break;
		default:
			panic("hammer2: unhandled objtype %d",
			      ip->meta.type);
			break;
		}

		if (ip == pmp->iroot)
			vsetflags(vp, VROOT);

		vp->v_data = ip;
		ip->vp = vp;
		hammer2_inode_ref(ip);		/* vp association */
		hammer2_inode_lock_downgrade(ip, wasexclusive);
		break;
	}

	/*
	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
	 * (vp can be NULL here, do not dereference it in the debug path).
	 */
	if ((hammer2_debug & 0x0002) && vp) {
		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
			vp, vp->v_refcnt, vp->v_auxrefs);
	}
	return (vp);
}
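
/*
 * Usage sketch (illustrative only): hammer2_igetv() is normally called
 * from a front-end VOP with the inode locked:
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *	vp = hammer2_igetv(ip, &error);
 *	hammer2_inode_unlock(ip);
 *	if (error == 0) {
 *		...use the exclusively locked, referenced vp...
 *	}
 */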

/*
 * Returns the inode associated with the passed-in cluster, creating the
 * inode if necessary and synchronizing it to the passed-in cluster otherwise.
 * When synchronizing, if idx >= 0, only cluster index (idx) is synchronized.
 * Otherwise the whole cluster is synchronized.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and referenced; the caller may dispose
 * of both the lock and the ref via hammer2_inode_unlock() (the unlock path
 * drops the ref).  However, if the caller needs to resolve a hardlink it
 * must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
		  hammer2_cluster_t *cluster, int idx)
{
	hammer2_inode_t *nip;
	const hammer2_inode_data_t *iptmp;
	const hammer2_inode_data_t *nipdata;

	KKASSERT(cluster == NULL ||
		 hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);
	KKASSERT(pmp);

	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 *
	 * Cluster can be NULL during the initial pfs allocation.
	 */
again:
	while (cluster) {
		iptmp = &hammer2_cluster_rdata(cluster)->ipdata;
		nip = hammer2_inode_lookup(pmp, iptmp->meta.inum);
		if (nip == NULL)
			break;

		hammer2_mtx_ex(&nip->lock);

		/*
		 * Handle SMP race (not applicable to the super-root spmp
		 * which can't index inodes due to duplicative inode numbers).
		 */
		if (pmp->spmp_hmp == NULL &&
		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			continue;
		}
		if (idx >= 0)
			hammer2_inode_repoint_one(nip, cluster, idx);
		else
			hammer2_inode_repoint(nip, NULL, cluster);

		return nip;
	}

	/*
	 * We couldn't find the inode number, create a new inode.
	 */
	nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
	spin_init(&nip->cluster_spin, "h2clspin");
	atomic_add_long(&pmp->inmem_inodes, 1);
	hammer2_pfs_memory_inc(pmp);
	hammer2_pfs_memory_wakeup(pmp);
	if (pmp->spmp_hmp)
		nip->flags = HAMMER2_INODE_SROOT;

	/*
	 * Initialize nip's cluster.  A cluster is provided for normal
	 * inodes but typically not for the super-root or PFS inodes.
	 */
	nip->cluster.refs = 1;
	nip->cluster.pmp = pmp;
	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
	if (cluster) {
		nipdata = &hammer2_cluster_rdata(cluster)->ipdata;
		nip->meta = nipdata->meta;
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
		hammer2_inode_repoint(nip, NULL, cluster);
	} else {
		nip->meta.inum = 1;		/* PFS inum is always 1 XXX */
		/* mtime will be updated when a cluster is available */
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);/*XXX*/
	}

	nip->pip = dip;				/* can be NULL */
	if (dip)
		hammer2_inode_ref(dip);	/* ref dip for nip->pip */

	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock() call.
	 */
	nip->refs = 1;
	hammer2_mtx_init(&nip->lock, "h2inode");
	hammer2_mtx_ex(&nip->lock);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp->spmp_hmp == NULL) {
		hammer2_spin_ex(&pmp->inum_spin);
		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
			hammer2_spin_unex(&pmp->inum_spin);
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
		hammer2_spin_unex(&pmp->inum_spin);
	}

	return (nip);
}
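
/*
 * Disposal sketch (illustrative only): the inode returned by
 * hammer2_inode_get() is referenced and exclusively locked, matching
 * the state after a hammer2_inode_lock() call.  Callers which keep a
 * long-term pointer add their own ref before unlocking, as
 * hammer2_inode_install_hidden() does below:
 *
 *	nip = hammer2_inode_get(pmp, dip, cluster, -1);
 *	hammer2_inode_ref(nip);		(optional long-term ref)
 *	hammer2_inode_unlock(nip);
 */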

/*
 * Create a new inode in the specified directory using the vattr to
 * figure out the type of inode.
 *
 * If no error occurs the new inode is returned with its cluster locked,
 * otherwise NULL is returned and *errorp is set to the error.
 *
 * If vap and/or cred are NULL the related fields are not set and the
 * inode type defaults to a directory.  This is used when creating PFSs
 * under the super-root, so the inode number is set to 1 in this case.
 *
 * dip is not locked on entry.
 *
 * NOTE: When used to create a snapshot, the inode is temporarily associated
 *	 with the super-root spmp. XXX should pass new pmp for snapshot.
 */
hammer2_inode_t *
hammer2_inode_create(hammer2_inode_t *dip,
		     struct vattr *vap, struct ucred *cred,
		     const uint8_t *name, size_t name_len, hammer2_key_t lhc,
		     hammer2_key_t inum, uint8_t type, uint8_t target_type,
		     int flags, int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *nip;
	int error;
	uid_t xuid;
	uuid_t dip_uid;
	uuid_t dip_gid;
	uint32_t dip_mode;
	uint8_t dip_comp_algo;
	uint8_t dip_check_algo;

	if (name)
		lhc = hammer2_dirhash(name, name_len);
	*errorp = 0;
	nip = NULL;

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * NOTE: hidden inodes do not have iterators.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_lock(dip, 0);

	dip_uid = dip->meta.uid;
	dip_gid = dip->meta.gid;
	dip_mode = dip->meta.mode;
	dip_comp_algo = dip->meta.comp_algo;
	dip_check_algo = dip->meta.check_algo;

	/*
	 * If name specified, locate an unused key in the collision space.
	 * Otherwise use the passed-in lhc directly.
	 */
	if (name) {
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the inode with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	xop->flags = flags;
	bzero(&xop->meta, sizeof(xop->meta));

	if (vap) {
		xop->meta.type = hammer2_get_obj_type(vap->va_type);

		switch (xop->meta.type) {
		case HAMMER2_OBJTYPE_CDEV:
		case HAMMER2_OBJTYPE_BDEV:
			xop->meta.rmajor = vap->va_rmajor;
			xop->meta.rminor = vap->va_rminor;
			break;
		default:
			break;
		}
		type = xop->meta.type;
	} else {
		xop->meta.type = type;
		xop->meta.target_type = target_type;
	}
	xop->meta.inum = inum;

	/* Inherit parent's inode compression mode. */
	xop->meta.comp_algo = dip_comp_algo;
	xop->meta.check_algo = dip_check_algo;
	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&xop->meta.ctime);
	xop->meta.mtime = xop->meta.ctime;
	if (vap)
		xop->meta.mode = vap->va_mode;
	xop->meta.nlinks = 1;
	if (vap) {
		if (dip && dip->pmp) {
			xuid = hammer2_to_unix_xid(&dip_uid);
			xuid = vop_helper_create_uid(dip->pmp->mp,
						     dip_mode,
						     xuid,
						     cred,
						     &vap->va_mode);
		} else {
			/* super-root has no dip and/or pmp */
			xuid = 0;
		}
		if (vap->va_vaflags & VA_UID_UUID_VALID)
			xop->meta.uid = vap->va_uid_uuid;
		else if (vap->va_uid != (uid_t)VNOVAL)
			hammer2_guid_to_uuid(&xop->meta.uid, vap->va_uid);
		else
			hammer2_guid_to_uuid(&xop->meta.uid, xuid);

		if (vap->va_vaflags & VA_GID_UUID_VALID)
			xop->meta.gid = vap->va_gid_uuid;
		else if (vap->va_gid != (gid_t)VNOVAL)
			hammer2_guid_to_uuid(&xop->meta.gid, vap->va_gid);
		else if (dip)
			xop->meta.gid = dip_gid;
	}

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK ||
	    xop->meta.type == HAMMER2_OBJTYPE_HARDLINK) {
		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}
	if (name)
		hammer2_xop_setname(&xop->head, name, name_len);
	xop->meta.name_len = name_len;
	xop->meta.name_key = lhc;
	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

	hammer2_xop_start(&xop->head, hammer2_inode_xop_create);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("CREATE INODE %*.*s\n",
		(int)name_len, (int)name_len, name);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Set up the new inode if not a hardlink pointer.
	 *
	 * NOTE: *_get() integrates chain's lock into the inode lock.
	 *
	 * NOTE: Only one new inode can currently be created per
	 *	 transaction.  If the need arises we can adjust
	 *	 hammer2_trans_init() to allow more.
	 *
	 * NOTE: nipdata will have chain's blockset data.
	 */
	if (type != HAMMER2_OBJTYPE_HARDLINK) {
		nip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		nip->comp_heuristic = 0;
	} else {
		nip = NULL;
	}

done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	hammer2_inode_unlock(dip);

	return (nip);
}
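
/*
 * Usage sketch (illustrative only; transaction setup and inode number
 * allocation elided).  A front-end VOP creating a file might do:
 *
 *	nip = hammer2_inode_create(dip, vap, cred, name, name_len,
 *				   0, inum, 0, 0, 0, &error);
 *	if (error == 0) {
 *		*vpp = hammer2_igetv(nip, &error);
 *		hammer2_inode_unlock(nip);
 *	}
 */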

/*
 * Connect the disconnected inode (ip) to the directory (dip) with the
 * specified (name, name_len).  If name is NULL, (lhc) will be used as
 * the directory key and the inode's embedded name will not be modified
 * for future recovery purposes.
 *
 * dip and ip must both be locked exclusively (dip in particular to avoid
 * lhc collisions).
 */
int
hammer2_inode_connect(hammer2_inode_t *dip, hammer2_inode_t *ip,
		      const char *name, size_t name_len,
		      hammer2_key_t lhc)
{
	hammer2_xop_scanlhc_t *sxop;
	hammer2_xop_connect_t *xop;
	hammer2_inode_t *opip;
	hammer2_key_t lhcbase;
	int error;

	/*
	 * Calculate the lhc and resolve the collision space.
	 */
	if (name) {
		lhc = lhcbase = hammer2_dirhash(name, name_len);
		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != ENOENT)
				goto done;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = ENOSPC;
			goto done;
		}
	} else {
		error = 0;
	}

	/*
	 * Formally reconnect the in-memory structure.  ip must
	 * be locked exclusively to safely change ip->pip.
	 */
	if (ip->pip != dip) {
		hammer2_inode_ref(dip);
		opip = ip->pip;
		ip->pip = dip;
		if (opip)
			hammer2_inode_drop(opip);
	}

	/*
	 * Connect her up
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	if (name)
		hammer2_xop_setname(&xop->head, name, name_len);
	hammer2_xop_setip2(&xop->head, ip);
	xop->lhc = lhc;
	hammer2_xop_start(&xop->head, hammer2_inode_xop_connect);
	error = hammer2_xop_collect(&xop->head, 0);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

	/*
	 * On success make the same adjustments to ip->meta or the
	 * next flush may blow up the chain.
	 */
	if (error == 0) {
		hammer2_inode_modify(ip);
		ip->meta.name_key = lhc;
		if (name)
			ip->meta.name_len = name_len;
	}
done:
	return error;
}

/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * The caller must hold the inode exclusively locked.  The passed-in
 * cluster, if not NULL, must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
		      hammer2_cluster_t *cluster)
{
	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	hammer2_inode_t *opip;
	int i;

	bzero(dropch, sizeof(dropch));

	/*
	 * Replace chains in ip->cluster with chains from cluster and
	 * adjust the focus if necessary.
	 *
	 * NOTE: nchain and/or ochain can be NULL due to gaps
	 *	 in the cluster arrays.
	 */
	hammer2_spin_ex(&ip->cluster_spin);
	for (i = 0; cluster && i < cluster->nchains; ++i) {
		/*
		 * Do not replace elements which are the same.  Also handle
		 * element count discrepancies.
		 */
		nchain = cluster->array[i].chain;
		if (i < ip->cluster.nchains) {
			ochain = ip->cluster.array[i].chain;
			if (ochain == nchain)
				continue;
		} else {
			ochain = NULL;
		}

		/*
		 * Make adjustments
		 */
		ip->cluster.array[i].chain = nchain;
		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[i].flags |= cluster->array[i].flags &
					      HAMMER2_CITEM_INVALID;
		if (nchain)
			hammer2_chain_ref(nchain);
		dropch[i] = ochain;
	}

	/*
	 * Release any left-over chains in ip->cluster.
	 */
	while (i < ip->cluster.nchains) {
		nchain = ip->cluster.array[i].chain;
		if (nchain) {
			ip->cluster.array[i].chain = NULL;
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		dropch[i] = nchain;
		++i;
	}

	/*
	 * Fixup fields.  Note that the inode-embedded cluster is never
	 * directly locked.
	 */
	if (cluster) {
		ip->cluster.nchains = cluster->nchains;
		ip->cluster.focus = cluster->focus;
		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
	} else {
		ip->cluster.nchains = 0;
		ip->cluster.focus = NULL;
		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
	}

	/*
	 * Repoint ip->pip if requested (non-NULL pip).
	 */
	if (pip && ip->pip != pip) {
		opip = ip->pip;
		hammer2_inode_ref(pip);
		ip->pip = pip;
	} else {
		opip = NULL;
	}
	hammer2_spin_unex(&ip->cluster_spin);

	/*
	 * Cleanup outside of spinlock
	 */
	while (--i >= 0) {
		if (dropch[i])
			hammer2_chain_drop(dropch[i]);
	}
	if (opip)
		hammer2_inode_drop(opip);
}

/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * the focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
			  int idx)
{
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	hammer2_spin_ex(&ip->cluster_spin);
	KKASSERT(idx < cluster->nchains);
	if (idx < ip->cluster.nchains) {
		ochain = ip->cluster.array[idx].chain;
		nchain = cluster->array[idx].chain;
	} else {
		ochain = NULL;
		nchain = cluster->array[idx].chain;

		/*
		 * Extend the cluster array through (idx), zeroing and
		 * invalidating the new elements.  nchains must be bumped
		 * after the loop or the loop would terminate immediately.
		 */
		for (i = ip->cluster.nchains; i <= idx; ++i) {
			bzero(&ip->cluster.array[i],
			      sizeof(ip->cluster.array[i]));
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		ip->cluster.nchains = idx + 1;
	}
	if (ochain != nchain) {
		/*
		 * Make adjustments.
		 */
		ip->cluster.array[idx].chain = nchain;
		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
						HAMMER2_CITEM_INVALID;
	}
	hammer2_spin_unex(&ip->cluster_spin);
	if (ochain != nchain) {
		if (nchain)
			hammer2_chain_ref(nchain);
		if (ochain)
			hammer2_chain_drop(ochain);
	}
}

/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks and
 * moving deleted inodes to the hidden directory if they are still open.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode must
 * be moved to the hidden directory.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
{
	hammer2_pfs_t *pmp;
	int error;

	pmp = ip->pmp;

	/*
	 * Decrement nlinks.  If this is the last link and the file is
	 * not open, the chain has already been removed and we don't bother
	 * dirtying the inode.
	 */
	if (ip->meta.nlinks == 1) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
		if (isopen == 0)
			return 0;
	}

	hammer2_inode_modify(ip);
	--ip->meta.nlinks;
	if ((int64_t)ip->meta.nlinks < 0)
		ip->meta.nlinks = 0;	/* safety */

	/*
	 * If nlinks is not zero we are done.  However, this should only be
	 * possible with a hardlink target.  If the inode is an embedded
	 * hardlink nlinks should have dropped to zero, warn and proceed
	 * with the next step.
	 */
	if (ip->meta.nlinks) {
		if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
			return 0;
		kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
			(intmax_t)ip->meta.nlinks);
		return 0;
	}

	/*
	 * nlinks is now zero, the inode should have already been deleted.
	 * If the file is open it was deleted non-permanently and must be
	 * moved to the hidden directory.
	 *
	 * When moving to the hidden directory we force the name_key to the
	 * inode number to avoid collisions.
	 */
	if (isopen) {
		hammer2_inode_lock(pmp->ihidden, 0);
		error = hammer2_inode_connect(pmp->ihidden, ip,
					      NULL, 0, ip->meta.inum);
		hammer2_inode_unlock(pmp->ihidden);
	} else {
		error = 0;
	}
	return error;
}

/*
 * This is called from the mount code to initialize pmp->ihidden
 */
void
hammer2_inode_install_hidden(hammer2_pfs_t *pmp)
{
	int error;

	if (pmp->ihidden)
		return;

	hammer2_trans_init(pmp, 0);
	hammer2_inode_lock(pmp->iroot, 0);

	/*
	 * Find the hidden directory
	 */
	{
		hammer2_xop_lookup_t *xop;

		xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING);
		xop->lhc = HAMMER2_INODE_HIDDENDIR;
		hammer2_xop_start(&xop->head, hammer2_xop_lookup);
		error = hammer2_xop_collect(&xop->head, 0);

		if (error == 0) {
			/*
			 * Found the hidden directory
			 */
			kprintf("PFS FOUND HIDDEN DIR\n");
			pmp->ihidden = hammer2_inode_get(pmp, pmp->iroot,
							 &xop->head.cluster,
							 -1);
			hammer2_inode_ref(pmp->ihidden);
			hammer2_inode_unlock(pmp->ihidden);
		}
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Create the hidden directory if it could not be found.
	 */
	if (error == ENOENT) {
		kprintf("PFS CREATE HIDDEN DIR\n");

		pmp->ihidden = hammer2_inode_create(pmp->iroot, NULL, NULL,
						    NULL, 0,
				/* lhc */	    HAMMER2_INODE_HIDDENDIR,
				/* inum */	    HAMMER2_INODE_HIDDENDIR,
				/* type */	    HAMMER2_OBJTYPE_DIRECTORY,
				/* target_type */   0,
				/* flags */	    0,
						    &error);
		if (pmp->ihidden) {
			hammer2_inode_ref(pmp->ihidden);
			hammer2_inode_unlock(pmp->ihidden);
		}
		if (error)
			kprintf("PFS CREATE ERROR %d\n", error);
	}

	/*
	 * Scan the hidden directory on-mount and destroy its contents
	 */
	if (error == 0) {
		hammer2_xop_unlinkall_t *xop;

		hammer2_inode_lock(pmp->ihidden, 0);
		xop = hammer2_xop_alloc(pmp->ihidden, HAMMER2_XOP_MODIFYING);
		xop->key_beg = HAMMER2_KEY_MIN;
		xop->key_end = HAMMER2_KEY_MAX;
		hammer2_xop_start(&xop->head, hammer2_inode_xop_unlinkall);

		while ((error = hammer2_xop_collect(&xop->head, 0)) == 0) {
			;
		}
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		hammer2_inode_unlock(pmp->ihidden);
	}

	hammer2_inode_unlock(pmp->iroot);
	hammer2_trans_done(pmp);
}

/*
 * Find the directory common to both fdip and tdip.
 *
 * Returns a held but not locked inode.  Caller typically locks the inode,
 * and when through unlocks AND drops it.
 */
hammer2_inode_t *
hammer2_inode_common_parent(hammer2_inode_t *fdip, hammer2_inode_t *tdip)
{
	hammer2_inode_t *scan1;
	hammer2_inode_t *scan2;

	/*
	 * We used to have a depth field but it complicated matters too
	 * much for directory renames.  So now it's ugly.  Check for
	 * simple cases before giving up and doing it the expensive way.
	 *
	 * XXX need a bottom-up topology stability lock
	 */
	if (fdip == tdip || fdip == tdip->pip) {
		hammer2_inode_ref(fdip);
		return(fdip);
	}
	if (fdip->pip == tdip) {
		hammer2_inode_ref(tdip);
		return(tdip);
	}

	/*
	 * XXX not MPSAFE
	 */
	for (scan1 = fdip; scan1->pmp == fdip->pmp; scan1 = scan1->pip) {
		scan2 = tdip;
		while (scan2->pmp == tdip->pmp) {
			if (scan1 == scan2) {
				hammer2_inode_ref(scan1);
				return(scan1);
			}
			scan2 = scan2->pip;
			if (scan2 == NULL)
				break;
		}
	}
	panic("hammer2_inode_common_parent: no common parent %p %p\n",
	      fdip, tdip);
	/* NOT REACHED */
	return(NULL);
}

/*
 * Mark an inode as being modified, meaning that the caller will modify
 * ip->meta.
 *
 * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
 *	 only modifying the in-memory inode.  A modify_tid is synchronized
 *	 later when the inode gets flushed.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	if (ip->vp)
		vsetisdirty(ip->vp);
}
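
/*
 * Usage sketch (illustrative only): ip->meta may only be changed while
 * the inode is exclusively locked, and must be marked modified first:
 *
 *	hammer2_inode_lock(ip, 0);
 *	hammer2_inode_modify(ip);
 *	ip->meta.mtime = ...;
 *	hammer2_inode_unlock(ip);
 *
 * The new meta-data is synchronized to the chains later via
 * hammer2_inode_fsync() from within a transaction.
 */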

/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.
 *
 * Called with a locked inode inside a transaction.
 */
void
hammer2_inode_fsync(hammer2_inode_t *ip)
{
	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
		hammer2_xop_fsync_t *xop;
		int error;

		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->clear_directdata = 0;
		if (ip->flags & HAMMER2_INODE_RESIZED) {
			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
				xop->clear_directdata = 1;
			}
			xop->osize = ip->osize;
		} else {
			xop->osize = ip->meta.size;	/* safety */
		}
		xop->ipflags = ip->flags;
		xop->meta = ip->meta;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
					     HAMMER2_INODE_MODIFIED);
		hammer2_xop_start(&xop->head, hammer2_inode_xop_fsync);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: unable to fsync inode %p\n", ip);
			/*
			atomic_set_int(&ip->flags,
				       xop->ipflags & (HAMMER2_INODE_RESIZED |
						       HAMMER2_INODE_MODIFIED));
			*/
			/* XXX return error somehow? */
		}
	}
}

/*
 * This handles unlinked open files after the vnode is finally dereferenced.
 * To avoid deadlocks it cannot be called from the normal vnode recycling
 * path, so we call it (1) after an unlink, rmdir, or rename, (2) on every
 * flush, and (3) on umount.
 *
 * Caller must be in a transaction.
 */
void
hammer2_inode_run_unlinkq(hammer2_pfs_t *pmp)
{
	hammer2_xop_destroy_t *xop;
	hammer2_inode_unlink_t *ipul;
	hammer2_inode_t *ip;
	int error;

	if (TAILQ_EMPTY(&pmp->unlinkq))
		return;

	LOCKSTART;
	hammer2_spin_ex(&pmp->list_spin);
	while ((ipul = TAILQ_FIRST(&pmp->unlinkq)) != NULL) {
		TAILQ_REMOVE(&pmp->unlinkq, ipul, entry);
		hammer2_spin_unex(&pmp->list_spin);
		ip = ipul->ip;
		kfree(ipul, pmp->minode);

		hammer2_inode_lock(ip, 0);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, hammer2_inode_xop_destroy);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

		hammer2_inode_unlock(ip);
		hammer2_inode_drop(ip);			/* ipul ref */

		hammer2_spin_ex(&pmp->list_spin);
	}
	hammer2_spin_unex(&pmp->list_spin);
	LOCKSTOP;
}

/*
 * Inode create helper (threaded, backend)
 *
 * Used by ncreate, nmknod, nsymlink, nmkdir.
 * Used by nlink and rename to create HARDLINK pointers.
 *
 * Frontend holds the parent directory ip locked exclusively.  We
 * create the inode and feed the exclusively locked chain to the
 * frontend.
 */
void
hammer2_inode_xop_create(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_create_t *xop = &arg->xop_create;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	int cache_index = -1;
	int error;

	if (hammer2_debug & 0x0001)
		kprintf("inode_create lhc %016jx clindex %d\n",
			xop->lhc, clindex);

	chain = NULL;
	parent = hammer2_inode_chain(xop->head.ip, clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	if (parent == NULL) {
		error = EIO;
		goto fail;
	}
	chain = hammer2_chain_lookup(&parent, &key_next,
				     xop->lhc, xop->lhc,
				     &cache_index, 0);
	if (chain) {
		hammer2_chain_unlock(chain);
		error = EEXIST;
		goto fail;
	}

	error = hammer2_chain_create(&parent, &chain,
				     xop->head.ip->pmp,
				     xop->lhc, 0,
				     HAMMER2_BREF_TYPE_INODE,
				     HAMMER2_INODE_BYTES,
				     xop->head.mtid, xop->flags);
	if (error == 0) {
		hammer2_chain_modify(chain, xop->head.mtid, 0);
		chain->data->ipdata.meta = xop->meta;
		if (xop->head.name) {
			bcopy(xop->head.name,
			      chain->data->ipdata.filename,
			      xop->head.name_len);
			chain->data->ipdata.meta.name_len = xop->head.name_len;
		}
		chain->data->ipdata.meta.name_key = xop->lhc;
	}
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
					  HAMMER2_RESOLVE_SHARED);
	}
fail:
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_xop_feed(&xop->head, chain, clindex, error);
	if (chain)
		hammer2_chain_drop(chain);
}

/*
 * Inode delete helper (backend, threaded)
 *
 * Generally used by hammer2_inode_run_unlinkq()
 */
void
hammer2_inode_xop_destroy(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_destroy_t *xop = &arg->xop_destroy;
	hammer2_pfs_t *pmp;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_inode_t *ip;
	int error;

	/*
	 * We need the precise parent chain to issue the deletion.
	 */
	ip = xop->head.ip;
	pmp = ip->pmp;
	chain = NULL;

	parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
	if (parent)
		hammer2_chain_getparent(&parent, HAMMER2_RESOLVE_ALWAYS);
	if (parent == NULL) {
		error = EIO;
		goto done;
	}
	chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
	if (chain == NULL) {
		error = EIO;
		goto done;
	}
	hammer2_chain_delete(parent, chain, xop->head.mtid, 0);
	error = 0;
done:
	hammer2_xop_feed(&xop->head, NULL, clindex, error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}

/*
 * Inode mass-delete helper (backend, threaded)
 *
 * Permanently deletes all chains under the directory inode between
 * key_beg and key_end, feeding each deleted chain back to the frontend.
 * Used on-mount to empty out the hidden directory.
 */
void
hammer2_inode_xop_unlinkall(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_unlinkall_t *xop = &arg->xop_unlinkall;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	int cache_index = -1;

	/*
	 * We need the precise parent chain to issue the deletion.
	 */
	parent = hammer2_inode_chain(xop->head.ip, clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	chain = NULL;
	if (parent == NULL) {
		/* XXX error */
		goto done;
	}
	chain = hammer2_chain_lookup(&parent, &key_next,
				     xop->key_beg, xop->key_end,
				     &cache_index,
				     HAMMER2_LOOKUP_ALWAYS);
	while (chain) {
		hammer2_chain_delete(parent, chain,
				     xop->head.mtid, HAMMER2_DELETE_PERMANENT);
		hammer2_chain_unlock(chain);
		hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
					  HAMMER2_RESOLVE_SHARED);
		hammer2_xop_feed(&xop->head, chain, clindex, chain->error);
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, xop->key_end,
					   &cache_index,
					   HAMMER2_LOOKUP_ALWAYS |
					   HAMMER2_LOOKUP_NOUNLOCK);
	}
done:
	hammer2_xop_feed(&xop->head, NULL, clindex, ENOENT);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}

/*
 * Inode connect helper (backend, threaded)
 *
 * Used by hammer2_inode_connect() to reconnect the target inode's chain
 * (ip2) under the directory (ip) and adjust its embedded filename.
 */
void
hammer2_inode_xop_connect(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_connect_t *xop = &arg->xop_connect;
	hammer2_inode_data_t *wipdata;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_pfs_t *pmp;
	hammer2_key_t key_dummy;
	int cache_index = -1;
	int error;

	/*
	 * Get directory, then issue a lookup to prime the parent chain
	 * for the create.  The lookup is expected to fail.
	 */
	pmp = xop->head.ip->pmp;
	parent = hammer2_inode_chain(xop->head.ip, clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	if (parent == NULL) {
		chain = NULL;
		error = EIO;
		goto fail;
	}
	chain = hammer2_chain_lookup(&parent, &key_dummy,
				     xop->lhc, xop->lhc,
				     &cache_index, 0);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
		error = EEXIST;
		goto fail;
	}

	/*
	 * Adjust the filename in the inode, set the name key.
	 *
	 * NOTE: Frontend must also adjust ip2->meta on success, we can't
	 *	 do it here.
	 */
	chain = hammer2_inode_chain(xop->head.ip2, clindex,
				    HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_modify(chain, xop->head.mtid, 0);
	wipdata = &chain->data->ipdata;

	hammer2_inode_modify(xop->head.ip2);
	if (xop->head.name) {
		bzero(wipdata->filename, sizeof(wipdata->filename));
		bcopy(xop->head.name, wipdata->filename, xop->head.name_len);
		wipdata->meta.name_len = xop->head.name_len;
	}
	wipdata->meta.name_key = xop->lhc;

	/*
	 * Reconnect the chain to the new parent directory
	 */
	error = hammer2_chain_create(&parent, &chain, pmp,
				     xop->lhc, 0,
				     HAMMER2_BREF_TYPE_INODE,
				     HAMMER2_INODE_BYTES,
				     xop->head.mtid, 0);

	/*
	 * Feed result back.
	 */
fail:
	hammer2_xop_feed(&xop->head, NULL, clindex, error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}

/*
 * Inode meta-data synchronization helper (backend, threaded)
 *
 * Flushes the frontend's in-memory inode meta-data into the chains,
 * deleting any data chains beyond EOF if the file was truncated.
 */
void
hammer2_inode_xop_fsync(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_fsync_t *xop = &arg->xop_fsync;
	hammer2_chain_t	*parent;
	hammer2_chain_t	*chain;
	int error;

	parent = hammer2_inode_chain(xop->head.ip, clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	chain = NULL;
	if (parent == NULL) {
		error = EIO;
		goto done;
	}
	if (parent->error) {
		error = parent->error;
		goto done;
	}

	error = 0;

	if ((xop->ipflags & HAMMER2_INODE_RESIZED) == 0) {
		/* osize must be ignored */
	} else if (xop->meta.size < xop->osize) {
		/*
		 * We must delete any chains beyond the EOF.  The chain
		 * straddling the EOF will be pending in the bioq.
		 */
		hammer2_key_t lbase;
		hammer2_key_t key_next;
		int cache_index = -1;

		lbase = (xop->meta.size + HAMMER2_PBUFMASK64) &
			~HAMMER2_PBUFMASK64;
		chain = hammer2_chain_lookup(&parent, &key_next,
					     lbase, HAMMER2_KEY_MAX,
					     &cache_index,
					     HAMMER2_LOOKUP_NODATA |
					     HAMMER2_LOOKUP_NODIRECT);
		while (chain) {
			/*
			 * Degenerate embedded case, nothing to loop on
			 */
			switch (chain->bref.type) {
			case HAMMER2_BREF_TYPE_INODE:
				KKASSERT(0);
				break;
			case HAMMER2_BREF_TYPE_DATA:
				hammer2_chain_delete(parent, chain,
						     xop->head.mtid,
						     HAMMER2_DELETE_PERMANENT);
				break;
			}
			chain = hammer2_chain_next(&parent, chain, &key_next,
						   key_next, HAMMER2_KEY_MAX,
						   &cache_index,
						   HAMMER2_LOOKUP_NODATA |
						   HAMMER2_LOOKUP_NODIRECT);
		}
	}

	/*
	 * Sync the inode meta-data, potentially clear the blockset area
	 * of direct data so it can be used for blockrefs.
	 */
	hammer2_chain_modify(parent, xop->head.mtid, 0);
	parent->data->ipdata.meta = xop->meta;
	if (xop->clear_directdata) {
		bzero(&parent->data->ipdata.u.blockset,
		      sizeof(parent->data->ipdata.u.blockset));
	}
done:
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_xop_feed(&xop->head, NULL, clindex, error);
}
1699