xref: /dragonfly/sys/vfs/hammer2/hammer2_inode.c (revision 5a08817b)
1 /*
2  * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 #include <sys/cdefs.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/types.h>
39 #include <sys/lock.h>
40 #include <sys/uuid.h>
41 
42 #include "hammer2.h"
43 
44 #define INODE_DEBUG	0
45 
46 RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
47 	     hammer2_tid_t, meta.inum);
48 
49 int
50 hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
51 {
52 	if (ip1->meta.inum < ip2->meta.inum)
53 		return(-1);
54 	if (ip1->meta.inum > ip2->meta.inum)
55 		return(1);
56 	return(0);
57 }
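
/*
 * Illustrative note: the RB_GENERATE2() form above supports direct
 * keyed lookups against meta.inum, so no dummy inode is needed for a
 * search, e.g. (as done in hammer2_inode_lookup() below):
 *
 *	ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
 */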
58 
59 /*
60  * HAMMER2 inode locks
61  *
62  * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
63  * flags for options:
64  *
65  *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
66  *	  inode locking function will automatically set the RDONLY flag.
67  *
68  *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
69  *	  Most front-end inode locks do.
70  *
71  *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
72  *	  the inode data be resolved.  This is used by the syncthr because
73  *	  it can run on an unresolved/out-of-sync cluster, and also by the
74  *	  vnode reclamation code to avoid unnecessary I/O (particularly when
75  *	  disposing of hundreds of thousands of cached vnodes).
76  *
77  * The inode locking function locks the inode itself, resolves any stale
78  * chains in the inode's cluster, and allocates a fresh copy of the
79  * cluster with 1 ref and all the underlying chains locked.
80  *
81  * ip->cluster will be stable while the inode is locked.
82  *
83  * NOTE: We don't combine the inode/chain lock because putting away an
84  *       inode would otherwise confuse multiple lock holders of the inode.
85  *
86  * NOTE: In-memory inodes always point to hardlink targets (the actual file),
87  *	 and never point to a hardlink pointer.
88  *
89  * NOTE: If caller passes HAMMER2_RESOLVE_RDONLY the exclusive locking code
90  *	 will feel free to reduce the chain set in the cluster as an
91  *	 optimization.  It will still be validated against the quorum if
92  *	 appropriate, but the optimization might be able to reduce data
93  *	 accesses to one node.  This flag is automatically set if the inode
94  *	 is locked with HAMMER2_RESOLVE_SHARED.
95  */
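
/*
 * Usage sketch (illustrative only; "ip" is assumed to be a valid,
 * referenced inode): typical exclusive and shared lock sequences.
 */
#if 0
	hammer2_inode_lock(ip, 0);			/* exclusive */
	/* ... inspect or modify ip->meta ... */
	hammer2_inode_unlock(ip);

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);	/* shared */
	/* ... read-only access ... */
	hammer2_inode_unlock(ip);
#endif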
96 void
97 hammer2_inode_lock(hammer2_inode_t *ip, int how)
98 {
99 	hammer2_inode_ref(ip);
100 
101 	/*
102 	 * Inode structure mutex
103 	 */
104 	if (how & HAMMER2_RESOLVE_SHARED) {
105 		/*how |= HAMMER2_RESOLVE_RDONLY; not used */
106 		hammer2_mtx_sh(&ip->lock);
107 	} else {
108 		hammer2_mtx_ex(&ip->lock);
109 	}
110 }
111 
112 /*
113  * Select a chain out of an inode's cluster and lock it.
114  *
115  * The inode does not have to be locked.
116  */
117 hammer2_chain_t *
118 hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
119 {
120 	hammer2_chain_t *chain;
121 
122 	hammer2_spin_sh(&ip->cluster_spin);
123 	if (clindex >= ip->cluster.nchains)
124 		chain = NULL;
125 	else
126 		chain = ip->cluster.array[clindex].chain;
127 	if (chain) {
128 		hammer2_chain_ref(chain);
129 		hammer2_spin_unsh(&ip->cluster_spin);
130 		hammer2_chain_lock(chain, how);
131 	} else {
132 		hammer2_spin_unsh(&ip->cluster_spin);
133 	}
134 	return chain;
135 }
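
/*
 * Usage sketch (illustrative only; "ip" and "chain" are assumed):
 * select cluster element 0 with its media data resolved, then release
 * the ref and lock this function acquired.
 */
#if 0
	chain = hammer2_inode_chain(ip, 0, HAMMER2_RESOLVE_ALWAYS);
	if (chain) {
		/* ... use chain ... */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
#endif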
136 
137 hammer2_chain_t *
138 hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
139 			       hammer2_chain_t **parentp, int how)
140 {
141 	hammer2_chain_t *chain;
142 	hammer2_chain_t *parent;
143 
144 	for (;;) {
145 		hammer2_spin_sh(&ip->cluster_spin);
146 		if (clindex >= ip->cluster.nchains)
147 			chain = NULL;
148 		else
149 			chain = ip->cluster.array[clindex].chain;
150 		if (chain) {
151 			hammer2_chain_ref(chain);
152 			hammer2_spin_unsh(&ip->cluster_spin);
153 			hammer2_chain_lock(chain, how);
154 		} else {
155 			hammer2_spin_unsh(&ip->cluster_spin);
156 		}
157 
158 		/*
159 		 * Get parent, lock order must be (parent, chain).
160 		 */
161 		parent = chain->parent;	/* chain expected non-NULL here */
162 		hammer2_chain_ref(parent);
163 		hammer2_chain_unlock(chain);
164 		hammer2_chain_lock(parent, how);
165 		hammer2_chain_lock(chain, how);
166 		if (ip->cluster.array[clindex].chain == chain &&
167 		    chain->parent == parent) {
168 			break;
169 		}
170 
171 		/*
172 		 * Retry
173 		 */
174 		hammer2_chain_unlock(chain);
175 		hammer2_chain_drop(chain);
176 		hammer2_chain_unlock(parent);
177 		hammer2_chain_drop(parent);
178 	}
179 	*parentp = parent;
180 
181 	return chain;
182 }
183 
184 void
185 hammer2_inode_unlock(hammer2_inode_t *ip)
186 {
187 	hammer2_mtx_unlock(&ip->lock);
188 	hammer2_inode_drop(ip);
189 }
190 
191 /*
192  * Temporarily release a lock held shared or exclusive.  The caller
193  * must hold the lock shared or exclusive on call; the lock will be
194  * released on return.
195  *
196  * hammer2_inode_lock_temp_restore() restores such a lock.
197  */
198 hammer2_mtx_state_t
199 hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
200 {
201 	return hammer2_mtx_temp_release(&ip->lock);
202 }
203 
204 void
205 hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
206 {
207 	hammer2_mtx_temp_restore(&ip->lock, ostate);
208 }
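
/*
 * Usage sketch (illustrative only): bracketing a blocking operation
 * that must not be performed while holding ip->lock.  See the vget()
 * handling in hammer2_igetv() below for an in-tree example.
 */
#if 0
	hammer2_mtx_state_t ostate;

	ostate = hammer2_inode_lock_temp_release(ip);
	/* ... blocking operation ... */
	hammer2_inode_lock_temp_restore(ip, ostate);
#endif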
209 
210 /*
211  * Upgrade a shared inode lock to exclusive and return.  If the inode lock
212  * is already held exclusively this is a NOP.
213  *
214  * The caller MUST hold the inode lock either shared or exclusive on call
215  * and will own the lock exclusively on return.
216  *
217  * Returns non-zero if the lock was already exclusive prior to the upgrade.
218  */
219 int
220 hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
221 {
222 	int wasexclusive;
223 
224 	if (mtx_islocked_ex(&ip->lock)) {
225 		wasexclusive = 1;
226 	} else {
227 		hammer2_mtx_unlock(&ip->lock);
228 		hammer2_mtx_ex(&ip->lock);
229 		wasexclusive = 0;
230 	}
231 	return wasexclusive;
232 }
233 
234 /*
235  * Downgrade an inode lock from exclusive to shared only if the inode
236  * lock was previously shared.  If the inode lock was previously exclusive,
237  * this is a NOP.
238  */
239 void
240 hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
241 {
242 	if (wasexclusive == 0)
243 		mtx_downgrade(&ip->lock);
244 }
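
/*
 * Usage sketch (illustrative only): a temporary upgrade which restores
 * the original lock state afterwards; hammer2_igetv() below uses this
 * pattern around vnode assignment.
 */
#if 0
	int wasexclusive;

	wasexclusive = hammer2_inode_lock_upgrade(ip);
	/* ... operations requiring the exclusive lock ... */
	hammer2_inode_lock_downgrade(ip, wasexclusive);
#endif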
245 
246 /*
247  * Lookup an inode by inode number
248  */
249 hammer2_inode_t *
250 hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
251 {
252 	hammer2_inode_t *ip;
253 
254 	KKASSERT(pmp);
255 	if (pmp->spmp_hmp) {
256 		ip = NULL;
257 	} else {
258 		hammer2_spin_ex(&pmp->inum_spin);
259 		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
260 		if (ip)
261 			hammer2_inode_ref(ip);
262 		hammer2_spin_unex(&pmp->inum_spin);
263 	}
264 	return(ip);
265 }
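
/*
 * Usage sketch (illustrative only; "pmp" and "inum" are assumed): the
 * returned inode is referenced but not locked, so the caller must drop
 * it when finished.
 */
#if 0
	ip = hammer2_inode_lookup(pmp, inum);
	if (ip) {
		/* ... ip is referenced, not locked ... */
		hammer2_inode_drop(ip);
	}
#endif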
266 
267 /*
268  * Adding a ref to an inode is only legal if the inode already has at least
269  * one ref.
270  *
271  * (can be called with spinlock held)
272  */
273 void
274 hammer2_inode_ref(hammer2_inode_t *ip)
275 {
276 	atomic_add_int(&ip->refs, 1);
277 	if (hammer2_debug & 0x80000) {
278 		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
279 		print_backtrace(8);
280 	}
281 }
282 
283 /*
284  * Drop an inode reference, freeing the inode when the last reference goes
285  * away.
286  */
287 void
288 hammer2_inode_drop(hammer2_inode_t *ip)
289 {
290 	hammer2_pfs_t *pmp;
291 	hammer2_inode_t *pip;
292 	u_int refs;
293 
294 	while (ip) {
295 		if (hammer2_debug & 0x80000) {
296 			kprintf("INODE-1 %p (%d->%d)\n",
297 				ip, ip->refs, ip->refs - 1);
298 			print_backtrace(8);
299 		}
300 		refs = ip->refs;
301 		cpu_ccfence();
302 		if (refs == 1) {
303 			/*
304 			 * Transition to zero, must interlock with
305 			 * the inode inumber lookup tree (if applicable).
306 			 * It should not be possible for anyone to race
307 			 * the transition to 0.
308 			 */
309 			pmp = ip->pmp;
310 			KKASSERT(pmp);
311 			hammer2_spin_ex(&pmp->inum_spin);
312 
313 			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
314 				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
315 				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
316 					atomic_clear_int(&ip->flags,
317 						     HAMMER2_INODE_ONRBTREE);
318 					RB_REMOVE(hammer2_inode_tree,
319 						  &pmp->inum_tree, ip);
320 				}
321 				hammer2_spin_unex(&pmp->inum_spin);
322 
323 				pip = ip->pip;
324 				ip->pip = NULL;
325 				ip->pmp = NULL;
326 
327 				/*
328 				 * Cleaning out ip->cluster isn't entirely
329 				 * trivial.
330 				 */
331 				hammer2_inode_repoint(ip, NULL, NULL);
332 
333 				/*
334 				 * We have to drop pip (if non-NULL) to
335 				 * dispose of our implied reference from
336 				 * ip->pip.  We can simply loop on it.
337 				 */
338 				kfree(ip, pmp->minode);
339 				atomic_add_long(&pmp->inmem_inodes, -1);
340 				ip = pip;
341 				/* continue with pip (can be NULL) */
342 			} else {
343 				hammer2_spin_unex(&ip->pmp->inum_spin);
344 			}
345 		} else {
346 			/*
347 			 * Non zero transition
348 			 */
349 			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
350 				break;
351 		}
352 	}
353 }
354 
355 /*
356  * Get the vnode associated with the given inode, allocating the vnode if
357  * necessary.  The vnode will be returned exclusively locked.
358  *
359  * The caller must lock the inode (shared or exclusive).
360  *
361  * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
362  * races.
363  */
364 struct vnode *
365 hammer2_igetv(hammer2_inode_t *ip, int *errorp)
366 {
367 	hammer2_pfs_t *pmp;
368 	struct vnode *vp;
369 
370 	pmp = ip->pmp;
371 	KKASSERT(pmp != NULL);
372 	*errorp = 0;
373 
374 	for (;;) {
375 		/*
376 		 * Attempt to reuse an existing vnode assignment.  It is
377 		 * possible to race a reclaim so the vget() may fail.  The
378 		 * inode must be unlocked during the vget() to avoid a
379 		 * deadlock against a reclaim.
380 		 */
381 		int wasexclusive;
382 
383 		vp = ip->vp;
384 		if (vp) {
385 			/*
386 			 * Inode must be unlocked during the vget() to avoid
387 			 * possible deadlocks, but leave the ip ref intact.
388 			 *
389 			 * vnode is held to prevent destruction during the
390 			 * vget().  The vget() can still fail if we lost
391 			 * a reclaim race on the vnode.
392 			 */
393 			hammer2_mtx_state_t ostate;
394 
395 			vhold(vp);
396 			ostate = hammer2_inode_lock_temp_release(ip);
397 			if (vget(vp, LK_EXCLUSIVE)) {
398 				vdrop(vp);
399 				hammer2_inode_lock_temp_restore(ip, ostate);
400 				continue;
401 			}
402 			hammer2_inode_lock_temp_restore(ip, ostate);
403 			vdrop(vp);
404 			/* vp still locked and ref from vget */
405 			if (ip->vp != vp) {
406 				kprintf("hammer2: igetv race %p/%p\n",
407 					ip->vp, vp);
408 				vput(vp);
409 				continue;
410 			}
411 			*errorp = 0;
412 			break;
413 		}
414 
415 		/*
416 		 * No vnode exists, allocate a new vnode.  Beware of
417 		 * allocation races.  This function will return an
418 		 * exclusively locked and referenced vnode.
419 		 */
420 		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
421 		if (*errorp) {
422 			kprintf("hammer2: igetv getnewvnode failed %d\n",
423 				*errorp);
424 			vp = NULL;
425 			break;
426 		}
427 
428 		/*
429 		 * Lock the inode and check for an allocation race.
430 		 */
431 		wasexclusive = hammer2_inode_lock_upgrade(ip);
432 		if (ip->vp != NULL) {
433 			vp->v_type = VBAD;
434 			vx_put(vp);
435 			hammer2_inode_lock_downgrade(ip, wasexclusive);
436 			continue;
437 		}
438 
439 		switch (ip->meta.type) {
440 		case HAMMER2_OBJTYPE_DIRECTORY:
441 			vp->v_type = VDIR;
442 			break;
443 		case HAMMER2_OBJTYPE_REGFILE:
444 			vp->v_type = VREG;
445 			vinitvmio(vp, ip->meta.size,
446 				  HAMMER2_LBUFSIZE,
447 				  (int)ip->meta.size & HAMMER2_LBUFMASK);
448 			break;
449 		case HAMMER2_OBJTYPE_SOFTLINK:
450 			/*
451 			 * XXX for now we are using the generic file_read
452 			 * and file_write code so we need a buffer cache
453 			 * association.
454 			 */
455 			vp->v_type = VLNK;
456 			vinitvmio(vp, ip->meta.size,
457 				  HAMMER2_LBUFSIZE,
458 				  (int)ip->meta.size & HAMMER2_LBUFMASK);
459 			break;
460 		case HAMMER2_OBJTYPE_CDEV:
461 			vp->v_type = VCHR;
462 			/* fall through */
463 		case HAMMER2_OBJTYPE_BDEV:
464 			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
465 			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
466 				vp->v_type = VBLK;
467 			addaliasu(vp,
468 				  ip->meta.rmajor,
469 				  ip->meta.rminor);
470 			break;
471 		case HAMMER2_OBJTYPE_FIFO:
472 			vp->v_type = VFIFO;
473 			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
474 			break;
475 		case HAMMER2_OBJTYPE_SOCKET:
476 			vp->v_type = VSOCK;
477 			break;
478 		default:
479 			panic("hammer2: unhandled objtype %d",
480 			      ip->meta.type);
481 			break;
482 		}
483 
484 		if (ip == pmp->iroot)
485 			vsetflags(vp, VROOT);
486 
487 		vp->v_data = ip;
488 		ip->vp = vp;
489 		hammer2_inode_ref(ip);		/* vp association */
490 		hammer2_inode_lock_downgrade(ip, wasexclusive);
491 		break;
492 	}
493 
494 	/*
495 	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
496 	 */
497 	if (hammer2_debug & 0x0002) {
498 		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
499 			vp, vp->v_refcnt, vp->v_auxrefs);
500 	}
501 	return (vp);
502 }
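
/*
 * Usage sketch (illustrative only; "ip" is assumed to be referenced):
 * a VOP-level caller holds the inode lock across the call and receives
 * an exclusively locked, referenced vnode on success.
 */
#if 0
	struct vnode *vp;
	int error;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	vp = hammer2_igetv(ip, &error);
	hammer2_inode_unlock(ip);
	if (vp) {
		/* ... use vp ... */
		vput(vp);
	}
#endif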
503 
504 /*
505  * Returns the inode associated with the passed-in cluster, creating the
506  * inode if necessary and synchronizing it to the passed-in cluster otherwise.
507  * When synchronizing, if idx >= 0, only cluster index (idx) is synchronized.
508  * Otherwise the whole cluster is synchronized.
509  *
510  * The passed-in cluster must be locked and will remain locked on return.
511  * The returned inode will be locked and the caller may dispose of both the
512  * lock and the ref via hammer2_inode_unlock() + hammer2_inode_drop().
513  * However, to resolve a hardlink the caller must ref/unlock/relock/drop it.
514  *
515  * The hammer2_inode structure regulates the interface between the high level
516  * kernel VNOPS API and the filesystem backend (the chains).
517  *
518  * On return the inode is locked with the supplied cluster.
519  */
520 hammer2_inode_t *
521 hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
522 		  hammer2_cluster_t *cluster, int idx)
523 {
524 	hammer2_inode_t *nip;
525 	const hammer2_inode_data_t *iptmp;
526 	const hammer2_inode_data_t *nipdata;
527 
528 	KKASSERT(cluster == NULL ||
529 		 hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);
530 	KKASSERT(pmp);
531 
532 	/*
533 	 * Interlocked lookup/ref of the inode.  This code is only needed
534 	 * when looking up inodes with nlinks != 0 (TODO: optimize out
535 	 * otherwise and test for duplicates).
536 	 *
537 	 * Cluster can be NULL during the initial pfs allocation.
538 	 */
539 again:
540 	while (cluster) {
541 		iptmp = &hammer2_cluster_rdata(cluster)->ipdata;
542 		nip = hammer2_inode_lookup(pmp, iptmp->meta.inum);
543 		if (nip == NULL)
544 			break;
545 
546 		hammer2_mtx_ex(&nip->lock);
547 
548 		/*
549 		 * Handle SMP race (not applicable to the super-root spmp
550 		 * which can't index inodes due to duplicative inode numbers).
551 		 */
552 		if (pmp->spmp_hmp == NULL &&
553 		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
554 			hammer2_mtx_unlock(&nip->lock);
555 			hammer2_inode_drop(nip);
556 			continue;
557 		}
558 		if (idx >= 0)
559 			hammer2_inode_repoint_one(nip, cluster, idx);
560 		else
561 			hammer2_inode_repoint(nip, NULL, cluster);
562 
563 		return nip;
564 	}
565 
566 	/*
567 	 * We couldn't find the inode number, create a new inode.
568 	 */
569 	nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
570 	spin_init(&nip->cluster_spin, "h2clspin");
571 	atomic_add_long(&pmp->inmem_inodes, 1);
572 	hammer2_pfs_memory_inc(pmp);
573 	hammer2_pfs_memory_wakeup(pmp);
574 	if (pmp->spmp_hmp)
575 		nip->flags = HAMMER2_INODE_SROOT;
576 
577 	/*
578 	 * Initialize nip's cluster.  A cluster is provided for normal
579 	 * inodes but typically not for the super-root or PFS inodes.
580 	 */
581 	nip->cluster.refs = 1;
582 	nip->cluster.pmp = pmp;
583 	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
584 	if (cluster) {
585 		nipdata = &hammer2_cluster_rdata(cluster)->ipdata;
586 		nip->meta = nipdata->meta;
587 		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
588 		hammer2_inode_repoint(nip, NULL, cluster);
589 	} else {
590 		nip->meta.inum = 1;		/* PFS inum is always 1 XXX */
591 		/* mtime will be updated when a cluster is available */
592 		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);/*XXX*/
593 	}
594 
595 	nip->pip = dip;				/* can be NULL */
596 	if (dip)
597 		hammer2_inode_ref(dip);	/* ref dip for nip->pip */
598 
599 	nip->pmp = pmp;
600 
601 	/*
602 	 * ref and lock on nip gives it state compatible to after a
603 	 * hammer2_inode_lock() call.
604 	 */
605 	nip->refs = 1;
606 	hammer2_mtx_init(&nip->lock, "h2inode");
607 	hammer2_mtx_ex(&nip->lock);
608 	/* combination of thread lock and chain lock == inode lock */
609 
610 	/*
611 	 * Attempt to add the inode.  If it fails we raced another inode
612 	 * get.  Undo all the work and try again.
613 	 */
614 	if (pmp->spmp_hmp == NULL) {
615 		hammer2_spin_ex(&pmp->inum_spin);
616 		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
617 			hammer2_spin_unex(&pmp->inum_spin);
618 			hammer2_mtx_unlock(&nip->lock);
619 			hammer2_inode_drop(nip);
620 			goto again;
621 		}
622 		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
623 		hammer2_spin_unex(&pmp->inum_spin);
624 	}
625 
626 	return (nip);
627 }
628 
629 /*
630  * Create a new inode in the specified directory using the vattr to
631  * figure out the type of inode.
632  *
633  * If no error occurs the new inode with its cluster locked is returned in
634  * *nipp, otherwise an error is returned and *nipp is set to NULL.
635  *
636  * If vap and/or cred are NULL the related fields are not set and the
637  * inode type defaults to a directory.  This is used when creating PFSs
638  * under the super-root, so the inode number is set to 1 in this case.
639  *
640  * dip is not locked on entry.
641  *
642  * NOTE: When used to create a snapshot, the inode is temporarily associated
643  *	 with the super-root spmp. XXX should pass new pmp for snapshot.
644  */
645 hammer2_inode_t *
646 hammer2_inode_create(hammer2_inode_t *dip,
647 		     struct vattr *vap, struct ucred *cred,
648 		     const uint8_t *name, size_t name_len, hammer2_key_t lhc,
649 		     hammer2_key_t inum, uint8_t type, uint8_t target_type,
650 		     int flags, int *errorp)
651 {
652 	hammer2_xop_create_t *xop;
653 	hammer2_inode_t *nip;
654 	int error;
655 	uid_t xuid;
656 	uuid_t dip_uid;
657 	uuid_t dip_gid;
658 	uint32_t dip_mode;
659 	uint8_t dip_comp_algo;
660 	uint8_t dip_check_algo;
661 
662 	if (name)
663 		lhc = hammer2_dirhash(name, name_len);
664 	*errorp = 0;
665 	nip = NULL;
666 
667 	/*
668 	 * Locate the inode or indirect block to create the new
669 	 * entry in.  At the same time check for key collisions
670 	 * and iterate until we don't get one.
671 	 *
672 	 * NOTE: hidden inodes do not have iterators.
673 	 *
674 	 * Lock the directory exclusively for now to guarantee that
675 	 * we can find an unused lhc for the name.  Due to collisions,
676 	 * two different creates can end up with the same lhc so we
677 	 * cannot depend on the OS to prevent the collision.
678 	 */
679 	hammer2_inode_lock(dip, 0);
680 
681 	dip_uid = dip->meta.uid;
682 	dip_gid = dip->meta.gid;
683 	dip_mode = dip->meta.mode;
684 	dip_comp_algo = dip->meta.comp_algo;
685 	dip_check_algo = dip->meta.check_algo;
686 
687 	/*
688 	 * If name specified, locate an unused key in the collision space.
689 	 * Otherwise use the passed-in lhc directly.
690 	 */
691 	if (name) {
692 		hammer2_xop_scanlhc_t *sxop;
693 		hammer2_key_t lhcbase;
694 
695 		lhcbase = lhc;
696 		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
697 		sxop->lhc = lhc;
698 		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
699 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
700 			if (lhc != sxop->head.cluster.focus->bref.key)
701 				break;
702 			++lhc;
703 		}
704 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
705 
706 		if (error) {
707 			if (error != ENOENT)
708 				goto done2;
709 			++lhc;
710 			error = 0;
711 		}
712 		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
713 			error = ENOSPC;
714 			goto done2;
715 		}
716 	}
717 
718 	/*
719 	 * Create the inode with the lhc as the key.
720 	 */
721 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
722 	xop->lhc = lhc;
723 	xop->flags = flags;
724 	bzero(&xop->meta, sizeof(xop->meta));
725 
726 	if (vap) {
727 		xop->meta.type = hammer2_get_obj_type(vap->va_type);
728 
729 		switch (xop->meta.type) {
730 		case HAMMER2_OBJTYPE_CDEV:
731 		case HAMMER2_OBJTYPE_BDEV:
732 			xop->meta.rmajor = vap->va_rmajor;
733 			xop->meta.rminor = vap->va_rminor;
734 			break;
735 		default:
736 			break;
737 		}
738 		type = xop->meta.type;
739 	} else {
740 		xop->meta.type = type;
741 		xop->meta.target_type = target_type;
742 	}
743 	xop->meta.inum = inum;
744 
745 	/* Inherit parent's inode compression mode. */
746 	xop->meta.comp_algo = dip_comp_algo;
747 	xop->meta.check_algo = dip_check_algo;
748 	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
749 	hammer2_update_time(&xop->meta.ctime);
750 	xop->meta.mtime = xop->meta.ctime;
751 	if (vap)
752 		xop->meta.mode = vap->va_mode;
753 	xop->meta.nlinks = 1;
754 	if (vap) {
755 		if (dip && dip->pmp) {
756 			xuid = hammer2_to_unix_xid(&dip_uid);
757 			xuid = vop_helper_create_uid(dip->pmp->mp,
758 						     dip_mode,
759 						     xuid,
760 						     cred,
761 						     &vap->va_mode);
762 		} else {
763 			/* super-root has no dip and/or pmp */
764 			xuid = 0;
765 		}
766 		if (vap->va_vaflags & VA_UID_UUID_VALID)
767 			xop->meta.uid = vap->va_uid_uuid;
768 		else if (vap->va_uid != (uid_t)VNOVAL)
769 			hammer2_guid_to_uuid(&xop->meta.uid, vap->va_uid);
770 		else
771 			hammer2_guid_to_uuid(&xop->meta.uid, xuid);
772 
773 		if (vap->va_vaflags & VA_GID_UUID_VALID)
774 			xop->meta.gid = vap->va_gid_uuid;
775 		else if (vap->va_gid != (gid_t)VNOVAL)
776 			hammer2_guid_to_uuid(&xop->meta.gid, vap->va_gid);
777 		else if (dip)
778 			xop->meta.gid = dip_gid;
779 	}
780 
781 	/*
782 	 * Regular files and softlinks allow a small amount of data to be
783 	 * directly embedded in the inode.  This flag will be cleared if
784 	 * the size is extended past the embedded limit.
785 	 */
786 	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
787 	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK ||
788 	    xop->meta.type == HAMMER2_OBJTYPE_HARDLINK) {
789 		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
790 	}
791 	if (name)
792 		hammer2_xop_setname(&xop->head, name, name_len);
793 	xop->meta.name_len = name_len;
794 	xop->meta.name_key = lhc;
795 	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
796 
797 	hammer2_xop_start(&xop->head, hammer2_inode_xop_create);
798 
799 	error = hammer2_xop_collect(&xop->head, 0);
800 #if INODE_DEBUG
801 	kprintf("CREATE INODE %*.*s\n",
802 		(int)name_len, (int)name_len, name);
803 #endif
804 
805 	if (error) {
806 		*errorp = error;
807 		goto done;
808 	}
809 
810 	/*
811 	 * Set up the new inode if not a hardlink pointer.
812 	 *
813 	 * NOTE: *_get() integrates chain's lock into the inode lock.
814 	 *
815 	 * NOTE: Only one new inode can currently be created per
816 	 *	 transaction.  If the need arises we can adjust
817 	 *	 hammer2_trans_init() to allow more.
818 	 *
819 	 * NOTE: nipdata will have chain's blockset data.
820 	 */
821 	if (type != HAMMER2_OBJTYPE_HARDLINK) {
822 		nip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
823 		nip->comp_heuristic = 0;
824 	} else {
825 		nip = NULL;
826 	}
827 
828 done:
829 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
830 done2:
831 	hammer2_inode_unlock(dip);
832 
833 	return (nip);
834 }
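
/*
 * Usage sketch (illustrative only; dip, vap, cred, name, name_len and
 * inum are assumptions of the sketch, and the caller must already be
 * in a transaction).  See hammer2_inode_install_hidden() below for an
 * in-tree caller that creates a directory without a vap.
 */
#if 0
	hammer2_inode_t *nip;
	int error;

	nip = hammer2_inode_create(dip, vap, cred, name, name_len, 0,
				   inum, 0, 0, 0, &error);
	if (nip)
		hammer2_inode_unlock(nip);
#endif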
835 
836 /*
837  * Connect the disconnected inode (ip) to the directory (dip) with the
838  * specified (name, name_len).  If name is NULL, (lhc) will be used as
839  * the directory key and the inode's embedded name will not be modified
840  * for future recovery purposes.
841  *
842  * dip and ip must both be locked exclusively (dip in particular to avoid
843  * lhc collisions).
844  */
845 int
846 hammer2_inode_connect(hammer2_inode_t *dip, hammer2_inode_t *ip,
847 		      const char *name, size_t name_len,
848 		      hammer2_key_t lhc)
849 {
850 	hammer2_xop_scanlhc_t *sxop;
851 	hammer2_xop_connect_t *xop;
852 	hammer2_inode_t *opip;
853 	hammer2_key_t lhcbase;
854 	int error;
855 
856 	/*
857 	 * Calculate the lhc and resolve the collision space.
858 	 */
859 	if (name) {
860 		lhc = lhcbase = hammer2_dirhash(name, name_len);
861 		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
862 		sxop->lhc = lhc;
863 		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
864 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
865 			if (lhc != sxop->head.cluster.focus->bref.key)
866 				break;
867 			++lhc;
868 		}
869 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
870 
871 		if (error) {
872 			if (error != ENOENT)
873 				goto done;
874 			++lhc;
875 			error = 0;
876 		}
877 		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
878 			error = ENOSPC;
879 			goto done;
880 		}
881 	} else {
882 		error = 0;
883 	}
884 
885 	/*
886 	 * Formally reconnect the in-memory structure.  ip must
887 	 * be locked exclusively to safely change ip->pip.
888 	 */
889 	if (ip->pip != dip) {
890 		hammer2_inode_ref(dip);
891 		opip = ip->pip;
892 		ip->pip = dip;
893 		if (opip)
894 			hammer2_inode_drop(opip);
895 	}
896 
897 	/*
898 	 * Connect her up
899 	 */
900 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
901 	if (name)
902 		hammer2_xop_setname(&xop->head, name, name_len);
903 	hammer2_xop_setip2(&xop->head, ip);
904 	xop->lhc = lhc;
905 	hammer2_xop_start(&xop->head, hammer2_inode_xop_connect);
906 	error = hammer2_xop_collect(&xop->head, 0);
907 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
908 
909 	/*
910 	 * On success make the same adjustments to ip->meta or the
911 	 * next flush may blow up the chain.
912 	 */
913 	if (error == 0) {
914 		hammer2_inode_modify(ip);
915 		ip->meta.name_key = lhc;
916 		if (name)
917 			ip->meta.name_len = name_len;
918 	}
919 done:
920 	return error;
921 }
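
/*
 * Usage sketch (mirrors the call in hammer2_inode_unlink_finisher()
 * below): reconnect an open-but-unlinked inode under the hidden
 * directory, keyed by its inode number.
 */
#if 0
	hammer2_inode_lock(pmp->ihidden, 0);
	error = hammer2_inode_connect(pmp->ihidden, ip,
				      NULL, 0, ip->meta.inum);
	hammer2_inode_unlock(pmp->ihidden);
#endif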
922 
923 /*
924  * Repoint ip->cluster's chains to cluster's chains and fixup the default
925  * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
926  * filters out invalid or non-matching elements.
927  *
928  * The caller must hold the inode exclusively locked; the cluster, if
929  * not NULL, must also be locked.
930  *
931  * Cluster may be NULL to clean out any chains in ip->cluster.
932  */
933 void
934 hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
935 		      hammer2_cluster_t *cluster)
936 {
937 	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
938 	hammer2_chain_t *ochain;
939 	hammer2_chain_t *nchain;
940 	hammer2_inode_t *opip;
941 	int i;
942 
943 	bzero(dropch, sizeof(dropch));
944 
945 	/*
946 	 * Replace chains in ip->cluster with chains from cluster and
947 	 * adjust the focus if necessary.
948 	 *
949 	 * NOTE: nchain and/or ochain can be NULL due to gaps
950 	 *	 in the cluster arrays.
951 	 */
952 	hammer2_spin_ex(&ip->cluster_spin);
953 	for (i = 0; cluster && i < cluster->nchains; ++i) {
954 		/*
955 		 * Do not replace elements which are the same.  Also handle
956 		 * element count discrepancies.
957 		 */
958 		nchain = cluster->array[i].chain;
959 		if (i < ip->cluster.nchains) {
960 			ochain = ip->cluster.array[i].chain;
961 			if (ochain == nchain)
962 				continue;
963 		} else {
964 			ochain = NULL;
965 		}
966 
967 		/*
968 		 * Make adjustments
969 		 */
970 		ip->cluster.array[i].chain = nchain;
971 		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
972 		ip->cluster.array[i].flags |= cluster->array[i].flags &
973 					      HAMMER2_CITEM_INVALID;
974 		if (nchain)
975 			hammer2_chain_ref(nchain);
976 		dropch[i] = ochain;
977 	}
978 
979 	/*
980 	 * Release any left-over chains in ip->cluster.
981 	 */
982 	while (i < ip->cluster.nchains) {
983 		nchain = ip->cluster.array[i].chain;
984 		if (nchain) {
985 			ip->cluster.array[i].chain = NULL;
986 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
987 		}
988 		dropch[i] = nchain;
989 		++i;
990 	}
991 
992 	/*
993 	 * Fixup fields.  Note that the inode-embedded cluster is never
994 	 * directly locked.
995 	 */
996 	if (cluster) {
997 		ip->cluster.nchains = cluster->nchains;
998 		ip->cluster.focus = cluster->focus;
999 		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
1000 	} else {
1001 		ip->cluster.nchains = 0;
1002 		ip->cluster.focus = NULL;
1003 		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
1004 	}
1005 
1006 	/*
1007 	 * Repoint ip->pip if requested (non-NULL pip).
1008 	 */
1009 	if (pip && ip->pip != pip) {
1010 		opip = ip->pip;
1011 		hammer2_inode_ref(pip);
1012 		ip->pip = pip;
1013 	} else {
1014 		opip = NULL;
1015 	}
1016 	hammer2_spin_unex(&ip->cluster_spin);
1017 
1018 	/*
1019 	 * Cleanup outside of spinlock
1020 	 */
1021 	while (--i >= 0) {
1022 		if (dropch[i])
1023 			hammer2_chain_drop(dropch[i]);
1024 	}
1025 	if (opip)
1026 		hammer2_inode_drop(opip);
1027 }
1028 
1029 /*
1030  * Repoint a single element from the cluster to the ip.  Used by the
1031  * synchronization threads to piecemeal update inodes.  Does not change
1032  * focus, and requires the inode to be re-locked to clean up flags (XXX).
1033  */
1034 void
1035 hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1036 			  int idx)
1037 {
1038 	hammer2_chain_t *ochain;
1039 	hammer2_chain_t *nchain;
1040 	int i;
1041 
1042 	hammer2_spin_ex(&ip->cluster_spin);
1043 	KKASSERT(idx < cluster->nchains);
1044 	if (idx < ip->cluster.nchains) {
1045 		ochain = ip->cluster.array[idx].chain;
1046 		nchain = cluster->array[idx].chain;
1047 	} else {
1048 		ochain = NULL;
1049 		nchain = cluster->array[idx].chain;
1050 		for (i = ip->cluster.nchains; i <= idx; ++i) {
1051 			bzero(&ip->cluster.array[i],
1052 			      sizeof(ip->cluster.array[i]));
1053 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
1054 		}
1055 		ip->cluster.nchains = idx + 1;
1056 	}
1057 	if (ochain != nchain) {
1058 		/*
1059 		 * Make adjustments.
1060 		 */
1061 		ip->cluster.array[idx].chain = nchain;
1062 		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
1063 		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
1064 						HAMMER2_CITEM_INVALID;
1065 	}
1066 	hammer2_spin_unex(&ip->cluster_spin);
1067 	if (ochain != nchain) {
1068 		if (nchain)
1069 			hammer2_chain_ref(nchain);
1070 		if (ochain)
1071 			hammer2_chain_drop(ochain);
1072 	}
1073 }
1074 
1075 /*
1076  * Called with a locked inode to finish unlinking an inode after xop_unlink
1077  * had been run.  This function is responsible for decrementing nlinks and
1078  * moving deleted inodes to the hidden directory if they are still open.
1079  *
1080  * We don't bother decrementing nlinks if the file is not open and this was
1081  * the last link.
1082  *
1083  * If the inode is a hardlink target its chain has not yet been deleted,
1084  * otherwise its chain has been deleted.
1085  *
1086  * If isopen then any prior deletion was not permanent and the inode must
1087  * be moved to the hidden directory.
1088  */
1089 int
1090 hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
1091 {
1092 	hammer2_pfs_t *pmp;
1093 	int error;
1094 
1095 	pmp = ip->pmp;
1096 
1097 	/*
1098 	 * Decrement nlinks.  If this is the last link and the file is
1099 	 * not open, the chain has already been removed and we don't bother
1100 	 * dirtying the inode.
1101 	 */
1102 	if (ip->meta.nlinks == 1) {
1103 		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
1104 		if (isopen == 0) {
1105 			atomic_set_int(&ip->flags, HAMMER2_INODE_ISDELETED);
1106 			return 0;
1107 		}
1108 	}
1109 
1110 	hammer2_inode_modify(ip);
1111 	--ip->meta.nlinks;
1112 	if ((int64_t)ip->meta.nlinks < 0)
1113 		ip->meta.nlinks = 0;	/* safety */
1114 
1115 	/*
1116 	 * If nlinks is not zero we are done.  However, this should only be
1117 	 * possible with a hardlink target.  If the inode is an embedded
1118 	 * hardlink nlinks should have dropped to zero; warn and return
1119 	 * without taking further action.
1120 	 */
1121 	if (ip->meta.nlinks) {
1122 		if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
1123 			return 0;
1124 		kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
1125 			(intmax_t)ip->meta.nlinks);
1126 		return 0;
1127 	}
1128 
1129 	/*
1130 	 * nlinks is now zero, the inode should have already been deleted.
1131 	 * If the file is open it was deleted non-permanently and must be
1132 	 * moved to the hidden directory.
1133 	 *
1134 	 * When moving to the hidden directory we force the name_key to the
1135 	 * inode number to avoid collisions.
1136 	 */
1137 	if (isopen) {
1138 		hammer2_inode_lock(pmp->ihidden, 0);
1139 		error = hammer2_inode_connect(pmp->ihidden, ip,
1140 					      NULL, 0, ip->meta.inum);
1141 		hammer2_inode_unlock(pmp->ihidden);
1142 	} else {
1143 		error = 0;
1144 	}
1145 	return error;
1146 }
1147 
1148 /*
1149  * This is called from the mount code to initialize pmp->ihidden
1150  */
1151 void
1152 hammer2_inode_install_hidden(hammer2_pfs_t *pmp)
1153 {
1154 	int error;
1155 
1156 	if (pmp->ihidden)
1157 		return;
1158 
1159 	hammer2_trans_init(pmp, 0);
1160 	hammer2_inode_lock(pmp->iroot, 0);
1161 
1162 	/*
1163 	 * Find the hidden directory
1164 	 */
1165 	{
1166 		hammer2_xop_lookup_t *xop;
1167 
1168 		xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING);
1169 		xop->lhc = HAMMER2_INODE_HIDDENDIR;
1170 		hammer2_xop_start(&xop->head, hammer2_xop_lookup);
1171 		error = hammer2_xop_collect(&xop->head, 0);
1172 
1173 		if (error == 0) {
1174 			/*
1175 			 * Found the hidden directory
1176 			 */
1177 			kprintf("PFS FOUND HIDDEN DIR\n");
1178 			pmp->ihidden = hammer2_inode_get(pmp, pmp->iroot,
1179 							 &xop->head.cluster,
1180 							 -1);
1181 			hammer2_inode_ref(pmp->ihidden);
1182 			hammer2_inode_unlock(pmp->ihidden);
1183 		}
1184 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1185 	}
1186 
1187 	/*
1188 	 * Create the hidden directory if it could not be found.
1189 	 */
1190 	if (error == ENOENT) {
1191 		kprintf("PFS CREATE HIDDEN DIR\n");
1192 
1193 		pmp->ihidden = hammer2_inode_create(pmp->iroot, NULL, NULL,
1194 						    NULL, 0,
1195 				/* lhc */	    HAMMER2_INODE_HIDDENDIR,
1196 				/* inum */	    HAMMER2_INODE_HIDDENDIR,
1197 				/* type */	    HAMMER2_OBJTYPE_DIRECTORY,
1198 				/* target_type */   0,
1199 				/* flags */	    0,
1200 						    &error);
1201 		if (pmp->ihidden) {
1202 			hammer2_inode_ref(pmp->ihidden);
1203 			hammer2_inode_unlock(pmp->ihidden);
1204 		}
1205 		if (error)
1206 			kprintf("PFS CREATE ERROR %d\n", error);
1207 	}
1208 
1209 	/*
1210 	 * Scan the hidden directory on-mount and destroy its contents
1211 	 */
1212 	if (error == 0) {
1213 		hammer2_xop_unlinkall_t *xop;
1214 
1215 		hammer2_inode_lock(pmp->ihidden, 0);
1216 		xop = hammer2_xop_alloc(pmp->ihidden, HAMMER2_XOP_MODIFYING);
1217 		xop->key_beg = HAMMER2_KEY_MIN;
1218 		xop->key_end = HAMMER2_KEY_MAX;
1219 		hammer2_xop_start(&xop->head, hammer2_inode_xop_unlinkall);
1220 
1221 		while ((error = hammer2_xop_collect(&xop->head, 0)) == 0) {
1222 			;
1223 		}
1224 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1225 		hammer2_inode_unlock(pmp->ihidden);
1226 	}
1227 
1228 	hammer2_inode_unlock(pmp->iroot);
1229 	hammer2_trans_done(pmp);
1230 }
1231 
1232 /*
1233  * Find the directory common to both fdip and tdip.
1234  *
1235  * Returns a held but not locked inode.  Caller typically locks the inode,
1236  * and when through unlocks AND drops it.
1237  */
1238 hammer2_inode_t *
1239 hammer2_inode_common_parent(hammer2_inode_t *fdip, hammer2_inode_t *tdip)
1240 {
1241 	hammer2_inode_t *scan1;
1242 	hammer2_inode_t *scan2;
1243 
1244 	/*
1245 	 * We used to have a depth field but it complicated matters too
1246 	 * much for directory renames.  So now it's ugly.  Check for
1247 	 * simple cases before giving up and doing it the expensive way.
1248 	 *
1249 	 * XXX need a bottom-up topology stability lock
1250 	 */
1251 	if (fdip == tdip || fdip == tdip->pip) {
1252 		hammer2_inode_ref(fdip);
1253 		return(fdip);
1254 	}
1255 	if (fdip->pip == tdip) {
1256 		hammer2_inode_ref(tdip);
1257 		return(tdip);
1258 	}
1259 
1260 	/*
1261 	 * XXX not MPSAFE
1262 	 */
1263 	for (scan1 = fdip; scan1->pmp == fdip->pmp; scan1 = scan1->pip) {
1264 		scan2 = tdip;
1265 		while (scan2->pmp == tdip->pmp) {
1266 			if (scan1 == scan2) {
1267 				hammer2_inode_ref(scan1);
1268 				return(scan1);
1269 			}
1270 			scan2 = scan2->pip;
1271 			if (scan2 == NULL)
1272 				break;
1273 		}
1274 	}
1275 	panic("hammer2_inode_common_parent: no common parent %p %p\n",
1276 	      fdip, tdip);
1277 	/* NOT REACHED */
1278 	return(NULL);
1279 }
1280 
1281 /*
1282  * Mark an inode as being modified, meaning that the caller will modify
1283  * ip->meta.
1284  *
1285  * If a vnode is present we set the vnode dirty and the nominal filesystem
1286  * sync will also handle synchronizing the inode meta-data.  If no vnode
1287  * is present we must ensure that the inode is on pmp->sideq.
1288  *
1289  * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
1290  *	 only modifying the in-memory inode.  A modify_tid is synchronized
1291  *	 later when the inode gets flushed.
1292  */
1293 void
1294 hammer2_inode_modify(hammer2_inode_t *ip)
1295 {
1296 	hammer2_inode_sideq_t *ipul;
1297 	hammer2_pfs_t *pmp;
1298 
1299 	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1300 	if (ip->vp) {
1301 		vsetisdirty(ip->vp);
1302 	} else if ((pmp = ip->pmp) != NULL) {
1303 		if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
1304 			ipul = kmalloc(sizeof(*ipul), pmp->minode,
1305 				       M_WAITOK | M_ZERO);
1306 			ipul->ip = ip;
1307 			hammer2_inode_ref(ip);
1308 			hammer2_spin_ex(&pmp->list_spin);
1309 			if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
1310 				atomic_set_int(&ip->flags,
1311 					       HAMMER2_INODE_ONSIDEQ);
1312 				TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
1313 				hammer2_spin_unex(&pmp->list_spin);
1314 			} else {
1315 				hammer2_spin_unex(&pmp->list_spin);
1316 				hammer2_inode_drop(ip);
1317 				kfree(ipul, pmp->minode);
1318 			}
1319 		}
1320 	}
1321 }
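
/*
 * Usage sketch (illustrative only): callers flag the inode modified
 * before touching ip->meta, e.g. to bump the modification time.
 */
#if 0
	hammer2_inode_modify(ip);
	hammer2_update_time(&ip->meta.mtime);
#endif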
1322 
1323 /*
1324  * Synchronize the inode's frontend state with the chain state prior
1325  * to any explicit flush of the inode or any strategy write call.
1326  *
1327  * Called with a locked inode inside a transaction.
1328  */
1329 void
1330 hammer2_inode_chain_sync(hammer2_inode_t *ip)
1331 {
1332 	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
1333 		hammer2_xop_fsync_t *xop;
1334 		int error;
1335 
1336 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1337 		xop->clear_directdata = 0;
1338 		if (ip->flags & HAMMER2_INODE_RESIZED) {
1339 			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
1340 			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
1341 				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
1342 				xop->clear_directdata = 1;
1343 			}
1344 			xop->osize = ip->osize;
1345 		} else {
1346 			xop->osize = ip->meta.size;	/* safety */
1347 		}
1348 		xop->ipflags = ip->flags;
1349 		xop->meta = ip->meta;
1350 
1351 		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
1352 					     HAMMER2_INODE_MODIFIED);
1353 		hammer2_xop_start(&xop->head, hammer2_inode_xop_chain_sync);
1354 		error = hammer2_xop_collect(&xop->head, 0);
1355 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1356 		if (error == ENOENT)
1357 			error = 0;
1358 		if (error) {
1359 			kprintf("hammer2: unable to fsync inode %p\n", ip);
1360 			/*
1361 			atomic_set_int(&ip->flags,
1362 				       xop->ipflags & (HAMMER2_INODE_RESIZED |
1363 						       HAMMER2_INODE_MODIFIED));
1364 			*/
1365 			/* XXX return error somehow? */
1366 		}
1367 	}
1368 }
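
/*
 * Usage sketch (illustrative only; assumes no transaction or inode
 * lock is already held): an explicit meta-data sync of one inode.
 */
#if 0
	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);
	hammer2_inode_chain_sync(ip);
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);
#endif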
1369 
1370 /*
1371  * The normal filesystem sync no longer has visibility to an inode structure
1372  * after its vnode has been reclaimed.  In this situation an unlinked-but-open
1373  * inode or a dirty inode may require additional processing to synchronize
1374  * ip->meta to its underlying cluster nodes.
1375  *
1376  * In particular, reclaims can occur in almost any state (for example, when
1377  * doing operations on unrelated vnodes) and flushing the reclaimed inode
1378  * in the reclaim path itself is a non-starter.
1379  *
1380  * Caller must be in a transaction.
1381  */
1382 void
1383 hammer2_inode_run_sideq(hammer2_pfs_t *pmp)
1384 {
1385 	hammer2_xop_destroy_t *xop;
1386 	hammer2_inode_sideq_t *ipul;
1387 	hammer2_inode_t *ip;
1388 	int error;
1389 
1390 	if (TAILQ_EMPTY(&pmp->sideq))
1391 		return;
1392 
1393 	LOCKSTART;
1394 	hammer2_spin_ex(&pmp->list_spin);
1395 	while ((ipul = TAILQ_FIRST(&pmp->sideq)) != NULL) {
1396 		TAILQ_REMOVE(&pmp->sideq, ipul, entry);
1397 		ip = ipul->ip;
1398 		KKASSERT(ip->flags & HAMMER2_INODE_ONSIDEQ);
1399 		atomic_clear_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
1400 		hammer2_spin_unex(&pmp->list_spin);
1401 		kfree(ipul, pmp->minode);
1402 
1403 		hammer2_inode_lock(ip, 0);
1404 		if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
1405 			/*
1406 			 * The inode was unlinked while open, causing H2
1407 			 * to relink it to a hidden directory to allow
1408 			 * cluster operations to continue until close.
1409 			 *
1410 			 * The inode must be deleted and destroyed.
1411 			 */
1412 			xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1413 			hammer2_xop_start(&xop->head,
1414 					  hammer2_inode_xop_destroy);
1415 			error = hammer2_xop_collect(&xop->head, 0);
1416 			hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1417 
1418 			atomic_clear_int(&ip->flags, HAMMER2_INODE_ISDELETED);
1419 		} else {
1420 			/*
1421 			 * The inode was dirty as-of the reclaim, requiring
1422 			 * synchronization of ip->meta with its underlying
1423 			 * chains.
1424 			 */
1425 			hammer2_inode_chain_sync(ip);
1426 		}
1427 
1428 		hammer2_inode_unlock(ip);
1429 		hammer2_inode_drop(ip);			/* ipul ref */
1430 
1431 		hammer2_spin_ex(&pmp->list_spin);
1432 	}
1433 	hammer2_spin_unex(&pmp->list_spin);
1434 	LOCKSTOP;
1435 }
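
/*
 * Usage sketch (illustrative only; the transaction flags are an
 * assumption): the sideq is normally drained from the filesystem sync
 * path inside a transaction.
 */
#if 0
	hammer2_trans_init(pmp, 0);
	hammer2_inode_run_sideq(pmp);
	hammer2_trans_done(pmp);
#endif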
1436 
1437 /*
1438  * Inode create helper (threaded, backend)
1439  *
1440  * Used by ncreate, nmknod, nsymlink, nmkdir.
1441  * Used by nlink and rename to create HARDLINK pointers.
1442  *
1443  * Frontend holds the parent directory ip locked exclusively.  We
1444  * create the inode and feed the exclusively locked chain to the
1445  * frontend.
1446  */
1447 void
1448 hammer2_inode_xop_create(hammer2_xop_t *arg, int clindex)
1449 {
1450 	hammer2_xop_create_t *xop = &arg->xop_create;
1451 	hammer2_chain_t *parent;
1452 	hammer2_chain_t *chain;
1453 	hammer2_key_t key_next;
1454 	int cache_index = -1;
1455 	int error;
1456 
1457 	if (hammer2_debug & 0x0001)
1458 		kprintf("inode_create lhc %016jx clindex %d\n",
1459 			xop->lhc, clindex);
1460 
1461 	chain = NULL;
1462 	parent = hammer2_inode_chain(xop->head.ip1, clindex,
1463 				     HAMMER2_RESOLVE_ALWAYS);
1464 	if (parent == NULL) {
1465 		error = EIO;
1466 		goto fail;
1467 	}
1468 	chain = hammer2_chain_lookup(&parent, &key_next,
1469 				     xop->lhc, xop->lhc,
1470 				     &cache_index, 0);
1471 	if (chain) {
1472 		hammer2_chain_unlock(chain);
1473 		error = EEXIST;
1474 		goto fail;
1475 	}
1476 
1477 	error = hammer2_chain_create(&parent, &chain,
1478 				     xop->head.ip1->pmp,
1479 				     xop->lhc, 0,
1480 				     HAMMER2_BREF_TYPE_INODE,
1481 				     HAMMER2_INODE_BYTES,
1482 				     xop->head.mtid, 0, xop->flags);
1483 	if (error == 0) {
1484 		hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
1485 		chain->data->ipdata.meta = xop->meta;
1486 		if (xop->head.name1) {
1487 			bcopy(xop->head.name1,
1488 			      chain->data->ipdata.filename,
1489 			      xop->head.name1_len);
1490 			chain->data->ipdata.meta.name_len = xop->head.name1_len;
1491 		}
1492 		chain->data->ipdata.meta.name_key = xop->lhc;
1493 	}
1494 	hammer2_chain_unlock(chain);
1495 	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
1496 				  HAMMER2_RESOLVE_SHARED);
1497 fail:
1498 	if (parent) {
1499 		hammer2_chain_unlock(parent);
1500 		hammer2_chain_drop(parent);
1501 	}
1502 	hammer2_xop_feed(&xop->head, chain, clindex, error);
1503 	if (chain)
1504 		hammer2_chain_drop(chain);
1505 }
1506 
1507 /*
1508  * Inode delete helper (backend, threaded)
1509  *
1510  * Generally used by hammer2_run_sideq()
1511  */
1512 void
1513 hammer2_inode_xop_destroy(hammer2_xop_t *arg, int clindex)
1514 {
1515 	hammer2_xop_destroy_t *xop = &arg->xop_destroy;
1516 	hammer2_pfs_t *pmp;
1517 	hammer2_chain_t *parent;
1518 	hammer2_chain_t *chain;
1519 	hammer2_inode_t *ip;
1520 	int error;
1521 
1522 	/*
1523 	 * We need the precise parent chain to issue the deletion.
1524 	 */
1525 	ip = xop->head.ip1;
1526 	pmp = ip->pmp;
1527 	chain = NULL;
1528 
1529 	parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
1530 	if (parent)
1531 		hammer2_chain_getparent(&parent, HAMMER2_RESOLVE_ALWAYS);
1532 	if (parent == NULL) {
1533 		error = EIO;
1534 		goto done;
1535 	}
1536 	chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
1537 	if (chain == NULL) {
1538 		error = EIO;
1539 		goto done;
1540 	}
1541 	hammer2_chain_delete(parent, chain, xop->head.mtid, 0);
1542 	error = 0;
1543 done:
1544 	hammer2_xop_feed(&xop->head, NULL, clindex, error);
1545 	if (parent) {
1546 		hammer2_chain_unlock(parent);
1547 		hammer2_chain_drop(parent);
1548 	}
1549 	if (chain) {
1550 		hammer2_chain_unlock(chain);
1551 		hammer2_chain_drop(chain);
1552 	}
1553 }
1554 
1555 void
1556 hammer2_inode_xop_unlinkall(hammer2_xop_t *arg, int clindex)
1557 {
1558 	hammer2_xop_unlinkall_t *xop = &arg->xop_unlinkall;
1559 	hammer2_chain_t *parent;
1560 	hammer2_chain_t *chain;
1561 	hammer2_key_t key_next;
1562 	int cache_index = -1;
1563 
1564 	/*
1565 	 * We need the precise parent chain to issue the deletion.
1566 	 */
1567 	parent = hammer2_inode_chain(xop->head.ip1, clindex,
1568 				     HAMMER2_RESOLVE_ALWAYS);
1569 	chain = NULL;
1570 	if (parent == NULL) {
1571 		/* XXX error */
1572 		goto done;
1573 	}
1574 	chain = hammer2_chain_lookup(&parent, &key_next,
1575 				     xop->key_beg, xop->key_end,
1576 				     &cache_index,
1577 				     HAMMER2_LOOKUP_ALWAYS);
1578 	while (chain) {
1579 		hammer2_chain_delete(parent, chain,
1580 				     xop->head.mtid, HAMMER2_DELETE_PERMANENT);
1581 		hammer2_chain_unlock(chain);
1582 		hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
1583 					  HAMMER2_RESOLVE_SHARED);
1584 		hammer2_xop_feed(&xop->head, chain, clindex, chain->error);
1585 		chain = hammer2_chain_next(&parent, chain, &key_next,
1586 					   key_next, xop->key_end,
1587 					   &cache_index,
1588 					   HAMMER2_LOOKUP_ALWAYS |
1589 					   HAMMER2_LOOKUP_NOUNLOCK);
1590 	}
1591 done:
1592 	hammer2_xop_feed(&xop->head, NULL, clindex, ENOENT);
1593 	if (parent) {
1594 		hammer2_chain_unlock(parent);
1595 		hammer2_chain_drop(parent);
1596 	}
1597 	if (chain) {
1598 		hammer2_chain_unlock(chain);
1599 		hammer2_chain_drop(chain);
1600 	}
1601 }
1602 
1603 void
1604 hammer2_inode_xop_connect(hammer2_xop_t *arg, int clindex)
1605 {
1606 	hammer2_xop_connect_t *xop = &arg->xop_connect;
1607 	hammer2_inode_data_t *wipdata;
1608 	hammer2_chain_t *parent;
1609 	hammer2_chain_t *chain;
1610 	hammer2_pfs_t *pmp;
1611 	hammer2_key_t key_dummy;
1612 	int cache_index = -1;
1613 	int error;
1614 
1615 	/*
1616 	 * Get directory, then issue a lookup to prime the parent chain
1617 	 * for the create.  The lookup is expected to fail.
1618 	 */
1619 	pmp = xop->head.ip1->pmp;
1620 	parent = hammer2_inode_chain(xop->head.ip1, clindex,
1621 				     HAMMER2_RESOLVE_ALWAYS);
1622 	if (parent == NULL) {
1623 		chain = NULL;
1624 		error = EIO;
1625 		goto fail;
1626 	}
1627 	chain = hammer2_chain_lookup(&parent, &key_dummy,
1628 				     xop->lhc, xop->lhc,
1629 				     &cache_index, 0);
1630 	if (chain) {
1631 		hammer2_chain_unlock(chain);
1632 		hammer2_chain_drop(chain);
1633 		chain = NULL;
1634 		error = EEXIST;
1635 		goto fail;
1636 	}
1637 
1638 	/*
1639 	 * Adjust the filename in the inode, set the name key.
1640 	 *
1641 	 * NOTE: Frontend must also adjust ip2->meta on success, we can't
1642 	 *	 do it here.
1643 	 */
1644 	chain = hammer2_inode_chain(xop->head.ip2, clindex,
1645 				    HAMMER2_RESOLVE_ALWAYS);
1646 	hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
1647 	wipdata = &chain->data->ipdata;
1648 
1649 	hammer2_inode_modify(xop->head.ip2);
1650 	if (xop->head.name1) {
1651 		bzero(wipdata->filename, sizeof(wipdata->filename));
1652 		bcopy(xop->head.name1, wipdata->filename, xop->head.name1_len);
1653 		wipdata->meta.name_len = xop->head.name1_len;
1654 	}
1655 	wipdata->meta.name_key = xop->lhc;
1656 
1657 	/*
1658 	 * Reconnect the chain to the new parent directory
1659 	 */
1660 	error = hammer2_chain_create(&parent, &chain, pmp,
1661 				     xop->lhc, 0,
1662 				     HAMMER2_BREF_TYPE_INODE,
1663 				     HAMMER2_INODE_BYTES,
1664 				     xop->head.mtid, 0, 0);
1665 
1666 	/*
1667 	 * Feed result back.
1668 	 */
1669 fail:
1670 	hammer2_xop_feed(&xop->head, NULL, clindex, error);
1671 	if (parent) {
1672 		hammer2_chain_unlock(parent);
1673 		hammer2_chain_drop(parent);
1674 	}
1675 	if (chain) {
1676 		hammer2_chain_unlock(chain);
1677 		hammer2_chain_drop(chain);
1678 	}
1679 }
1680 
1681 /*
1682  * Synchronize the in-memory inode with the chain.
1683  */
1684 void
1685 hammer2_inode_xop_chain_sync(hammer2_xop_t *arg, int clindex)
1686 {
1687 	hammer2_xop_fsync_t *xop = &arg->xop_fsync;
1688 	hammer2_chain_t	*parent;
1689 	hammer2_chain_t	*chain;
1690 	int error;
1691 
1692 	parent = hammer2_inode_chain(xop->head.ip1, clindex,
1693 				     HAMMER2_RESOLVE_ALWAYS);
1694 	chain = NULL;
1695 	if (parent == NULL) {
1696 		error = EIO;
1697 		goto done;
1698 	}
1699 	if (parent->error) {
1700 		error = parent->error;
1701 		goto done;
1702 	}
1703 
1704 	error = 0;
1705 
1706 	if ((xop->ipflags & HAMMER2_INODE_RESIZED) == 0) {
1707 		/* osize must be ignored */
1708 	} else if (xop->meta.size < xop->osize) {
1709 		/*
1710 		 * We must delete any chains beyond the EOF.  The chain
1711 		 * straddling the EOF will be pending in the bioq.
1712 		 */
1713 		hammer2_key_t lbase;
1714 		hammer2_key_t key_next;
1715 		int cache_index = -1;
1716 
1717 		lbase = (xop->meta.size + HAMMER2_PBUFMASK64) &
1718 			~HAMMER2_PBUFMASK64;
1719 		chain = hammer2_chain_lookup(&parent, &key_next,
1720 					     lbase, HAMMER2_KEY_MAX,
1721 					     &cache_index,
1722 					     HAMMER2_LOOKUP_NODATA |
1723 					     HAMMER2_LOOKUP_NODIRECT);
1724 		while (chain) {
1725 			/*
1726 			 * Degenerate embedded case, nothing to loop on
1727 			 */
1728 			switch (chain->bref.type) {
1729 			case HAMMER2_BREF_TYPE_INODE:
1730 				KKASSERT(0);
1731 				break;
1732 			case HAMMER2_BREF_TYPE_DATA:
1733 				hammer2_chain_delete(parent, chain,
1734 						     xop->head.mtid,
1735 						     HAMMER2_DELETE_PERMANENT);
1736 				break;
1737 			}
1738 			chain = hammer2_chain_next(&parent, chain, &key_next,
1739 						   key_next, HAMMER2_KEY_MAX,
1740 						   &cache_index,
1741 						   HAMMER2_LOOKUP_NODATA |
1742 						   HAMMER2_LOOKUP_NODIRECT);
1743 		}
1744 
1745 		/*
1746 		 * Reset to point at inode for following code, if necessary.
1747 		 */
1748 		if (parent->bref.type != HAMMER2_BREF_TYPE_INODE) {
1749 			hammer2_chain_unlock(parent);
1750 			hammer2_chain_drop(parent);
1751 			parent = hammer2_inode_chain(xop->head.ip1, clindex,
1752 						     HAMMER2_RESOLVE_ALWAYS);
1753 			kprintf("hammer2: TRUNCATE RESET on '%s'\n",
1754 				parent->data->ipdata.filename);
1755 		}
1756 	}
1757 
1758 	/*
1759 	 * Sync the inode meta-data, potentially clear the blockset area
1760 	 * of direct data so it can be used for blockrefs.
1761 	 */
1762 	hammer2_chain_modify(parent, xop->head.mtid, 0, 0);
1763 	parent->data->ipdata.meta = xop->meta;
1764 	if (xop->clear_directdata) {
1765 		bzero(&parent->data->ipdata.u.blockset,
1766 		      sizeof(parent->data->ipdata.u.blockset));
1767 	}
1768 done:
1769 	if (chain) {
1770 		hammer2_chain_unlock(chain);
1771 		hammer2_chain_drop(chain);
1772 	}
1773 	if (parent) {
1774 		hammer2_chain_unlock(parent);
1775 		hammer2_chain_drop(parent);
1776 	}
1777 	hammer2_xop_feed(&xop->head, NULL, clindex, error);
1778 }
1779 
1780