xref: /dragonfly/sys/vfs/hammer2/hammer2_inode.c (revision 6e4c95df)
1 /*
2  * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 #include <sys/cdefs.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/types.h>
39 #include <sys/lock.h>
40 #include <sys/uuid.h>
41 
42 #include "hammer2.h"
43 
44 #define INODE_DEBUG	0
45 
46 RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
47 	     hammer2_tid_t, meta.inum);
48 
49 int
50 hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
51 {
52 	if (ip1->meta.inum < ip2->meta.inum)
53 		return(-1);
54 	if (ip1->meta.inum > ip2->meta.inum)
55 		return(1);
56 	return(0);
57 }
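
/*
 * A minimal keyed-lookup sketch (hypothetical caller, locking elided):
 * RB_GENERATE2 keys the tree on meta.inum, so a lookup by inode number
 * needs no dummy node:
 *
 *	ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
 *
 * hammer2_inode_lookup() below wraps exactly this under pmp->inum_spin
 * and adds a ref to the returned inode.
 */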
58 
59 static __inline
60 void
61 hammer2_knote(struct vnode *vp, int flags)
62 {
63 	if (flags)
64 		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
65 }
66 
67 static
68 void
69 hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
70 {
71 	hammer2_inode_sideq_t *ipul;
72 	hammer2_pfs_t *pmp = ip->pmp;
73 
74 	if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
75 		ipul = kmalloc(sizeof(*ipul), pmp->minode,
76 			       M_WAITOK | M_ZERO);
77 		ipul->ip = ip;
78 		hammer2_spin_ex(&pmp->list_spin);
79 		if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
80 			hammer2_inode_ref(ip);
81 			atomic_set_int(&ip->flags,
82 				       HAMMER2_INODE_ONSIDEQ);
83 			TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
84 			hammer2_spin_unex(&pmp->list_spin);
85 		} else {
86 			hammer2_spin_unex(&pmp->list_spin);
87 			kfree(ipul, pmp->minode);
88 		}
89 	}
90 }
91 
92 /*
93  * HAMMER2 inode locks
94  *
95  * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
96  * flags for options:
97  *
98  *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
99  *	  inode locking function will automatically set the RDONLY flag.
100  *
101  *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
102  *	  Most front-end inode locks do.
103  *
104  *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
105  *	  the inode data be resolved.  This is used by the syncthr because
106  *	  it can run on an unresolved/out-of-sync cluster, and also by the
107  *	  vnode reclamation code to avoid unnecessary I/O (particularly when
108  *	  disposing of hundreds of thousands of cached vnodes).
109  *
110  * The inode locking function locks the inode itself, resolves any stale
111  * chains in the inode's cluster, and allocates a fresh copy of the
112  * cluster with 1 ref and all the underlying chains locked.
113  *
114  * ip->cluster will be stable while the inode is locked.
115  *
116  * NOTE: We don't combine the inode/chain lock because putting away an
117  *       inode would otherwise confuse multiple lock holders of the inode.
118  *
119  * NOTE: In-memory inodes always point to hardlink targets (the actual file),
120  *	 and never point to a hardlink pointer.
121  *
122  * NOTE: If caller passes HAMMER2_RESOLVE_RDONLY the exclusive locking code
123  *	 will feel free to reduce the chain set in the cluster as an
124  *	 optimization.  It will still be validated against the quorum if
125  *	 appropriate, but the optimization might be able to reduce data
126  *	 accesses to one node.  This flag is automatically set if the inode
127  *	 is locked with HAMMER2_RESOLVE_SHARED.
128  */
129 void
130 hammer2_inode_lock(hammer2_inode_t *ip, int how)
131 {
132 	hammer2_inode_ref(ip);
133 
134 	/*
135 	 * Inode structure mutex
136 	 */
137 	if (how & HAMMER2_RESOLVE_SHARED) {
138 		/*how |= HAMMER2_RESOLVE_RDONLY; not used */
139 		hammer2_mtx_sh(&ip->lock);
140 	} else {
141 		hammer2_mtx_ex(&ip->lock);
142 	}
143 }
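
/*
 * A minimal lock/unlock sketch (hypothetical caller): a read-only
 * consumer of ip->meta typically brackets the access with a shared
 * lock.  The lock call also gains a ref which hammer2_inode_unlock()
 * drops:
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *	inum = ip->meta.inum;
 *	hammer2_inode_unlock(ip);
 */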
144 
145 /*
146  * Select a chain out of an inode's cluster and lock it.
147  *
148  * The inode does not have to be locked.
149  */
150 hammer2_chain_t *
151 hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
152 {
153 	hammer2_chain_t *chain;
154 	hammer2_cluster_t *cluster;
155 
156 	hammer2_spin_sh(&ip->cluster_spin);
157 	cluster = &ip->cluster;
158 	if (clindex >= cluster->nchains)
159 		chain = NULL;
160 	else
161 		chain = cluster->array[clindex].chain;
162 	if (chain) {
163 		hammer2_chain_ref(chain);
164 		hammer2_spin_unsh(&ip->cluster_spin);
165 		hammer2_chain_lock(chain, how);
166 	} else {
167 		hammer2_spin_unsh(&ip->cluster_spin);
168 	}
169 	return chain;
170 }
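
/*
 * A minimal per-node access sketch (hypothetical caller, error checks
 * elided): the returned chain is both referenced and locked, so the
 * caller pairs it with an unlock and a drop:
 *
 *	chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
 *	if (chain) {
 *		... inspect chain->bref, chain->data ...
 *		hammer2_chain_unlock(chain);
 *		hammer2_chain_drop(chain);
 *	}
 */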
171 
172 hammer2_chain_t *
173 hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
174 			       hammer2_chain_t **parentp, int how)
175 {
176 	hammer2_chain_t *chain;
177 	hammer2_chain_t *parent;
178 
179 	for (;;) {
180 		hammer2_spin_sh(&ip->cluster_spin);
181 		if (clindex >= ip->cluster.nchains)
182 			chain = NULL;
183 		else
184 			chain = ip->cluster.array[clindex].chain;
185 		if (chain) {
186 			hammer2_chain_ref(chain);
187 			hammer2_spin_unsh(&ip->cluster_spin);
188 			hammer2_chain_lock(chain, how);
189 		} else {
190 			hammer2_spin_unsh(&ip->cluster_spin);
191 		}
192 
193 		/*
194 		 * Get parent, lock order must be (parent, chain).
195 		 */
196 		parent = (chain ? chain->parent : NULL);
197 		if (parent) {
198 			hammer2_chain_ref(parent);
199 			hammer2_chain_unlock(chain);
200 			hammer2_chain_lock(parent, how);
201 			hammer2_chain_lock(chain, how);
202 		}
203 		if (chain == NULL ||
204 		    (ip->cluster.array[clindex].chain == chain &&
205 		     chain->parent == parent))
206 			break;
207 
208 		/*
209 		 * Retry
210 		 */
211 		hammer2_chain_unlock(chain);
212 		hammer2_chain_drop(chain);
213 		if (parent) {
214 			hammer2_chain_unlock(parent);
215 			hammer2_chain_drop(parent);
216 		}
217 	}
218 	*parentp = parent;
219 
220 	return chain;
221 }
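
/*
 * A minimal release sketch (hypothetical caller): both the chain and
 * *parentp come back referenced and locked; parent may be NULL for a
 * root chain, so release it conditionally:
 *
 *	chain = hammer2_inode_chain_and_parent(ip, clindex, &parent,
 *					       HAMMER2_RESOLVE_ALWAYS);
 *	... operate with lock order (parent, chain) ...
 *	if (parent) {
 *		hammer2_chain_unlock(parent);
 *		hammer2_chain_drop(parent);
 *	}
 *	if (chain) {
 *		hammer2_chain_unlock(chain);
 *		hammer2_chain_drop(chain);
 *	}
 */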
222 
223 void
224 hammer2_inode_unlock(hammer2_inode_t *ip)
225 {
226 	hammer2_mtx_unlock(&ip->lock);
227 	hammer2_inode_drop(ip);
228 }
229 
230 /*
231  * Temporarily release a lock held shared or exclusive.  The caller
232  * must hold the lock shared or exclusive on call; the lock is released
233  * on return.
234  *
235  * hammer2_inode_lock_temp_restore() restores the temporarily released lock.
236  */
237 hammer2_mtx_state_t
238 hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
239 {
240 	return hammer2_mtx_temp_release(&ip->lock);
241 }
242 
243 void
244 hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
245 {
246 	hammer2_mtx_temp_restore(&ip->lock, ostate);
247 }
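
/*
 * A minimal temp-release sketch (hypothetical caller): this pattern is
 * used when a blocking operation cannot be performed while holding the
 * inode lock, e.g. the vget() in hammer2_igetv() below:
 *
 *	hammer2_mtx_state_t ostate;
 *
 *	ostate = hammer2_inode_lock_temp_release(ip);
 *	... blocking operation that may acquire the vnode lock ...
 *	hammer2_inode_lock_temp_restore(ip, ostate);
 */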
248 
249 /*
250  * Upgrade a shared inode lock to exclusive and return.  If the inode lock
251  * is already held exclusively this is a NOP.
252  *
253  * The caller MUST hold the inode lock either shared or exclusive on call
254  * and will own the lock exclusively on return.
255  *
256  * Returns non-zero if the lock was already exclusive prior to the upgrade.
257  */
258 int
259 hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
260 {
261 	int wasexclusive;
262 
263 	if (mtx_islocked_ex(&ip->lock)) {
264 		wasexclusive = 1;
265 	} else {
266 		hammer2_mtx_unlock(&ip->lock);
267 		hammer2_mtx_ex(&ip->lock);
268 		wasexclusive = 0;
269 	}
270 	return wasexclusive;
271 }
272 
273 /*
274  * Downgrade an inode lock from exclusive to shared only if the inode
275  * lock was previously shared.  If the inode lock was previously exclusive,
276  * this is a NOP.
277  */
278 void
279 hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
280 {
281 	if (wasexclusive == 0)
282 		mtx_downgrade(&ip->lock);
283 }
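
/*
 * A minimal upgrade/downgrade sketch (hypothetical caller): the
 * wasexclusive cookie makes the pair transparent to callers that
 * entered with either lock state:
 *
 *	wasexclusive = hammer2_inode_lock_upgrade(ip);
 *	... modify state requiring the exclusive lock ...
 *	hammer2_inode_lock_downgrade(ip, wasexclusive);
 */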
284 
285 /*
286  * Lookup an inode by inode number
287  */
288 hammer2_inode_t *
289 hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
290 {
291 	hammer2_inode_t *ip;
292 
293 	KKASSERT(pmp);
294 	if (pmp->spmp_hmp) {
295 		ip = NULL;
296 	} else {
297 		hammer2_spin_ex(&pmp->inum_spin);
298 		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
299 		if (ip)
300 			hammer2_inode_ref(ip);
301 		hammer2_spin_unex(&pmp->inum_spin);
302 	}
303 	return(ip);
304 }
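
/*
 * A minimal lookup sketch (hypothetical caller): the returned inode,
 * if any, is referenced but not locked, so the caller eventually
 * disposes of it with hammer2_inode_drop():
 *
 *	ip = hammer2_inode_lookup(pmp, inum);
 *	if (ip) {
 *		... use ip, possibly via hammer2_inode_lock(ip, 0) ...
 *		hammer2_inode_drop(ip);
 *	}
 */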
305 
306 /*
307  * Adding a ref to an inode is only legal if the inode already has at least
308  * one ref.
309  *
310  * (can be called with spinlock held)
311  */
312 void
313 hammer2_inode_ref(hammer2_inode_t *ip)
314 {
315 	atomic_add_int(&ip->refs, 1);
316 	if (hammer2_debug & 0x80000) {
317 		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
318 		print_backtrace(8);
319 	}
320 }
321 
322 /*
323  * Drop an inode reference, freeing the inode when the last reference goes
324  * away.
325  */
326 void
327 hammer2_inode_drop(hammer2_inode_t *ip)
328 {
329 	hammer2_pfs_t *pmp;
330 	u_int refs;
331 
332 	while (ip) {
333 		if (hammer2_debug & 0x80000) {
334 			kprintf("INODE-1 %p (%d->%d)\n",
335 				ip, ip->refs, ip->refs - 1);
336 			print_backtrace(8);
337 		}
338 		refs = ip->refs;
339 		cpu_ccfence();
340 		if (refs == 1) {
341 			/*
342 			 * Transition to zero, must interlock with
343 			 * the inode inumber lookup tree (if applicable).
344 			 * It should not be possible for anyone to race
345 			 * the transition to 0.
346 			 */
347 			pmp = ip->pmp;
348 			KKASSERT(pmp);
349 			hammer2_spin_ex(&pmp->inum_spin);
350 
351 			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
352 				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
353 				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
354 					atomic_clear_int(&ip->flags,
355 						     HAMMER2_INODE_ONRBTREE);
356 					RB_REMOVE(hammer2_inode_tree,
357 						  &pmp->inum_tree, ip);
358 				}
359 				hammer2_spin_unex(&pmp->inum_spin);
360 
361 				ip->pmp = NULL;
362 
363 				/*
364 				 * Cleaning out ip->cluster isn't entirely
365 				 * trivial.
366 				 */
367 				hammer2_inode_repoint(ip, NULL, NULL);
368 
369 				kfree(ip, pmp->minode);
370 				atomic_add_long(&pmp->inmem_inodes, -1);
371 				ip = NULL;	/* will terminate loop */
372 			} else {
373 				hammer2_spin_unex(&ip->pmp->inum_spin);
374 			}
375 		} else {
376 			/*
377 			 * Non-zero transition
378 			 */
379 			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
380 				break;
381 		}
382 	}
383 }
384 
385 /*
386  * Get the vnode associated with the given inode, allocating the vnode if
387  * necessary.  The vnode will be returned exclusively locked.
388  *
389  * *errorp is set to a UNIX error, not a HAMMER2 error.
390  *
391  * The caller must lock the inode (shared or exclusive).
392  *
393  * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
394  * races.
395  */
396 struct vnode *
397 hammer2_igetv(hammer2_inode_t *ip, int *errorp)
398 {
399 	hammer2_pfs_t *pmp;
400 	struct vnode *vp;
401 
402 	pmp = ip->pmp;
403 	KKASSERT(pmp != NULL);
404 	*errorp = 0;
405 
406 	for (;;) {
407 		/*
408 		 * Attempt to reuse an existing vnode assignment.  It is
409 		 * possible to race a reclaim so the vget() may fail.  The
410 		 * inode must be unlocked during the vget() to avoid a
411 		 * deadlock against a reclaim.
412 		 */
413 		int wasexclusive;
414 
415 		vp = ip->vp;
416 		if (vp) {
417 			/*
418 			 * Inode must be unlocked during the vget() to avoid
419 			 * possible deadlocks, but leave the ip ref intact.
420 			 *
421 			 * vnode is held to prevent destruction during the
422 			 * vget().  The vget() can still fail if we lost
423 			 * a reclaim race on the vnode.
424 			 */
425 			hammer2_mtx_state_t ostate;
426 
427 			vhold(vp);
428 			ostate = hammer2_inode_lock_temp_release(ip);
429 			if (vget(vp, LK_EXCLUSIVE)) {
430 				vdrop(vp);
431 				hammer2_inode_lock_temp_restore(ip, ostate);
432 				continue;
433 			}
434 			hammer2_inode_lock_temp_restore(ip, ostate);
435 			vdrop(vp);
436 			/* vp still locked and ref from vget */
437 			if (ip->vp != vp) {
438 				kprintf("hammer2: igetv race %p/%p\n",
439 					ip->vp, vp);
440 				vput(vp);
441 				continue;
442 			}
443 			*errorp = 0;
444 			break;
445 		}
446 
447 		/*
448 		 * No vnode exists, allocate a new vnode.  Beware of
449 		 * allocation races.  This function will return an
450 		 * exclusively locked and referenced vnode.
451 		 */
452 		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
453 		if (*errorp) {
454 			kprintf("hammer2: igetv getnewvnode failed %d\n",
455 				*errorp);
456 			vp = NULL;
457 			break;
458 		}
459 
460 		/*
461 		 * Lock the inode and check for an allocation race.
462 		 */
463 		wasexclusive = hammer2_inode_lock_upgrade(ip);
464 		if (ip->vp != NULL) {
465 			vp->v_type = VBAD;
466 			vx_put(vp);
467 			hammer2_inode_lock_downgrade(ip, wasexclusive);
468 			continue;
469 		}
470 
471 		switch (ip->meta.type) {
472 		case HAMMER2_OBJTYPE_DIRECTORY:
473 			vp->v_type = VDIR;
474 			break;
475 		case HAMMER2_OBJTYPE_REGFILE:
476 			vp->v_type = VREG;
477 			vinitvmio(vp, ip->meta.size,
478 				  HAMMER2_LBUFSIZE,
479 				  (int)ip->meta.size & HAMMER2_LBUFMASK);
480 			break;
481 		case HAMMER2_OBJTYPE_SOFTLINK:
482 			/*
483 			 * XXX for now we are using the generic file_read
484 			 * and file_write code so we need a buffer cache
485 			 * association.
486 			 */
487 			vp->v_type = VLNK;
488 			vinitvmio(vp, ip->meta.size,
489 				  HAMMER2_LBUFSIZE,
490 				  (int)ip->meta.size & HAMMER2_LBUFMASK);
491 			break;
492 		case HAMMER2_OBJTYPE_CDEV:
493 			vp->v_type = VCHR;
494 			/* fall through */
495 		case HAMMER2_OBJTYPE_BDEV:
496 			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
497 			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
498 				vp->v_type = VBLK;
499 			addaliasu(vp,
500 				  ip->meta.rmajor,
501 				  ip->meta.rminor);
502 			break;
503 		case HAMMER2_OBJTYPE_FIFO:
504 			vp->v_type = VFIFO;
505 			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
506 			break;
507 		case HAMMER2_OBJTYPE_SOCKET:
508 			vp->v_type = VSOCK;
509 			break;
510 		default:
511 			panic("hammer2: unhandled objtype %d",
512 			      ip->meta.type);
513 			break;
514 		}
515 
516 		if (ip == pmp->iroot)
517 			vsetflags(vp, VROOT);
518 
519 		vp->v_data = ip;
520 		ip->vp = vp;
521 		hammer2_inode_ref(ip);		/* vp association */
522 		hammer2_inode_lock_downgrade(ip, wasexclusive);
523 		break;
524 	}
525 
526 	/*
527 	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
528 	 */
529 	if ((hammer2_debug & 0x0002) && vp) {
530 		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
531 			vp, vp->v_refcnt, vp->v_auxrefs);
532 	}
533 	return (vp);
534 }
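
/*
 * A minimal igetv sketch (hypothetical caller, roughly the VOP lookup
 * pattern): the inode must be locked across the call and the returned
 * vnode comes back exclusively locked with its own reference:
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *	vp = hammer2_igetv(ip, &error);
 *	hammer2_inode_unlock(ip);
 *	if (vp) {
 *		... hand vp to the caller, or vput(vp) when done ...
 *	}
 */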
535 
536 /*
537  * Returns the inode associated with the passed-in cluster, creating the
538  * inode if necessary and synchronizing it to the passed-in cluster otherwise.
539  * When synchronizing, if idx >= 0, only cluster index (idx) is synchronized.
540  * Otherwise the whole cluster is synchronized.
541  *
542  * The passed-in cluster must be locked and will remain locked on return.
543  * The returned inode will be locked and the caller may dispose of both
544  * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
545  * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
546  *
547  * The hammer2_inode structure regulates the interface between the high level
548  * kernel VNOPS API and the filesystem backend (the chains).
549  *
550  * On return the inode is locked with the supplied cluster.
551  */
552 hammer2_inode_t *
553 hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
554 		  hammer2_cluster_t *cluster, int idx)
555 {
556 	hammer2_inode_t *nip;
557 	const hammer2_inode_data_t *iptmp;
558 	const hammer2_inode_data_t *nipdata;
559 
560 	KKASSERT(cluster == NULL ||
561 		 hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);
562 	KKASSERT(pmp);
563 
564 	/*
565 	 * Interlocked lookup/ref of the inode.  This code is only needed
566 	 * when looking up inodes with nlinks != 0 (TODO: optimize out
567 	 * otherwise and test for duplicates).
568 	 *
569 	 * Cluster can be NULL during the initial pfs allocation.
570 	 */
571 again:
572 	while (cluster) {
573 		iptmp = &hammer2_cluster_rdata(cluster)->ipdata;
574 		nip = hammer2_inode_lookup(pmp, iptmp->meta.inum);
575 		if (nip == NULL)
576 			break;
577 
578 		hammer2_mtx_ex(&nip->lock);
579 
580 		/*
581 		 * Handle SMP race (not applicable to the super-root spmp
582 		 * which can't index inodes due to duplicative inode numbers).
583 		 */
584 		if (pmp->spmp_hmp == NULL &&
585 		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
586 			hammer2_mtx_unlock(&nip->lock);
587 			hammer2_inode_drop(nip);
588 			continue;
589 		}
590 		if (idx >= 0)
591 			hammer2_inode_repoint_one(nip, cluster, idx);
592 		else
593 			hammer2_inode_repoint(nip, NULL, cluster);
594 
595 		return nip;
596 	}
597 
598 	/*
599 	 * We couldn't find the inode number, create a new inode.
600 	 */
601 	nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
602 	spin_init(&nip->cluster_spin, "h2clspin");
603 	atomic_add_long(&pmp->inmem_inodes, 1);
604 	hammer2_pfs_memory_inc(pmp);
605 	hammer2_pfs_memory_wakeup(pmp);
606 	if (pmp->spmp_hmp)
607 		nip->flags = HAMMER2_INODE_SROOT;
608 
609 	/*
610 	 * Initialize nip's cluster.  A cluster is provided for normal
611 	 * inodes but typically not for the super-root or PFS inodes.
612 	 */
613 	nip->cluster.refs = 1;
614 	nip->cluster.pmp = pmp;
615 	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
616 	if (cluster) {
617 		nipdata = &hammer2_cluster_rdata(cluster)->ipdata;
618 		nip->meta = nipdata->meta;
619 		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
620 		hammer2_inode_repoint(nip, NULL, cluster);
621 	} else {
622 		nip->meta.inum = 1;		/* PFS inum is always 1 XXX */
623 		/* mtime will be updated when a cluster is available */
624 		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);/*XXX*/
625 	}
626 
627 	nip->pmp = pmp;
628 
629 	/*
630 	 * ref and lock on nip gives it state compatible to after a
631 	 * hammer2_inode_lock() call.
632 	 */
633 	nip->refs = 1;
634 	hammer2_mtx_init(&nip->lock, "h2inode");
635 	hammer2_mtx_ex(&nip->lock);
636 	/* combination of thread lock and chain lock == inode lock */
637 
638 	/*
639 	 * Attempt to add the inode.  If it fails we raced another inode
640 	 * get.  Undo all the work and try again.
641 	 */
642 	if (pmp->spmp_hmp == NULL) {
643 		hammer2_spin_ex(&pmp->inum_spin);
644 		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
645 			hammer2_spin_unex(&pmp->inum_spin);
646 			hammer2_mtx_unlock(&nip->lock);
647 			hammer2_inode_drop(nip);
648 			goto again;
649 		}
650 		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
651 		hammer2_spin_unex(&pmp->inum_spin);
652 	}
653 
654 	return (nip);
655 }
656 
657 /*
658  * MESSY! CLEANUP!
659  *
660  * Create a new inode using the vattr to figure out the type.  A non-zero
661  * type field overrides vattr.  We need the directory to set iparent or to
662  * use when the inode is directly embedded in a directory (typically super-root
663  * entries), but note that this really only applies to OBJTYPE_DIRECTORY as
664  * non-directory inodes can be hardlinked.
665  *
666  * If no error occurs the new inode is returned, otherwise NULL is returned.
667  * It is possible for an error to create a junk inode and then fail later.
668  * It will attempt to delete the junk inode and return NULL in this situation.
669  *
670  * If vap and/or cred are NULL the related fields are not set and the
671  * inode type defaults to a directory.  This is used when creating PFSs
672  * under the super-root, so the inode number is set to 1 in this case.
673  *
674  * dip is not locked on entry.
675  *
676  * NOTE: This function is used to create all manners of inodes, including
677  *	 super-root entries for snapshots and PFSs.  When used to create a
678  *	 snapshot the inode will be temporarily associated with the spmp.
679  *
680  * NOTE: When creating a normal file or directory the name/name_len/lhc
681  *	 is optional, but is typically specified to make debugging and
682  *	 recovery easier.
683  */
684 hammer2_inode_t *
685 hammer2_inode_create(hammer2_inode_t *dip, hammer2_inode_t *pip,
686 		     struct vattr *vap, struct ucred *cred,
687 		     const uint8_t *name, size_t name_len, hammer2_key_t lhc,
688 		     hammer2_key_t inum,
689 		     uint8_t type, uint8_t target_type,
690 		     int flags, int *errorp)
691 {
692 	hammer2_xop_create_t *xop;
693 	hammer2_inode_t *nip;
694 	int error;
695 	uid_t xuid;
696 	uuid_t pip_uid;
697 	uuid_t pip_gid;
698 	uint32_t pip_mode;
699 	uint8_t pip_comp_algo;
700 	uint8_t pip_check_algo;
701 	hammer2_tid_t pip_inum;
702 
703 	if (name)
704 		lhc = hammer2_dirhash(name, name_len);
705 	*errorp = 0;
706 	nip = NULL;
707 
708 	/*
709 	 * Locate the inode or indirect block to create the new
710 	 * entry in.  At the same time check for key collisions
711 	 * and iterate until we don't get one.
712 	 *
713 	 * Lock the directory exclusively for now to guarantee that
714 	 * we can find an unused lhc for the name.  Due to collisions,
715 	 * two different creates can end up with the same lhc so we
716 	 * cannot depend on the OS to prevent the collision.
717 	 */
718 	hammer2_inode_lock(dip, 0);
719 
720 	pip_uid = pip->meta.uid;
721 	pip_gid = pip->meta.gid;
722 	pip_mode = pip->meta.mode;
723 	pip_comp_algo = pip->meta.comp_algo;
724 	pip_check_algo = pip->meta.check_algo;
725 	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;
726 
727 	/*
728 	 * If name specified, locate an unused key in the collision space.
729 	 * Otherwise use the passed-in lhc directly.
730 	 */
731 	if (name) {
732 		hammer2_xop_scanlhc_t *sxop;
733 		hammer2_key_t lhcbase;
734 
735 		lhcbase = lhc;
736 		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
737 		sxop->lhc = lhc;
738 		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
739 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
740 			if (lhc != sxop->head.cluster.focus->bref.key)
741 				break;
742 			++lhc;
743 		}
744 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
745 
746 		if (error) {
747 			if (error != HAMMER2_ERROR_ENOENT)
748 				goto done2;
749 			++lhc;
750 			error = 0;
751 		}
752 		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
753 			error = HAMMER2_ERROR_ENOSPC;
754 			goto done2;
755 		}
756 	}
757 
758 	/*
759 	 * Create the inode with the lhc as the key.
760 	 */
761 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
762 	xop->lhc = lhc;
763 	xop->flags = flags;
764 	bzero(&xop->meta, sizeof(xop->meta));
765 
766 	if (vap) {
767 		xop->meta.type = hammer2_get_obj_type(vap->va_type);
768 
769 		switch (xop->meta.type) {
770 		case HAMMER2_OBJTYPE_CDEV:
771 		case HAMMER2_OBJTYPE_BDEV:
772 			xop->meta.rmajor = vap->va_rmajor;
773 			xop->meta.rminor = vap->va_rminor;
774 			break;
775 		default:
776 			break;
777 		}
778 		type = xop->meta.type;
779 	} else {
780 		xop->meta.type = type;
781 		xop->meta.target_type = target_type;
782 	}
783 	xop->meta.inum = inum;
784 	xop->meta.iparent = pip_inum;
785 
786 	/* Inherit parent's inode compression mode. */
787 	xop->meta.comp_algo = pip_comp_algo;
788 	xop->meta.check_algo = pip_check_algo;
789 	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
790 	hammer2_update_time(&xop->meta.ctime);
791 	xop->meta.mtime = xop->meta.ctime;
792 	if (vap)
793 		xop->meta.mode = vap->va_mode;
794 	xop->meta.nlinks = 1;
795 	if (vap) {
796 		if (dip->pmp) {
797 			xuid = hammer2_to_unix_xid(&pip_uid);
798 			xuid = vop_helper_create_uid(dip->pmp->mp,
799 						     pip_mode,
800 						     xuid,
801 						     cred,
802 						     &vap->va_mode);
803 		} else {
804 			/* super-root has no dip and/or pmp */
805 			xuid = 0;
806 		}
807 		if (vap->va_vaflags & VA_UID_UUID_VALID)
808 			xop->meta.uid = vap->va_uid_uuid;
809 		else if (vap->va_uid != (uid_t)VNOVAL)
810 			hammer2_guid_to_uuid(&xop->meta.uid, vap->va_uid);
811 		else
812 			hammer2_guid_to_uuid(&xop->meta.uid, xuid);
813 
814 		if (vap->va_vaflags & VA_GID_UUID_VALID)
815 			xop->meta.gid = vap->va_gid_uuid;
816 		else if (vap->va_gid != (gid_t)VNOVAL)
817 			hammer2_guid_to_uuid(&xop->meta.gid, vap->va_gid);
818 		else
819 			xop->meta.gid = pip_gid;
820 	}
821 
822 	/*
823 	 * Regular files and softlinks allow a small amount of data to be
824 	 * directly embedded in the inode.  This flag will be cleared if
825 	 * the size is extended past the embedded limit.
826 	 */
827 	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
828 	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
829 		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
830 	}
831 	if (name) {
832 		hammer2_xop_setname(&xop->head, name, name_len);
833 	} else {
834 		name_len = hammer2_xop_setname_inum(&xop->head, inum);
835 		KKASSERT(lhc == inum);
836 	}
837 	xop->meta.name_len = name_len;
838 	xop->meta.name_key = lhc;
839 	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
840 
841 	hammer2_xop_start(&xop->head, hammer2_inode_xop_create);
842 
843 	error = hammer2_xop_collect(&xop->head, 0);
844 #if INODE_DEBUG
845 	kprintf("CREATE INODE %*.*s\n",
846 		(int)name_len, (int)name_len, name);
847 #endif
848 
849 	if (error) {
850 		*errorp = error;
851 		goto done;
852 	}
853 
854 	/*
855 	 * Set up the new inode if not a hardlink pointer.
856 	 *
857 	 * NOTE: *_get() integrates chain's lock into the inode lock.
858 	 *
859 	 * NOTE: Only one new inode can currently be created per
860 	 *	 transaction.  If the need arises we can adjust
861 	 *	 hammer2_trans_init() to allow more.
862 	 *
863 	 * NOTE: nipdata will have chain's blockset data.
864 	 */
865 	nip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
866 	nip->comp_heuristic = 0;
867 done:
868 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
869 done2:
870 	hammer2_inode_unlock(dip);
871 
872 	return (nip);
873 }
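
/*
 * A minimal create sketch (hypothetical frontend caller; the
 * transaction and inum helpers are assumed to be those declared in
 * hammer2.h): a regular-file create runs inside a transaction and
 * allocates a fresh inode number for the new inode:
 *
 *	hammer2_trans_init(dip->pmp, 0);
 *	nip = hammer2_inode_create(dip, dip, vap, cred,
 *				   name, name_len, 0,
 *				   hammer2_trans_newinum(dip->pmp),
 *				   0, 0, 0, &error);
 *	if (nip)
 *		hammer2_inode_unlock(nip);
 *	hammer2_trans_done(dip->pmp);
 */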
874 
875 /*
876  * Create a directory entry under dip with the specified name, inode number,
877  * and OBJTYPE (type).
878  *
879  * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
880  */
881 int
882 hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
883 		      hammer2_key_t inum, uint8_t type)
884 {
885 	hammer2_xop_mkdirent_t *xop;
886 	hammer2_key_t lhc;
887 	int error;
888 
889 	lhc = 0;
890 	error = 0;
891 
892 	KKASSERT(name != NULL);
893 	lhc = hammer2_dirhash(name, name_len);
894 
895 	/*
896 	 * Locate the inode or indirect block to create the new
897 	 * entry in.  At the same time check for key collisions
898 	 * and iterate until we don't get one.
899 	 *
900 	 * Lock the directory exclusively for now to guarantee that
901 	 * we can find an unused lhc for the name.  Due to collisions,
902 	 * two different creates can end up with the same lhc so we
903 	 * cannot depend on the OS to prevent the collision.
904 	 */
905 	hammer2_inode_lock(dip, 0);
906 
907 	/*
908 	 * If name specified, locate an unused key in the collision space.
909 	 * Otherwise use the passed-in lhc directly.
910 	 */
911 	{
912 		hammer2_xop_scanlhc_t *sxop;
913 		hammer2_key_t lhcbase;
914 
915 		lhcbase = lhc;
916 		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
917 		sxop->lhc = lhc;
918 		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
919 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
920 			if (lhc != sxop->head.cluster.focus->bref.key)
921 				break;
922 			++lhc;
923 		}
924 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
925 
926 		if (error) {
927 			if (error != HAMMER2_ERROR_ENOENT)
928 				goto done2;
929 			++lhc;
930 			error = 0;
931 		}
932 		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
933 			error = HAMMER2_ERROR_ENOSPC;
934 			goto done2;
935 		}
936 	}
937 
938 	/*
939 	 * Create the directory entry with the lhc as the key.
940 	 */
941 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
942 	xop->lhc = lhc;
943 	bzero(&xop->dirent, sizeof(xop->dirent));
944 	xop->dirent.inum = inum;
945 	xop->dirent.type = type;
946 	xop->dirent.namlen = name_len;
947 
948 	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
949 	hammer2_xop_setname(&xop->head, name, name_len);
950 
951 	hammer2_xop_start(&xop->head, hammer2_inode_xop_mkdirent);
952 
953 	error = hammer2_xop_collect(&xop->head, 0);
954 
955 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
956 done2:
957 	error = hammer2_error_to_errno(error);
958 	hammer2_inode_unlock(dip);
959 
960 	return error;
961 }
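
/*
 * A minimal dirent sketch (hypothetical caller): after creating an
 * inode, the frontend typically links it into the directory by name,
 * keyed by the new inode's number and object type:
 *
 *	error = hammer2_dirent_create(dip, name, name_len,
 *				      nip->meta.inum, nip->meta.type);
 */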
962 
963 /*
964  * Repoint ip->cluster's chains to cluster's chains and fixup the default
965  * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
966  * filters out invalid or non-matching elements.
967  *
968  * Caller must hold the inode exclusively locked; the cluster, if not
969  * NULL, must also be exclusively locked.
970  *
971  * Cluster may be NULL to clean out any chains in ip->cluster.
972  */
973 void
974 hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
975 		      hammer2_cluster_t *cluster)
976 {
977 	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
978 	hammer2_chain_t *ochain;
979 	hammer2_chain_t *nchain;
980 	int i;
981 
982 	bzero(dropch, sizeof(dropch));
983 
984 	/*
985 	 * Replace chains in ip->cluster with chains from cluster and
986 	 * adjust the focus if necessary.
987 	 *
988 	 * NOTE: nchain and/or ochain can be NULL due to gaps
989 	 *	 in the cluster arrays.
990 	 */
991 	hammer2_spin_ex(&ip->cluster_spin);
992 	for (i = 0; cluster && i < cluster->nchains; ++i) {
993 		/*
994 		 * Do not replace elements which are the same.  Also handle
995 		 * element count discrepancies.
996 		 */
997 		nchain = cluster->array[i].chain;
998 		if (i < ip->cluster.nchains) {
999 			ochain = ip->cluster.array[i].chain;
1000 			if (ochain == nchain)
1001 				continue;
1002 		} else {
1003 			ochain = NULL;
1004 		}
1005 
1006 		/*
1007 		 * Make adjustments
1008 		 */
1009 		ip->cluster.array[i].chain = nchain;
1010 		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
1011 		ip->cluster.array[i].flags |= cluster->array[i].flags &
1012 					      HAMMER2_CITEM_INVALID;
1013 		if (nchain)
1014 			hammer2_chain_ref(nchain);
1015 		dropch[i] = ochain;
1016 	}
1017 
1018 	/*
1019 	 * Release any left-over chains in ip->cluster.
1020 	 */
1021 	while (i < ip->cluster.nchains) {
1022 		nchain = ip->cluster.array[i].chain;
1023 		if (nchain) {
1024 			ip->cluster.array[i].chain = NULL;
1025 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
1026 		}
1027 		dropch[i] = nchain;
1028 		++i;
1029 	}
1030 
1031 	/*
1032 	 * Fixup fields.  Note that the inode-embedded cluster is never
1033 	 * directly locked.
1034 	 */
1035 	if (cluster) {
1036 		ip->cluster.nchains = cluster->nchains;
1037 		ip->cluster.focus = cluster->focus;
1038 		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
1039 	} else {
1040 		ip->cluster.nchains = 0;
1041 		ip->cluster.focus = NULL;
1042 		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
1043 	}
1044 
1045 	hammer2_spin_unex(&ip->cluster_spin);
1046 
1047 	/*
1048 	 * Cleanup outside of spinlock
1049 	 */
1050 	while (--i >= 0) {
1051 		if (dropch[i])
1052 			hammer2_chain_drop(dropch[i]);
1053 	}
1054 }
1055 
1056 /*
1057  * Repoint a single element from the cluster to the ip.  Used by the
1058  * synchronization threads to piecemeal update inodes.  Does not change
1059  * focus and requires inode to be re-locked to clean-up flags (XXX).
1060  */
1061 void
1062 hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1063 			  int idx)
1064 {
1065 	hammer2_chain_t *ochain;
1066 	hammer2_chain_t *nchain;
1067 	int i;
1068 
1069 	hammer2_spin_ex(&ip->cluster_spin);
1070 	KKASSERT(idx < cluster->nchains);
1071 	if (idx < ip->cluster.nchains) {
1072 		ochain = ip->cluster.array[idx].chain;
1073 		nchain = cluster->array[idx].chain;
1074 	} else {
1075 		ochain = NULL;
1076 		nchain = cluster->array[idx].chain;
1077 		for (i = ip->cluster.nchains; i < idx; ++i) {
1078 			bzero(&ip->cluster.array[i],
1079 			      sizeof(ip->cluster.array[i]));
1080 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
1081 		}
1082 		ip->cluster.nchains = idx + 1;
1083 	}
1084 	if (ochain != nchain) {
1085 		/*
1086 		 * Make adjustments.
1087 		 */
1088 		ip->cluster.array[idx].chain = nchain;
1089 		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
1090 		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
1091 						HAMMER2_CITEM_INVALID;
1092 	}
1093 	hammer2_spin_unex(&ip->cluster_spin);
1094 	if (ochain != nchain) {
1095 		if (nchain)
1096 			hammer2_chain_ref(nchain);
1097 		if (ochain)
1098 			hammer2_chain_drop(ochain);
1099 	}
1100 }
1101 
1102 /*
1103  * Called with a locked inode to finish unlinking an inode after xop_unlink
1104  * had been run.  This function is responsible for decrementing nlinks.
1105  *
1106  * We don't bother decrementing nlinks if the file is not open and this was
1107  * the last link.
1108  *
1109  * If the inode is a hardlink target its chain has not yet been deleted;
1110  * otherwise its chain has been deleted.
1111  *
1112  * If isopen then any prior deletion was not permanent and the inode is
1113  * left intact with nlinks == 0.
1114  */
1115 int
1116 hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
1117 {
1118 	hammer2_pfs_t *pmp;
1119 	int error;
1120 
1121 	pmp = ip->pmp;
1122 
1123 	/*
1124 	 * Decrement nlinks.  If this is the last link and the file is
1125 	 * not open we can just delete the inode and not bother dropping
1126 	 * nlinks to 0 (avoiding unnecessary block updates).
1127 	 */
1128 	if (ip->meta.nlinks == 1) {
1129 		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
1130 		if (isopen == 0)
1131 			goto killit;
1132 	}
1133 
1134 	hammer2_inode_modify(ip);
1135 	--ip->meta.nlinks;
1136 	if ((int64_t)ip->meta.nlinks < 0)
1137 		ip->meta.nlinks = 0;	/* safety */
1138 
1139 	/*
1140 	 * If nlinks is not zero we are done.  However, this should only be
1141 	 * possible with a hardlink target.  If the inode is an embedded
1142  * hardlink, nlinks should have dropped to zero; warn and proceed
1143 	 * with the next step.
1144 	 */
1145 	if (ip->meta.nlinks) {
1146 		if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
1147 			return 0;
1148 		kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
1149 			(intmax_t)ip->meta.nlinks);
1150 		return 0;
1151 	}
1152 
1153 	if (ip->vp)
1154 		hammer2_knote(ip->vp, NOTE_DELETE);
1155 
1156 	/*
1157 	 * nlinks is now zero, delete the inode if not open.
1158 	 */
1159 	if (isopen == 0) {
1160 		hammer2_xop_destroy_t *xop;
1161 
1162 killit:
1163 		atomic_set_int(&ip->flags, HAMMER2_INODE_ISDELETED);
1164 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1165 		hammer2_xop_start(&xop->head, hammer2_inode_xop_destroy);
1166 		error = hammer2_xop_collect(&xop->head, 0);
1167 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1168 	}
1169 	error = 0;	/* XXX */
1170 
1171 	return error;
1172 }
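
/*
 * A minimal unlink sketch (hypothetical caller): after the backend
 * xop_unlink has removed the directory entry, the frontend finishes
 * with the target inode locked, passing isopen to indicate whether a
 * vnode still holds the file open:
 *
 *	hammer2_inode_lock(ip, 0);
 *	error = hammer2_inode_unlink_finisher(ip, isopen);
 *	hammer2_inode_unlock(ip);
 */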
1173 
1174 /*
1175  * Mark an inode as being modified, meaning that the caller will modify
1176  * ip->meta.
1177  *
1178  * If a vnode is present we set the vnode dirty and the nominal filesystem
1179  * sync will also handle synchronizing the inode meta-data.  If no vnode
1180  * is present we must ensure that the inode is on pmp->sideq.
1181  *
1182  * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
1183  *	 only modifying the in-memory inode.  A modify_tid is synchronized
1184  *	 later when the inode gets flushed.
1185  */
1186 void
1187 hammer2_inode_modify(hammer2_inode_t *ip)
1188 {
1189 	hammer2_pfs_t *pmp;
1190 
1191 	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1192 	if (ip->vp) {
1193 		vsetisdirty(ip->vp);
1194 	} else if ((pmp = ip->pmp) != NULL) {
1195 		hammer2_inode_delayed_sideq(ip);
1196 	}
1197 }
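
/*
 * A minimal modify sketch (hypothetical caller): ip->meta updates are
 * bracketed by the exclusive inode lock and flagged via
 * hammer2_inode_modify() so a later flush or chain_sync picks them up:
 *
 *	hammer2_inode_lock(ip, 0);
 *	hammer2_inode_modify(ip);
 *	hammer2_update_time(&ip->meta.mtime);
 *	hammer2_inode_unlock(ip);
 */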
1198 
1199 /*
1200  * Synchronize the inode's frontend state with the chain state prior
1201  * to any explicit flush of the inode or any strategy write call.
1202  *
1203  * Called with a locked inode inside a transaction.
1204  */
1205 void
1206 hammer2_inode_chain_sync(hammer2_inode_t *ip)
1207 {
1208 	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
1209 		hammer2_xop_fsync_t *xop;
1210 		int error;
1211 
1212 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1213 		xop->clear_directdata = 0;
1214 		if (ip->flags & HAMMER2_INODE_RESIZED) {
1215 			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
1216 			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
1217 				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
1218 				xop->clear_directdata = 1;
1219 			}
1220 			xop->osize = ip->osize;
1221 		} else {
1222 			xop->osize = ip->meta.size;	/* safety */
1223 		}
1224 		xop->ipflags = ip->flags;
1225 		xop->meta = ip->meta;
1226 
1227 		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
1228 					     HAMMER2_INODE_MODIFIED);
1229 		hammer2_xop_start(&xop->head, hammer2_inode_xop_chain_sync);
1230 		error = hammer2_xop_collect(&xop->head, 0);
1231 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1232 		if (error == HAMMER2_ERROR_ENOENT)
1233 			error = 0;
1234 		if (error) {
1235 			kprintf("hammer2: unable to fsync inode %p\n", ip);
1236 			/*
1237 			atomic_set_int(&ip->flags,
1238 				       xop->ipflags & (HAMMER2_INODE_RESIZED |
1239 						       HAMMER2_INODE_MODIFIED));
1240 			*/
1241 			/* XXX return error somehow? */
1242 		}
1243 	}
1244 }
1245 
1246 /*
1247  * The normal filesystem sync no longer has visibility to an inode structure
1248  * after its vnode has been reclaimed.  In this situation an unlinked-but-open
1249  * inode or a dirty inode may require additional processing to synchronize
1250  * ip->meta to its underlying cluster nodes.
1251  *
1252  * In particular, reclaims can occur in almost any state (for example, when
1253  * doing operations on unrelated vnodes) and flushing the reclaimed inode
1254  * in the reclaim path itself is a non-starter.
1255  *
1256  * Caller must be in a transaction.
1257  */
1258 void
1259 hammer2_inode_run_sideq(hammer2_pfs_t *pmp)
1260 {
1261 	hammer2_xop_destroy_t *xop;
1262 	hammer2_inode_sideq_t *ipul;
1263 	hammer2_inode_t *ip;
1264 	int error;
1265 
1266 	if (TAILQ_EMPTY(&pmp->sideq))
1267 		return;
1268 
1269 	hammer2_spin_ex(&pmp->list_spin);
1270 	while ((ipul = TAILQ_FIRST(&pmp->sideq)) != NULL) {
1271 		TAILQ_REMOVE(&pmp->sideq, ipul, entry);
1272 		ip = ipul->ip;
1273 		KKASSERT(ip->flags & HAMMER2_INODE_ONSIDEQ);
1274 		atomic_clear_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
1275 		hammer2_spin_unex(&pmp->list_spin);
1276 		kfree(ipul, pmp->minode);
1277 
1278 		hammer2_inode_lock(ip, 0);
1279 		if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
1280 			/*
1281 			 * The inode was unlinked while open.  The inode must
1282 			 * be deleted and destroyed.
1283 			 */
1284 			xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1285 			hammer2_xop_start(&xop->head,
1286 					  hammer2_inode_xop_destroy);
1287 			error = hammer2_xop_collect(&xop->head, 0);
1288 			/* XXX error handling */
1289 			hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1290 		} else {
1291 			/*
1292 			 * The inode was dirty as-of the reclaim, requiring
1293 			 * synchronization of ip->meta with its underlying
1294 			 * chains.
1295 			 */
1296 			hammer2_inode_chain_sync(ip);
1297 		}
1298 
1299 		hammer2_inode_unlock(ip);
1300 		hammer2_inode_drop(ip);			/* ipul ref */
1301 
1302 		hammer2_spin_ex(&pmp->list_spin);
1303 	}
1304 	hammer2_spin_unex(&pmp->list_spin);
1305 }
1306 
1307 /*
1308  * Helper to create a directory entry.
1309  */
1310 void
1311 hammer2_inode_xop_mkdirent(hammer2_thread_t *thr, hammer2_xop_t *arg)
1312 {
1313 	hammer2_xop_mkdirent_t *xop = &arg->xop_mkdirent;
1314 	hammer2_chain_t *parent;
1315 	hammer2_chain_t *chain;
1316 	hammer2_key_t key_next;
1317 	size_t data_len;
1318 	int error;
1319 
1320 	if (hammer2_debug & 0x0001)
1321 		kprintf("dirent_create lhc %016jx clindex %d\n",
1322 			xop->lhc, thr->clindex);
1323 
1324 	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
1325 				     HAMMER2_RESOLVE_ALWAYS);
1326 	if (parent == NULL) {
1327 		error = HAMMER2_ERROR_EIO;
1328 		chain = NULL;
1329 		goto fail;
1330 	}
1331 	chain = hammer2_chain_lookup(&parent, &key_next,
1332 				     xop->lhc, xop->lhc,
1333 				     &error, 0);
1334 	if (chain) {
1335 		error = HAMMER2_ERROR_EEXIST;
1336 		goto fail;
1337 	}
1338 
1339 	/*
1340 	 * We may be able to embed the directory entry directly in the
1341 	 * blockref.
1342 	 */
1343 	if (xop->dirent.namlen <= sizeof(chain->bref.check.buf))
1344 		data_len = 0;
1345 	else
1346 		data_len = HAMMER2_ALLOC_MIN;
1347 
1348 	error = hammer2_chain_create(&parent, &chain,
1349 				     xop->head.ip1->pmp, HAMMER2_METH_DEFAULT,
1350 				     xop->lhc, 0,
1351 				     HAMMER2_BREF_TYPE_DIRENT,
1352 				     data_len,
1353 				     xop->head.mtid, 0, 0);
1354 	if (error == 0) {
1355 		/*
1356 		 * WARNING: chain->data->buf is sized to chain->bytes,
1357 		 *	    do not use sizeof(chain->data->buf), which
1358 		 *	    will be much larger.
1359 		 */
1360 		error = hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
1361 		if (error == 0) {
1362 			chain->bref.embed.dirent = xop->dirent;
1363 			if (xop->dirent.namlen <= sizeof(chain->bref.check.buf))
1364 				bcopy(xop->head.name1, chain->bref.check.buf,
1365 				      xop->dirent.namlen);
1366 			else
1367 				bcopy(xop->head.name1, chain->data->buf,
1368 				      xop->dirent.namlen);
1369 		}
1370 	}
1371 fail:
1372 	if (parent) {
1373 		hammer2_chain_unlock(parent);
1374 		hammer2_chain_drop(parent);
1375 	}
1376 	hammer2_xop_feed(&xop->head, chain, thr->clindex, error);
1377 	if (chain) {
1378 		hammer2_chain_unlock(chain);
1379 		hammer2_chain_drop(chain);
1380 	}
1381 }
1382 
1383 /*
1384  * Inode create helper (threaded, backend)
1385  *
1386  * Used by ncreate, nmknod, nsymlink, nmkdir.
1387  * Used by nlink and rename to create HARDLINK pointers.
1388  *
1389  * Frontend holds the parent directory ip locked exclusively.  We
1390  * create the inode and feed the exclusively locked chain to the
1391  * frontend.
1392  */
1393 void
1394 hammer2_inode_xop_create(hammer2_thread_t *thr, hammer2_xop_t *arg)
1395 {
1396 	hammer2_xop_create_t *xop = &arg->xop_create;
1397 	hammer2_chain_t *parent;
1398 	hammer2_chain_t *chain;
1399 	hammer2_key_t key_next;
1400 	int error;
1401 
1402 	if (hammer2_debug & 0x0001)
1403 		kprintf("inode_create lhc %016jx clindex %d\n",
1404 			xop->lhc, thr->clindex);
1405 
1406 	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
1407 				     HAMMER2_RESOLVE_ALWAYS);
1408 	if (parent == NULL) {
1409 		error = HAMMER2_ERROR_EIO;
1410 		chain = NULL;
1411 		goto fail;
1412 	}
1413 	chain = hammer2_chain_lookup(&parent, &key_next,
1414 				     xop->lhc, xop->lhc,
1415 				     &error, 0);
1416 	if (chain) {
1417 		error = HAMMER2_ERROR_EEXIST;
1418 		goto fail;
1419 	}
1420 
1421 	error = hammer2_chain_create(&parent, &chain,
1422 				     xop->head.ip1->pmp, HAMMER2_METH_DEFAULT,
1423 				     xop->lhc, 0,
1424 				     HAMMER2_BREF_TYPE_INODE,
1425 				     HAMMER2_INODE_BYTES,
1426 				     xop->head.mtid, 0, xop->flags);
1427 	if (error == 0) {
1428 		error = hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
1429 		if (error == 0) {
1430 			chain->data->ipdata.meta = xop->meta;
1431 			if (xop->head.name1) {
1432 				bcopy(xop->head.name1,
1433 				      chain->data->ipdata.filename,
1434 				      xop->head.name1_len);
1435 				chain->data->ipdata.meta.name_len =
1436 					xop->head.name1_len;
1437 			}
1438 			chain->data->ipdata.meta.name_key = xop->lhc;
1439 		}
1440 	}
1441 fail:
1442 	if (parent) {
1443 		hammer2_chain_unlock(parent);
1444 		hammer2_chain_drop(parent);
1445 	}
1446 	hammer2_xop_feed(&xop->head, chain, thr->clindex, error);
1447 	if (chain) {
1448 		hammer2_chain_unlock(chain);
1449 		hammer2_chain_drop(chain);
1450 	}
1451 }
1452 
1453 /*
1454  * Inode delete helper (backend, threaded)
1455  *
1456  * Generally used by hammer2_inode_run_sideq()
1457  */
1458 void
1459 hammer2_inode_xop_destroy(hammer2_thread_t *thr, hammer2_xop_t *arg)
1460 {
1461 	hammer2_xop_destroy_t *xop = &arg->xop_destroy;
1462 	hammer2_pfs_t *pmp;
1463 	hammer2_chain_t *parent;
1464 	hammer2_chain_t *chain;
1465 	hammer2_inode_t *ip;
1466 	int error;
1467 
1468 	/*
1469 	 * We need the precise parent chain to issue the deletion.
1470 	 */
1471 	ip = xop->head.ip1;
1472 	pmp = ip->pmp;
1473 
1474 	chain = hammer2_inode_chain(ip, thr->clindex, HAMMER2_RESOLVE_ALWAYS);
1475 	if (chain == NULL) {
1476 		parent = NULL;
1477 		error = HAMMER2_ERROR_EIO;
1478 		goto done;
1479 	}
1480 	parent = hammer2_chain_getparent(chain, HAMMER2_RESOLVE_ALWAYS);
1481 	if (parent == NULL) {
1482 		error = HAMMER2_ERROR_EIO;
1483 		goto done;
1484 	}
1485 	KKASSERT(chain->parent == parent);
1486 
1487 	/*
1488 	 * We have the correct parent, we can issue the deletion.
1489 	 */
1490 	hammer2_chain_delete(parent, chain, xop->head.mtid, 0);
1491 	error = 0;
1492 done:
1493 	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
1494 	if (parent) {
1495 		hammer2_chain_unlock(parent);
1496 		hammer2_chain_drop(parent);
1497 	}
1498 	if (chain) {
1499 		hammer2_chain_unlock(chain);
1500 		hammer2_chain_drop(chain);
1501 	}
1502 }
1503 
1504 void
1505 hammer2_inode_xop_unlinkall(hammer2_thread_t *thr, hammer2_xop_t *arg)
1506 {
1507 	hammer2_xop_unlinkall_t *xop = &arg->xop_unlinkall;
1508 	hammer2_chain_t *parent;
1509 	hammer2_chain_t *chain;
1510 	hammer2_key_t key_next;
1511 	int error;
1512 
1513 	/*
1514 	 * We need the precise parent chain to issue the deletion.
1515 	 */
1516 	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
1517 				     HAMMER2_RESOLVE_ALWAYS);
1518 	chain = NULL;
1519 	if (parent == NULL) {
1520 		error = 0;
1521 		goto done;
1522 	}
1523 	chain = hammer2_chain_lookup(&parent, &key_next,
1524 				     xop->key_beg, xop->key_end,
1525 				     &error, HAMMER2_LOOKUP_ALWAYS);
1526 	while (chain) {
1527 		hammer2_chain_delete(parent, chain,
1528 				     xop->head.mtid, HAMMER2_DELETE_PERMANENT);
1529 		hammer2_xop_feed(&xop->head, chain, thr->clindex, chain->error);
1530 		/* depend on function to unlock the shared lock */
1531 		chain = hammer2_chain_next(&parent, chain, &key_next,
1532 					   key_next, xop->key_end,
1533 					   &error,
1534 					   HAMMER2_LOOKUP_ALWAYS);
1535 	}
1536 done:
1537 	if (error == 0)
1538 		error = HAMMER2_ERROR_ENOENT;
1539 	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
1540 	if (parent) {
1541 		hammer2_chain_unlock(parent);
1542 		hammer2_chain_drop(parent);
1543 	}
1544 	if (chain) {
1545 		hammer2_chain_unlock(chain);
1546 		hammer2_chain_drop(chain);
1547 	}
1548 }
1549 
1550 void
1551 hammer2_inode_xop_connect(hammer2_thread_t *thr, hammer2_xop_t *arg)
1552 {
1553 	hammer2_xop_connect_t *xop = &arg->xop_connect;
1554 	hammer2_inode_data_t *wipdata;
1555 	hammer2_chain_t *parent;
1556 	hammer2_chain_t *chain;
1557 	hammer2_pfs_t *pmp;
1558 	hammer2_key_t key_dummy;
1559 	int error;
1560 
1561 	/*
1562 	 * Get directory, then issue a lookup to prime the parent chain
1563 	 * for the create.  The lookup is expected to fail.
1564 	 */
1565 	pmp = xop->head.ip1->pmp;
1566 	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
1567 				     HAMMER2_RESOLVE_ALWAYS);
1568 	if (parent == NULL) {
1569 		chain = NULL;
1570 		error = HAMMER2_ERROR_EIO;
1571 		goto fail;
1572 	}
1573 	chain = hammer2_chain_lookup(&parent, &key_dummy,
1574 				     xop->lhc, xop->lhc,
1575 				     &error, 0);
1576 	if (chain) {
1577 		hammer2_chain_unlock(chain);
1578 		hammer2_chain_drop(chain);
1579 		chain = NULL;
1580 		error = HAMMER2_ERROR_EEXIST;
1581 		goto fail;
1582 	}
1583 	if (error)
1584 		goto fail;
1585 
1586 	/*
1587 	 * Adjust the filename in the inode, set the name key.
1588 	 *
1589 	 * NOTE: Frontend must also adjust ip2->meta on success, we can't
1590 	 *	 do it here.
1591 	 */
1592 	chain = hammer2_inode_chain(xop->head.ip2, thr->clindex,
1593 				    HAMMER2_RESOLVE_ALWAYS);
1594 	error = hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
1595 	if (error)
1596 		goto fail;
1597 
1598 	wipdata = &chain->data->ipdata;
1599 
1600 	hammer2_inode_modify(xop->head.ip2);
1601 	if (xop->head.name1) {
1602 		bzero(wipdata->filename, sizeof(wipdata->filename));
1603 		bcopy(xop->head.name1, wipdata->filename, xop->head.name1_len);
1604 		wipdata->meta.name_len = xop->head.name1_len;
1605 	}
1606 	wipdata->meta.name_key = xop->lhc;
1607 
1608 	/*
1609 	 * Reconnect the chain to the new parent directory
1610 	 */
1611 	error = hammer2_chain_create(&parent, &chain,
1612 				     pmp, HAMMER2_METH_DEFAULT,
1613 				     xop->lhc, 0,
1614 				     HAMMER2_BREF_TYPE_INODE,
1615 				     HAMMER2_INODE_BYTES,
1616 				     xop->head.mtid, 0, 0);
1617 
1618 	/*
1619 	 * Feed result back.
1620 	 */
1621 fail:
1622 	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
1623 	if (parent) {
1624 		hammer2_chain_unlock(parent);
1625 		hammer2_chain_drop(parent);
1626 	}
1627 	if (chain) {
1628 		hammer2_chain_unlock(chain);
1629 		hammer2_chain_drop(chain);
1630 	}
1631 }
1632 
1633 /*
1634  * Synchronize the in-memory inode with the chain.
1635  */
1636 void
1637 hammer2_inode_xop_chain_sync(hammer2_thread_t *thr, hammer2_xop_t *arg)
1638 {
1639 	hammer2_xop_fsync_t *xop = &arg->xop_fsync;
1640 	hammer2_chain_t	*parent;
1641 	hammer2_chain_t	*chain;
1642 	int error;
1643 
1644 	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
1645 				     HAMMER2_RESOLVE_ALWAYS);
1646 	chain = NULL;
1647 	if (parent == NULL) {
1648 		error = HAMMER2_ERROR_EIO;
1649 		goto done;
1650 	}
1651 	if (parent->error) {
1652 		error = parent->error;
1653 		goto done;
1654 	}
1655 
1656 	error = 0;
1657 
1658 	if ((xop->ipflags & HAMMER2_INODE_RESIZED) == 0) {
1659 		/* osize must be ignored */
1660 	} else if (xop->meta.size < xop->osize) {
1661 		/*
1662 		 * We must delete any chains beyond the EOF.  The chain
1663 		 * straddling the EOF will be pending in the bioq.
1664 		 */
1665 		hammer2_key_t lbase;
1666 		hammer2_key_t key_next;
1667 
1668 		lbase = (xop->meta.size + HAMMER2_PBUFMASK64) &
1669 			~HAMMER2_PBUFMASK64;
1670 		chain = hammer2_chain_lookup(&parent, &key_next,
1671 					     lbase, HAMMER2_KEY_MAX,
1672 					     &error,
1673 					     HAMMER2_LOOKUP_NODATA |
1674 					     HAMMER2_LOOKUP_NODIRECT);
1675 		while (chain) {
1676 			/*
1677 			 * Degenerate embedded case, nothing to loop on
1678 			 */
1679 			switch (chain->bref.type) {
1680 			case HAMMER2_BREF_TYPE_DIRENT:
1681 			case HAMMER2_BREF_TYPE_INODE:
1682 				KKASSERT(0);
1683 				break;
1684 			case HAMMER2_BREF_TYPE_DATA:
1685 				hammer2_chain_delete(parent, chain,
1686 						     xop->head.mtid,
1687 						     HAMMER2_DELETE_PERMANENT);
1688 				break;
1689 			}
1690 			chain = hammer2_chain_next(&parent, chain, &key_next,
1691 						   key_next, HAMMER2_KEY_MAX,
1692 						   &error,
1693 						   HAMMER2_LOOKUP_NODATA |
1694 						   HAMMER2_LOOKUP_NODIRECT);
1695 		}
1696 
1697 		/*
1698 		 * Reset to point at inode for following code, if necessary.
1699 		 */
1700 		if (parent->bref.type != HAMMER2_BREF_TYPE_INODE) {
1701 			hammer2_chain_unlock(parent);
1702 			hammer2_chain_drop(parent);
1703 			parent = hammer2_inode_chain(xop->head.ip1,
1704 						     thr->clindex,
1705 						     HAMMER2_RESOLVE_ALWAYS);
1706 			kprintf("hammer2: TRUNCATE RESET on '%s'\n",
1707 				parent->data->ipdata.filename);
1708 		}
1709 	}
1710 
1711 	/*
1712 	 * Sync the inode meta-data, potentially clear the blockset area
1713 	 * of direct data so it can be used for blockrefs.
1714 	 */
1715 	if (error == 0) {
1716 		error = hammer2_chain_modify(parent, xop->head.mtid, 0, 0);
1717 		if (error == 0) {
1718 			parent->data->ipdata.meta = xop->meta;
1719 			if (xop->clear_directdata) {
1720 				bzero(&parent->data->ipdata.u.blockset,
1721 				      sizeof(parent->data->ipdata.u.blockset));
1722 			}
1723 		}
1724 	}
1725 done:
1726 	if (chain) {
1727 		hammer2_chain_unlock(chain);
1728 		hammer2_chain_drop(chain);
1729 	}
1730 	if (parent) {
1731 		hammer2_chain_unlock(parent);
1732 		hammer2_chain_drop(parent);
1733 	}
1734 	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
1735 }
1736 
1737