1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org>
5  * Copyright (c) 2011-2023 The DragonFly Project.  All rights reserved.
6  *
7  * This code is derived from software contributed to The DragonFly Project
8  * by Matthew Dillon <dillon@dragonflybsd.org>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 /*
38 #include <sys/cdefs.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/types.h>
42 #include <sys/lock.h>
43 #include <sys/uuid.h>
44 #include <sys/vnode.h>
45 */
46 
47 #include "hammer2.h"
48 
49 #define INODE_DEBUG	0
50 
51 /*
52  * Initialize inum hash in fresh structure
53  */
54 void
55 hammer2_inum_hash_init(hammer2_pfs_t *pmp)
56 {
57 	hammer2_inum_hash_t *hash;
58 	int i;
59 
60 	for (i = 0; i < HAMMER2_INUMHASH_SIZE; ++i) {
61 		hash = &pmp->inumhash[i];
62 		hammer2_spin_init(&hash->spin, "h2inum");
63 	}
64 }
65 
66 /*
67  * Caller holds pmp->list_spin and the inode should be locked.  Merge ip
68  * with the specified depend.
69  *
70  * If the ip is on SYNCQ it stays there and (void *)-1 is returned, indicating
71  * that successive calls must ensure the ip is on a pass2 depend (or they are
72  * all SYNCQ).  If the passed-in depend is not NULL and not (void *)-1 then
73  * we can set pass2 on it and return.
74  *
75  * If the ip is not on SYNCQ it is merged with the passed-in depend, creating
76  * a self-depend if necessary, and depend->pass2 is set according
77  * to the PASS2 flag.  SIDEQ is set.
78  */
79 static __noinline
80 hammer2_depend_t *
81 hammer2_inode_setdepend_locked(hammer2_inode_t *ip, hammer2_depend_t *depend)
82 {
83 	hammer2_pfs_t *pmp = ip->pmp;
84 	hammer2_depend_t *dtmp;
85 	hammer2_inode_t *iptmp;
86 
87 	/*
88 	 * If ip is SYNCQ its entry is used for the syncq list and it will
89 	 * no longer be associated with a dependency.  Merging this status
90 	 * with a passed-in depend implies PASS2.
91 	 */
92 	if (ip->flags & HAMMER2_INODE_SYNCQ) {
93 		if (depend == (void *)-1 ||
94 		    depend == NULL) {
95 			return ((void *)-1);
96 		}
97 		depend->pass2 = 1;
98 		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);
99 
100 		return depend;
101 	}
102 
103 	/*
104 	 * If ip is already SIDEQ, merge ip->depend into the passed-in depend.
105 	 * If it is not, associate the ip with the passed-in depend, creating
106 	 * a single-entry dependency using depend_static if necessary.
107 	 *
108 	 * NOTE: The use of ip->depend_static always requires that the
109 	 *	 specific ip containing the structure is part of that
110 	 *	 particular depend_static's dependency group.
111 	 */
112 	if (ip->flags & HAMMER2_INODE_SIDEQ) {
113 		/*
114 		 * Merge ip->depend with the passed-in depend.  If the
115 		 * passed-in depend is not a special case, all ips associated
116 		 * with ip->depend (including the original ip) must be moved
117 		 * to the passed-in depend.
118 		 */
119 		if (depend == NULL) {
120 			depend = ip->depend;
121 		} else if (depend == (void *)-1) {
122 			depend = ip->depend;
123 			depend->pass2 = 1;
124 		} else if (depend != ip->depend) {
125 #ifdef INVARIANTS
126 			int sanitychk = 0;
127 #endif
128 			dtmp = ip->depend;
129 			while ((iptmp = TAILQ_FIRST(&dtmp->sideq)) != NULL) {
130 #ifdef INVARIANTS
131 				if (iptmp == ip)
132 					sanitychk = 1;
133 #endif
134 				TAILQ_REMOVE(&dtmp->sideq, iptmp, entry);
135 				TAILQ_INSERT_TAIL(&depend->sideq, iptmp, entry);
136 				iptmp->depend = depend;
137 			}
138 			KKASSERT(sanitychk == 1);
139 			depend->count += dtmp->count;
140 			depend->pass2 |= dtmp->pass2;
141 			TAILQ_REMOVE(&pmp->depq, dtmp, entry);
142 			dtmp->count = 0;
143 			dtmp->pass2 = 0;
144 		}
145 	} else {
146 		/*
147 		 * Add ip to the sideq, creating a self-dependency if
148 		 * necessary.
149 		 */
150 		hammer2_inode_ref(ip); /* extra ref usually via hammer2_inode_modify() */
151 		atomic_set_int(&ip->flags, HAMMER2_INODE_SIDEQ);
152 		if (depend == NULL) {
153 			depend = &ip->depend_static;
154 			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
155 		} else if (depend == (void *)-1) {
156 			depend = &ip->depend_static;
157 			depend->pass2 = 1;
158 			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
159 		} /* else add ip to passed-in depend */
160 		TAILQ_INSERT_TAIL(&depend->sideq, ip, entry);
161 		ip->depend = depend;
162 		++depend->count;
163 		++pmp->sideq_count;
164 	}
165 
166 	if (ip->flags & HAMMER2_INODE_SYNCQ_PASS2)
167 		depend->pass2 = 1;
168 	if (depend->pass2)
169 		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);
170 
171 	return depend;
172 }
173 
174 /*
175  * Put a solo inode on the SIDEQ (meaning that it is dirty).  This can also
176  * occur from inode_lock4() and inode_depend().
177  *
178  * Caller must pass-in a locked inode.
179  */
180 void
181 hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
182 {
183 	hammer2_pfs_t *pmp = ip->pmp;
184 
185 	/*
186 	 * Optimize case to avoid pmp spinlock.
187 	 */
188 	if ((ip->flags & (HAMMER2_INODE_SYNCQ | HAMMER2_INODE_SIDEQ)) == 0) {
189 		hammer2_spin_ex(&pmp->list_spin);
190 		hammer2_inode_setdepend_locked(ip, NULL);
191 		hammer2_spin_unex(&pmp->list_spin);
192 	}
193 }
194 
195 /*
196  * Lock an inode, with SYNCQ semantics.
197  *
198  * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
199  * flags for options:
200  *
201  *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.
202  *	  Shared locks are not subject to SYNCQ semantics; exclusive locks
203  *	  are.
204  *
205  *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
206  *	  Most front-end inode locks do.
207  *
208  *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
209  *	  the inode data be resolved.  This is used by the syncthr because
210  *	  it can run on an unresolved/out-of-sync cluster, and also by the
211  *	  vnode reclamation code to avoid unnecessary I/O (particularly when
212  *	  disposing of hundreds of thousands of cached vnodes).
213  *
214  * This function, along with lock4, has SYNCQ semantics.  If the inode being
215  * locked is on the SYNCQ, that is, it has been staged by the syncer, we must
216  * block until the operation is complete (even if we can lock the inode).  In
217  * order to reduce the stall time, we re-order the inode to the front of the
218  * pmp->syncq prior to blocking.  This reordering VERY significantly improves
219  * performance.
220  *
221  * The inode locking function locks the inode itself, resolves any stale
222  * chains in the inode's cluster, and allocates a fresh copy of the
223  * cluster with 1 ref and all the underlying chains locked.
224  *
225  * ip->cluster will be stable while the inode is locked.
226  *
227  * NOTE: We don't combine the inode/chain lock because putting away an
228  *       inode would otherwise confuse multiple lock holders of the inode.
229  */
230 void
231 hammer2_inode_lock(hammer2_inode_t *ip, int how)
232 {
233 	hammer2_pfs_t *pmp;
234 
235 	hammer2_inode_ref(ip);
236 	pmp = ip->pmp;
237 
238 	/*
239 	 * Inode structure mutex - Shared lock
240 	 */
241 	if (how & HAMMER2_RESOLVE_SHARED) {
242 		hammer2_mtx_sh(&ip->lock);
243 		return;
244 	}
245 
246 	/*
247 	 * Inode structure mutex - Exclusive lock
248 	 *
249 	 * An exclusive lock (if not recursive) must wait for inodes on
250 	 * SYNCQ to flush first, to ensure that meta-data dependencies such
251 	 * as the nlink count and related directory entries are not split
252 	 * across flushes.
253 	 *
254 	 * If the vnode is locked by the current thread it must be unlocked
255 	 * across the tsleep() to avoid a deadlock.
256 	 */
257 	hammer2_mtx_ex(&ip->lock);
258 	if (hammer2_mtx_refs(&ip->lock) > 1)
259 		return;
260 	while ((ip->flags & HAMMER2_INODE_SYNCQ) && pmp) {
261 		hammer2_spin_ex(&pmp->list_spin);
262 		if (ip->flags & HAMMER2_INODE_SYNCQ) {
263 			tsleep_interlock(&ip->flags, 0);
264 			atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
265 			TAILQ_REMOVE(&pmp->syncq, ip, entry);
266 			TAILQ_INSERT_HEAD(&pmp->syncq, ip, entry);
267 			hammer2_spin_unex(&pmp->list_spin);
268 			hammer2_mtx_unlock(&ip->lock);
269 			tsleep(&ip->flags, PINTERLOCKED, "h2sync", 0);
270 			hammer2_mtx_ex(&ip->lock);
271 			continue;
272 		}
273 		hammer2_spin_unex(&pmp->list_spin);
274 		break;
275 	}
276 }
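
/*
 * Illustrative usage sketch (added commentary, not part of the original
 * source).  A typical front-end path takes an exclusive lock with the
 * meta-data resolved and pairs it with hammer2_inode_unlock():
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
 *	...inspect or modify ip->meta / ip->cluster...
 *	hammer2_inode_unlock(ip);
 *
 * A read-only path that must not stall on SYNCQ would instead pass
 * HAMMER2_RESOLVE_SHARED | HAMMER2_RESOLVE_ALWAYS.
 */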
277 
278 /*
279  * Exclusively lock up to four inodes, in order, with SYNCQ semantics.
280  * ip1 and ip2 must not be NULL.  ip3 and ip4 may be NULL, but if ip3 is
281  * NULL then ip4 must also be NULL.
282  *
283  * This creates a dependency between up to four inodes.
284  */
285 void
286 hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
287 		    hammer2_inode_t *ip3, hammer2_inode_t *ip4)
288 {
289 	hammer2_inode_t *ips[4];
290 	hammer2_inode_t *iptmp;
291 	hammer2_inode_t *ipslp;
292 	hammer2_depend_t *depend;
293 	hammer2_pfs_t *pmp;
294 	size_t count;
295 	size_t i;
296 
297 	pmp = ip1->pmp;			/* may be NULL */
298 	KKASSERT(pmp == ip2->pmp);
299 
300 	ips[0] = ip1;
301 	ips[1] = ip2;
302 	if (ip3 == NULL) {
303 		count = 2;
304 	} else if (ip4 == NULL) {
305 		count = 3;
306 		ips[2] = ip3;
307 		KKASSERT(pmp == ip3->pmp);
308 	} else {
309 		count = 4;
310 		ips[2] = ip3;
311 		ips[3] = ip4;
312 		KKASSERT(pmp == ip3->pmp);
313 		KKASSERT(pmp == ip4->pmp);
314 	}
315 
316 	for (i = 0; i < count; ++i)
317 		hammer2_inode_ref(ips[i]);
318 
319 restart:
320 	/*
321 	 * Lock the inodes in order
322 	 */
323 	for (i = 0; i < count; ++i) {
324 		hammer2_mtx_ex(&ips[i]->lock);
325 	}
326 
327 	/*
328 	 * Associate dependencies and record the first inode found on SYNCQ
329 	 * (the operation is allowed to proceed for inodes on PASS2) for our
330 	 * sleep operation; this inode is theoretically the last one sync'd
331 	 * in the sequence.
332 	 *
333 	 * All inodes found on SYNCQ are moved to the head of the syncq
334 	 * to reduce stalls.
335 	 */
336 	hammer2_spin_ex(&pmp->list_spin);
337 	depend = NULL;
338 	ipslp = NULL;
339 	for (i = 0; i < count; ++i) {
340 		iptmp = ips[i];
341 		depend = hammer2_inode_setdepend_locked(iptmp, depend);
342 		if (iptmp->flags & HAMMER2_INODE_SYNCQ) {
343 			TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
344 			TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
345 			if (ipslp == NULL)
346 				ipslp = iptmp;
347 		}
348 	}
349 	hammer2_spin_unex(&pmp->list_spin);
350 
351 	/*
352 	 * Block and retry if any of the inodes are on SYNCQ.  It is
353 	 * important that we allow the operation to proceed in the
354 	 * PASS2 case, to avoid deadlocking against the vnode.
355 	 */
356 	if (ipslp) {
357 		for (i = 0; i < count; ++i)
358 			hammer2_mtx_unlock(&ips[i]->lock);
359 		tsleep(&ipslp->flags, 0, "h2sync", 2);
360 		goto restart;
361 	}
362 }
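
/*
 * Illustrative sketch (an assumption about caller usage, not from the
 * original source).  An operation touching a directory and a file inode
 * locks both in one call so they land in the same dependency group:
 *
 *	hammer2_inode_lock4(dip, ip, NULL, NULL);
 *	...perform the namespace operation...
 *	hammer2_inode_unlock(ip);
 *	hammer2_inode_unlock(dip);
 */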
363 
364 /*
365  * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
366  * we wake them up.
367  */
368 void
369 hammer2_inode_unlock(hammer2_inode_t *ip)
370 {
371 	if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) {
372 		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
373 		hammer2_mtx_unlock(&ip->lock);
374 		wakeup(&ip->flags);
375 	} else {
376 		hammer2_mtx_unlock(&ip->lock);
377 	}
378 	hammer2_inode_drop(ip);
379 }
380 
381 /*
382  * If either ip1 or ip2 has been tapped by the syncer, make sure that both
383  * are.  This ensures that dependencies (e.g. dirent-v-inode) are synced
384  * together.  For dirent-v-inode depends, pass the dirent as ip1.
385  *
386  * If neither ip1 nor ip2 has been tapped by the syncer, merge them into a
387  * single dependency.  Dependencies are entered into pmp->depq.  This
388  * effectively flags the inodes SIDEQ.
389  *
390  * Both ip1 and ip2 must be locked by the caller.  This also ensures
391  * that we can't race the end of the syncer's queue run.
392  */
393 void
394 hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
395 {
396 	hammer2_pfs_t *pmp;
397 	hammer2_depend_t *depend;
398 
399 	pmp = ip1->pmp;
400 	hammer2_spin_ex(&pmp->list_spin);
401 	depend = hammer2_inode_setdepend_locked(ip1, NULL);
402 	depend = hammer2_inode_setdepend_locked(ip2, depend);
403 	hammer2_spin_unex(&pmp->list_spin);
404 }
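
/*
 * Illustrative sketch (assumption): for a dirent-vs-inode dependency the
 * directory inode containing the dirent is passed as ip1, per the comment
 * above, with both inodes locked by the caller:
 *
 *	hammer2_inode_depend(dip, ip);
 */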
405 
406 /*
407  * Select a chain out of an inode's cluster and lock it.
408  *
409  * The inode does not have to be locked.
410  */
411 hammer2_chain_t *
412 hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
413 {
414 	hammer2_chain_t *chain;
415 	hammer2_cluster_t *cluster;
416 
417 	hammer2_spin_sh(&ip->cluster_spin);
418 	cluster = &ip->cluster;
419 	if (clindex >= cluster->nchains)
420 		chain = NULL;
421 	else
422 		chain = cluster->array[clindex].chain;
423 	if (chain) {
424 		hammer2_chain_ref(chain);
425 		hammer2_spin_unsh(&ip->cluster_spin);
426 		hammer2_chain_lock(chain, how);
427 	} else {
428 		hammer2_spin_unsh(&ip->cluster_spin);
429 	}
430 	return chain;
431 }
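
/*
 * Illustrative sketch (added commentary): the returned chain is referenced
 * and locked, so the caller releases it with the usual unlock/drop pair:
 *
 *	chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
 *	if (chain) {
 *		...use chain...
 *		hammer2_chain_unlock(chain);
 *		hammer2_chain_drop(chain);
 *	}
 */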
432 
433 hammer2_chain_t *
434 hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
435 			       hammer2_chain_t **parentp, int how)
436 {
437 	hammer2_chain_t *chain;
438 	hammer2_chain_t *parent;
439 
440 	for (;;) {
441 		hammer2_spin_sh(&ip->cluster_spin);
442 		if (clindex >= ip->cluster.nchains)
443 			chain = NULL;
444 		else
445 			chain = ip->cluster.array[clindex].chain;
446 		if (chain) {
447 			hammer2_chain_ref(chain);
448 			hammer2_spin_unsh(&ip->cluster_spin);
449 			hammer2_chain_lock(chain, how);
450 		} else {
451 			hammer2_spin_unsh(&ip->cluster_spin);
452 		}
453 
454 		/*
455 		 * Get parent, lock order must be (parent, chain).
456 		 */
457 		parent = chain->parent;
458 		if (parent) {
459 			hammer2_chain_ref(parent);
460 			hammer2_chain_unlock(chain);
461 			hammer2_chain_lock(parent, how);
462 			hammer2_chain_lock(chain, how);
463 		}
464 		if (ip->cluster.array[clindex].chain == chain &&
465 		    chain->parent == parent) {
466 			break;
467 		}
468 
469 		/*
470 		 * Retry
471 		 */
472 		hammer2_chain_unlock(chain);
473 		hammer2_chain_drop(chain);
474 		if (parent) {
475 			hammer2_chain_unlock(parent);
476 			hammer2_chain_drop(parent);
477 		}
478 	}
479 	*parentp = parent;
480 
481 	return chain;
482 }
483 
484 /*
485  * Temporarily release a lock held shared or exclusive.  Caller must
486  * hold the lock shared or exclusive on call and the lock will be released
487  * on return.
488  *
489  * Restore a lock that was temporarily released.
490  */
491 hammer2_mtx_state_t
492 hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
493 {
494 	return hammer2_mtx_temp_release(&ip->lock);
495 }
496 
497 void
498 hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
499 {
500 	hammer2_mtx_temp_restore(&ip->lock, ostate);
501 }
502 
503 /*
504  * Upgrade a shared inode lock to exclusive and return.  If the inode lock
505  * is already held exclusively this is a NOP.
506  *
507  * The caller MUST hold the inode lock either shared or exclusive on call
508  * and will own the lock exclusively on return.
509  *
510  * Returns non-zero if the lock was already exclusive prior to the upgrade.
511  */
512 int
513 hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
514 {
515 	int wasexclusive;
516 
517 	/* XXX pretends it wasn't exclusive, but shouldn't matter */
518 	//if (mtx_islocked_ex(&ip->lock)) {
519 	if (0) {
520 		wasexclusive = 1;
521 	} else {
522 		hammer2_mtx_unlock(&ip->lock);
523 		hammer2_mtx_ex(&ip->lock);
524 		wasexclusive = 0;
525 	}
526 	return wasexclusive;
527 }
528 
529 /*
530  * Downgrade an inode lock from exclusive to shared only if the inode
531  * lock was previously shared.  If the inode lock was previously exclusive,
532  * this is a NOP.
533  */
534 void
535 hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
536 {
537 	if (wasexclusive == 0)
538 		hammer2_mtx_downgrade(&ip->lock);
539 }
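
/*
 * Illustrative sketch (mirrors the pattern used by hammer2_igetv() below):
 * a caller holding a shared lock temporarily upgrades it and later
 * restores the original state:
 *
 *	wasexclusive = hammer2_inode_lock_upgrade(ip);
 *	...exclusive-only work...
 *	hammer2_inode_lock_downgrade(ip, wasexclusive);
 */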
540 
541 static __inline hammer2_inum_hash_t *
542 inumhash(hammer2_pfs_t *pmp, hammer2_tid_t inum)
543 {
544 	int hv;
545 
546 	hv = (int)inum;
547 	return (&pmp->inumhash[hv & HAMMER2_INUMHASH_MASK]);
548 }
549 
550 
551 /*
552  * Lookup an inode by inode number
553  */
554 hammer2_inode_t *
555 hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
556 {
557 	hammer2_inum_hash_t *hash;
558 	hammer2_inode_t *ip;
559 
560 	KKASSERT(pmp);
561 	if (pmp->spmp_hmp) {
562 		ip = NULL;
563 	} else {
564 		hash = inumhash(pmp, inum);
565 		hammer2_spin_sh(&hash->spin);
566 		for (ip = hash->base; ip; ip = ip->next) {
567 			if (ip->meta.inum == inum) {
568 				hammer2_inode_ref(ip);
569 				break;
570 			}
571 		}
572 		hammer2_spin_unsh(&hash->spin);
573 	}
574 	return(ip);
575 }
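
/*
 * Illustrative sketch (added commentary): a successful lookup returns a
 * referenced inode which the caller must eventually release:
 *
 *	ip = hammer2_inode_lookup(pmp, inum);
 *	if (ip) {
 *		...use ip...
 *		hammer2_inode_drop(ip);
 *	}
 */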
576 
577 /*
578  * Adding a ref to an inode is only legal if the inode already has at least
579  * one ref.
580  *
581  * (can be called with spinlock held)
582  */
583 void
584 hammer2_inode_ref(hammer2_inode_t *ip)
585 {
586 	atomic_add_int(&ip->refs, 1);
587 	if (hammer2_debug & 0x80000) {
588 		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
589 		print_backtrace(8);
590 	}
591 }
592 
593 /*
594  * Drop an inode reference, freeing the inode when the last reference goes
595  * away.
596  */
597 void
598 hammer2_inode_drop(hammer2_inode_t *ip)
599 {
600 	hammer2_pfs_t *pmp;
601 	u_int refs;
602 
603 	while (ip) {
604 		if (hammer2_debug & 0x80000) {
605 			kprintf("INODE-1 %p (%d->%d)\n",
606 				ip, ip->refs, ip->refs - 1);
607 			print_backtrace(8);
608 		}
609 		refs = ip->refs;
610 		cpu_ccfence();
611 		if (refs == 1) {
612 			/*
613 			 * Transition to zero, must interlock with
614 			 * the inode inumber lookup hash (if applicable).
615 			 * It should not be possible for anyone to race
616 			 * the transition to 0.
617 			 */
618 			hammer2_inum_hash_t *hash;
619 			hammer2_inode_t **xipp;
620 
621 			pmp = ip->pmp;
622 			KKASSERT(pmp);
623 			hash = inumhash(pmp, ip->meta.inum);
624 
625 			hammer2_spin_ex(&hash->spin);
626 			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
627 				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
628 				if (ip->flags & HAMMER2_INODE_ONHASH) {
629 					xipp = &hash->base;
630 					while (*xipp != ip)
631 						xipp = &(*xipp)->next;
632 					*xipp = ip->next;
633 					ip->next = NULL;
634 					atomic_add_long(&pmp->inum_count, -1);
635 					atomic_clear_int(&ip->flags,
636 						     HAMMER2_INODE_ONHASH);
637 				}
638 				hammer2_spin_unex(&hash->spin);
639 
640 				ip->pmp = NULL;
641 
642 				/*
643 				 * Cleaning out ip->cluster isn't entirely
644 				 * trivial.
645 				 */
646 				hammer2_inode_repoint(ip, NULL);
647 				/*
648 				 * Add inode to reclaim queue.
649 				 */
650 				TAILQ_INSERT_TAIL(&pmp->recq, ip, recq_entry);
651 				ip = NULL;	/* will terminate loop */
652 			} else {
653 				hammer2_spin_unex(&hash->spin);
654 			}
655 		} else {
656 			/*
657 			 * Non zero transition
658 			 */
659 			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
660 				break;
661 		}
662 	}
663 }
664 
665 /*
666  * Get the vnode associated with the given inode, allocating the vnode if
667  * necessary.  The vnode will be returned exclusively locked.
668  *
669  * *errorp is set to a UNIX error, not a HAMMER2 error.
670  *
671  * The caller must lock the inode (shared or exclusive).
672  *
673  * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
674  * races.
675  */
676 struct m_vnode *
677 hammer2_igetv(hammer2_inode_t *ip, int *errorp)
678 {
679 	hammer2_pfs_t *pmp;
680 	struct m_vnode *vp;
681 
682 	pmp = ip->pmp;
683 	KKASSERT(pmp != NULL);
684 	*errorp = 0;
685 
686 	for (;;) {
687 		/*
688 		 * Attempt to reuse an existing vnode assignment.  It is
689 		 * possible to race a reclaim so the vget() may fail.  The
690 		 * inode must be unlocked during the vget() to avoid a
691 		 * deadlock against a reclaim.
692 		 */
693 		int wasexclusive;
694 
695 		vp = ip->vp;
696 		if (vp) {
697 			/*
698 			 * Inode must be unlocked during the vget() to avoid
699 			 * possible deadlocks, but leave the ip ref intact.
700 			 *
701 			 * vnode is held to prevent destruction during the
702 			 * vget().  The vget() can still fail if we lost
703 			 * a reclaim race on the vnode.
704 			 */
705 			hammer2_mtx_state_t ostate;
706 
707 			vhold(vp);
708 			ostate = hammer2_inode_lock_temp_release(ip);
709 			if (vget(vp, LK_EXCLUSIVE)) {
710 				vdrop(vp);
711 				hammer2_inode_lock_temp_restore(ip, ostate);
712 				continue;
713 			}
714 			hammer2_inode_lock_temp_restore(ip, ostate);
715 			vdrop(vp);
716 			/* vp still locked and ref from vget */
717 			if (ip->vp != vp) {
718 				kprintf("hammer2: igetv race %p/%p\n",
719 					ip->vp, vp);
720 				vput(vp);
721 				continue;
722 			}
723 			*errorp = 0;
724 			break;
725 		}
726 
727 		/*
728 		 * No vnode exists, allocate a new vnode.  Beware of
729 		 * allocation races.  This function will return an
730 		 * exclusively locked and referenced vnode.
731 		 */
732 		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
733 		if (*errorp) {
734 			kprintf("hammer2: igetv getnewvnode failed %d\n",
735 				*errorp);
736 			vp = NULL;
737 			break;
738 		}
739 
740 		/*
741 		 * Lock the inode and check for an allocation race.
742 		 */
743 		wasexclusive = hammer2_inode_lock_upgrade(ip);
744 		if (ip->vp != NULL) {
745 			vp->v_type = VBAD;
746 			vx_put(vp);
747 			hammer2_inode_lock_downgrade(ip, wasexclusive);
748 			continue;
749 		}
750 
751 		switch (ip->meta.type) {
752 		case HAMMER2_OBJTYPE_DIRECTORY:
753 			vp->v_type = VDIR;
754 			break;
755 		case HAMMER2_OBJTYPE_REGFILE:
756 			/*
757 			 * Regular file must use buffer cache I/O
758 			 * (VKVABIO cpu sync semantics supported)
759 			 */
760 			vp->v_type = VREG;
761 			vsetflags(vp, VKVABIO);
762 			vinitvmio(vp, ip->meta.size,
763 				  HAMMER2_LBUFSIZE,
764 				  (int)ip->meta.size & HAMMER2_LBUFMASK);
765 			break;
766 		case HAMMER2_OBJTYPE_SOFTLINK:
767 			/*
768 			 * XXX for now we are using the generic file_read
769 			 * and file_write code so we need a buffer cache
770 			 * association.
771 			 *
772 			 * (VKVABIO cpu sync semantics supported)
773 			 */
774 			vp->v_type = VLNK;
775 			vsetflags(vp, VKVABIO);
776 			vinitvmio(vp, ip->meta.size,
777 				  HAMMER2_LBUFSIZE,
778 				  (int)ip->meta.size & HAMMER2_LBUFMASK);
779 			break;
780 		case HAMMER2_OBJTYPE_CDEV:
781 			vp->v_type = VCHR;
782 			/* fall through */
783 		case HAMMER2_OBJTYPE_BDEV:
784 			//vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
785 			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
786 				vp->v_type = VBLK;
787 			addaliasu(vp,
788 				  ip->meta.rmajor,
789 				  ip->meta.rminor);
790 			break;
791 		case HAMMER2_OBJTYPE_FIFO:
792 			vp->v_type = VFIFO;
793 			//vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
794 			break;
795 		case HAMMER2_OBJTYPE_SOCKET:
796 			vp->v_type = VSOCK;
797 			break;
798 		default:
799 			panic("hammer2: unhandled objtype %d",
800 			      ip->meta.type);
801 			break;
802 		}
803 
804 		if (ip == pmp->iroot)
805 			vsetflags(vp, VROOT);
806 
807 		vp->v_data = ip;
808 		ip->vp = vp;
809 		hammer2_inode_ref(ip);		/* vp association */
810 		hammer2_inode_lock_downgrade(ip, wasexclusive);
811 		vx_downgrade(vp);
812 		break;
813 	}
814 
815 	/*
816 	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
817 	 */
818 	if (hammer2_debug & 0x0002) {
819 		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
820 			vp, -1, -1);
821 	}
822 	return (vp);
823 }
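
/*
 * Illustrative sketch (an assumption about the calling convention, based
 * on the comments above): the inode is locked around the call and the
 * returned vnode, if any, is locked and referenced:
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *	vp = hammer2_igetv(ip, &error);
 *	hammer2_inode_unlock(ip);
 */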
824 
825 /*
826  * XXX this API needs a rewrite.  It needs to be split into a
827  * hammer2_inode_alloc() and hammer2_inode_build() to allow us to get
828  * rid of the inode/chain lock reversal fudge.
829  *
830  * Returns the inode associated with the passed-in cluster, allocating a new
831  * hammer2_inode structure if necessary, then synchronizing it to the passed
832  * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
833  * is synchronized.  Otherwise the whole cluster is synchronized.  inum will
834  * be extracted from the passed-in xop and the inum argument will be ignored.
835  *
836  * If xop is passed as NULL then a new hammer2_inode is allocated with the
837  * specified inum, and returned.   For normal inodes, the inode will be
838  * indexed in memory and if it already exists the existing ip will be
839  * returned instead of allocating a new one.  The superroot and PFS inodes
840  * are not indexed in memory.
841  *
842  * The passed-in cluster must be locked and will remain locked on return.
843  * The returned inode will be locked and the caller may dispose of both
844  * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
845  * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
846  *
847  * The hammer2_inode structure regulates the interface between the high level
848  * kernel VNOPS API and the filesystem backend (the chains).
849  *
850  * On return the inode is locked with the supplied cluster.
851  */
852 hammer2_inode_t *
853 hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
854 		  hammer2_tid_t inum, int idx)
855 {
856 	hammer2_inode_t *nip;
857 	const hammer2_inode_data_t *iptmp;
858 	const hammer2_inode_data_t *nipdata;
859 
860 	KKASSERT(xop == NULL ||
861 		 hammer2_cluster_type(&xop->cluster) ==
862 		 HAMMER2_BREF_TYPE_INODE);
863 	KKASSERT(pmp);
864 
865 	/*
866 	 * Interlocked lookup/ref of the inode.  This code is only needed
867 	 * when looking up inodes with nlinks != 0 (TODO: optimize out
868 	 * otherwise and test for duplicates).
869 	 *
870 	 * Cluster can be NULL during the initial pfs allocation.
871 	 */
872 	if (xop) {
873 		iptmp = &hammer2_xop_gdata(xop)->ipdata;
874 		inum = iptmp->meta.inum;
875 		hammer2_xop_pdata(xop);
876 	}
877 again:
878 	nip = hammer2_inode_lookup(pmp, inum);
879 	if (nip) {
880 		/*
881 		 * We may have to unhold the cluster to avoid a deadlock
882 		 * against vnlru (and possibly other XOPs).
883 		 */
884 		if (xop) {
885 			if (hammer2_mtx_ex_try(&nip->lock) != 0) {
886 				hammer2_cluster_unhold(&xop->cluster);
887 				hammer2_mtx_ex(&nip->lock);
888 				hammer2_cluster_rehold(&xop->cluster);
889 			}
890 		} else {
891 			hammer2_mtx_ex(&nip->lock);
892 		}
893 
894 		/*
895 		 * Handle SMP race (not applicable to the super-root spmp
896 		 * which can't index inodes due to duplicative inode numbers).
897 		 */
898 		if (pmp->spmp_hmp == NULL &&
899 		    (nip->flags & HAMMER2_INODE_ONHASH) == 0) {
900 			hammer2_mtx_unlock(&nip->lock);
901 			hammer2_inode_drop(nip);
902 			goto again;
903 		}
904 		if (xop) {
905 			if (idx >= 0)
906 				hammer2_inode_repoint_one(nip, &xop->cluster,
907 							  idx);
908 			else
909 				hammer2_inode_repoint(nip, &xop->cluster);
910 		}
911 		return nip;
912 	}
913 
914 	/*
915 	 * We couldn't find the inode number, create a new inode and try to
916 	 * insert it, handle insertion races.
917 	 */
918 	nip = kmalloc_obj(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
919 	hammer2_spin_init(&nip->cluster_spin, "h2clspin");
920 	atomic_add_long(&pmp->inmem_inodes, 1);
921 
922 	/*
923 	 * Initialize nip's cluster.  A cluster is provided for normal
924 	 * inodes but typically not for the super-root or PFS inodes.
925 	 */
926 	{
927 		hammer2_inode_t *nnip = nip;
928 		nip->ihash = (int)hammer2_icrc32(&nnip, sizeof(nnip));
929 	}
930 
931 	nip->cluster.refs = 1;
932 	nip->cluster.pmp = pmp;
933 	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
934 	if (xop) {
935 		nipdata = &hammer2_xop_gdata(xop)->ipdata;
936 		nip->meta = nipdata->meta;
937 		hammer2_xop_pdata(xop);
938 		hammer2_inode_repoint(nip, &xop->cluster);
939 	} else {
940 		nip->meta.inum = inum;		/* PFS inum is always 1 XXX */
941 		/* mtime will be updated when a cluster is available */
942 	}
943 
944 	nip->pmp = pmp;
945 
946 	/*
947 	 * ref and lock on nip gives it state compatible to after a
948 	 * hammer2_inode_lock() call.
949 	 */
950 	nip->refs = 1;
951 	hammer2_mtx_init(&nip->lock, "h2inode");
952 	hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
953 	hammer2_mtx_ex(&nip->lock);
954 	TAILQ_INIT(&nip->depend_static.sideq);
955 	/* combination of thread lock and chain lock == inode lock */
956 
957 	/*
958 	 * Attempt to add the inode.  If it fails we raced another inode
959 	 * get.  Undo all the work and try again.
960 	 */
961 	if (pmp->spmp_hmp == NULL) {
962 		hammer2_inum_hash_t *hash;
963 		hammer2_inode_t *xip;
964 		hammer2_inode_t **xipp;
965 
966 		hash = inumhash(pmp, nip->meta.inum);
967 		hammer2_spin_ex(&hash->spin);
968 		for (xipp = &hash->base;
969 		     (xip = *xipp) != NULL;
970 		     xipp = &xip->next)
971 		{
972 			if (xip->meta.inum == nip->meta.inum) {
973 				hammer2_spin_unex(&hash->spin);
974 				hammer2_mtx_unlock(&nip->lock);
975 				hammer2_inode_drop(nip);
976 				goto again;
977 			}
978 		}
979 		nip->next = NULL;
980 		*xipp = nip;
981 		atomic_set_int(&nip->flags, HAMMER2_INODE_ONHASH);
982 		atomic_add_long(&pmp->inum_count, 1);
983 		hammer2_spin_unex(&hash->spin);
984 	}
985 	return (nip);
986 }
987 
988 /*
989  * Create a PFS inode under the superroot.  This function will create the
990  * inode, its media chains, and also insert it into the media.
991  *
992  * Caller must be in a flush transaction because we are inserting the inode
993  * onto the media.
994  */
995 hammer2_inode_t *
996 hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
997 		     const char *name, size_t name_len,
998 		     int *errorp)
999 {
1000 	hammer2_xop_create_t *xop;
1001 	hammer2_inode_t *pip;
1002 	hammer2_inode_t *nip;
1003 	int error;
1004 	uint8_t pip_comp_algo;
1005 	uint8_t pip_check_algo;
1006 	hammer2_tid_t pip_inum;
1007 	hammer2_key_t lhc;
1008 
1009 	pip = spmp->iroot;
1010 	nip = NULL;
1011 
1012 	lhc = hammer2_dirhash(name, name_len);
1013 	*errorp = 0;
1014 
1015 	/*
1016 	 * Locate the inode or indirect block to create the new
1017 	 * entry in.  At the same time check for key collisions
1018 	 * and iterate until we don't get one.
1019 	 *
1020 	 * Lock the directory exclusively for now to guarantee that
1021 	 * we can find an unused lhc for the name.  Due to collisions,
1022 	 * two different creates can end up with the same lhc so we
1023 	 * cannot depend on the OS to prevent the collision.
1024 	 */
1025 	hammer2_inode_lock(pip, 0);
1026 
1027 	pip_comp_algo = pip->meta.comp_algo;
1028 	pip_check_algo = pip->meta.check_algo;
1029 	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;
1030 
1031 	/*
1032 	 * Locate an unused key in the collision space.
1033 	 */
1034 	{
1035 		hammer2_xop_scanlhc_t *sxop;
1036 		hammer2_key_t lhcbase;
1037 
1038 		lhcbase = lhc;
1039 		sxop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
1040 		sxop->lhc = lhc;
1041 		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
1042 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
1043 			if (lhc != sxop->head.cluster.focus->bref.key)
1044 				break;
1045 			++lhc;
1046 		}
1047 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
1048 
1049 		if (error) {
1050 			if (error != HAMMER2_ERROR_ENOENT)
1051 				goto done2;
1052 			++lhc;
1053 			error = 0;
1054 		}
1055 		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
1056 			error = HAMMER2_ERROR_ENOSPC;
1057 			goto done2;
1058 		}
1059 	}
1060 
1061 	/*
1062 	 * Create the inode with the lhc as the key.
1063 	 */
1064 	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
1065 	xop->lhc = lhc;
1066 	xop->flags = HAMMER2_INSERT_PFSROOT;
1067 	bzero(&xop->meta, sizeof(xop->meta));
1068 
1069 	xop->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
1070 	xop->meta.inum = 1;
1071 	xop->meta.iparent = pip_inum;
1072 
1073 	/* Inherit parent's inode compression mode. */
1074 	xop->meta.comp_algo = pip_comp_algo;
1075 	xop->meta.check_algo = pip_check_algo;
1076 	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
1077 	hammer2_update_time(&xop->meta.ctime, false);
1078 	xop->meta.mtime = xop->meta.ctime;
1079 	xop->meta.mode = 0755;
1080 	xop->meta.nlinks = 1;
1081 
1082 	hammer2_xop_setname(&xop->head, name, name_len);
1083 	xop->meta.name_len = name_len;
1084 	xop->meta.name_key = lhc;
1085 	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
1086 
1087 	hammer2_xop_start(&xop->head, &hammer2_inode_create_desc);
1088 
1089 	error = hammer2_xop_collect(&xop->head, 0);
1090 #if INODE_DEBUG
1091 	kprintf("CREATE INODE %*.*s\n",
1092 		(int)name_len, (int)name_len, name);
1093 #endif
1094 
1095 	if (error) {
1096 		*errorp = error;
1097 		goto done;
1098 	}
1099 
1100 	/*
1101 	 * Set up the new inode if not a hardlink pointer.
1102 	 *
1103 	 * NOTE: *_get() integrates chain's lock into the inode lock.
1104 	 *
1105 	 * NOTE: Only one new inode can currently be created per
1106 	 *	 transaction.  If the need arises we can adjust
1107 	 *	 hammer2_trans_init() to allow more.
1108 	 *
1109 	 * NOTE: nipdata will have chain's blockset data.
1110 	 */
1111 	nip = hammer2_inode_get(pip->pmp, &xop->head, -1, -1);
1112 	nip->comp_heuristic = 0;
1113 done:
1114 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1115 done2:
1116 	hammer2_inode_unlock(pip);
1117 
1118 	return (nip);
1119 }
1120 
1121 /*
1122  * Create a new, normal inode.  This function will create the inode,
1123  * the media chains, but will not insert the chains onto the media topology
1124  * (doing so would require a flush transaction and cause long stalls).
1125  *
1126  * Caller must be in a normal transaction.
1127  */
1128 hammer2_inode_t *
1129 hammer2_inode_create_normal(hammer2_inode_t *pip,
1130 			    struct vattr *vap, struct ucred *cred,
1131 			    hammer2_key_t inum, int *errorp)
1132 {
1133 	hammer2_xop_create_t *xop;
1134 	hammer2_inode_t *dip;
1135 	hammer2_inode_t *nip;
1136 	int error;
1137 	uid_t xuid;
1138 	uuid_t pip_uid;
1139 	uuid_t pip_gid;
1140 	uint32_t pip_mode;
1141 	uint8_t pip_comp_algo;
1142 	uint8_t pip_check_algo;
1143 	hammer2_tid_t pip_inum;
1144 
1145 	dip = pip->pmp->iroot;
1146 	KKASSERT(dip != NULL);
1147 
1148 	*errorp = 0;
1149 
1150 	/*hammer2_inode_lock(dip, 0);*/
1151 
1152 	pip_uid = pip->meta.uid;
1153 	pip_gid = pip->meta.gid;
1154 	pip_mode = pip->meta.mode;
1155 	pip_comp_algo = pip->meta.comp_algo;
1156 	pip_check_algo = pip->meta.check_algo;
1157 	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;
1158 
1159 	/*
1160 	 * Create the in-memory hammer2_inode structure for the specified
1161 	 * inode.
1162 	 */
1163 	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
1164 	nip->comp_heuristic = 0;
1165 	KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
1166 		 nip->cluster.nchains == 0);
1167 	atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);
1168 
1169 	/*
1170 	 * Setup the inode meta-data
1171 	 */
1172 	nip->meta.type = hammer2_get_obj_type(vap->va_type);
1173 
1174 	switch (nip->meta.type) {
1175 	case HAMMER2_OBJTYPE_CDEV:
1176 	case HAMMER2_OBJTYPE_BDEV:
1177 		assert(0); /* XXX unsupported */
1178 		nip->meta.rmajor = vap->va_rmajor;
1179 		nip->meta.rminor = vap->va_rminor;
1180 		break;
1181 	default:
1182 		break;
1183 	}
1184 
1185 	KKASSERT(nip->meta.inum == inum);
1186 	nip->meta.iparent = pip_inum;
1187 
1188 	/* Inherit parent's inode compression mode. */
1189 	nip->meta.comp_algo = pip_comp_algo;
1190 	nip->meta.check_algo = pip_check_algo;
1191 	nip->meta.version = HAMMER2_INODE_VERSION_ONE;
1192 	hammer2_update_time(&nip->meta.ctime, false);
1193 	nip->meta.mtime = nip->meta.ctime;
1194 	nip->meta.mode = vap->va_mode;
1195 	nip->meta.nlinks = 1;
1196 
1197 	xuid = hammer2_to_unix_xid(&pip_uid);
1198 	xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
1199 				     xuid, cred,
1200 				     &vap->va_mode);
1201 	if (vap->va_vaflags & VA_UID_UUID_VALID)
1202 		nip->meta.uid = vap->va_uid_uuid;
1203 	else if (vap->va_uid != (uid_t)VNOVAL)
1204 		hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
1205 	else
1206 		hammer2_guid_to_uuid(&nip->meta.uid, xuid);
1207 
1208 	if (vap->va_vaflags & VA_GID_UUID_VALID)
1209 		nip->meta.gid = vap->va_gid_uuid;
1210 	else if (vap->va_gid != (gid_t)VNOVAL)
1211 		hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
1212 	else
1213 		nip->meta.gid = pip_gid;
1214 
1215 	/*
1216 	 * Regular files and softlinks allow a small amount of data to be
1217 	 * directly embedded in the inode.  This flag will be cleared if
1218 	 * the size is extended past the embedded limit.
1219 	 */
1220 	if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
1221 	    nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
1222 		nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
1223 	}
1224 
1225 	/*
1226 	 * Create the inode using (inum) as the key.  Pass pip for
1227 	 * method inheritance.
1228 	 */
1229 	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
1230 	xop->lhc = inum;
1231 	xop->flags = 0;
1232 	xop->meta = nip->meta;
1233 
1234 	xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
1235 	xop->meta.name_key = inum;
1236 	nip->meta.name_len = xop->meta.name_len;
1237 	nip->meta.name_key = xop->meta.name_key;
1238 	hammer2_inode_modify(nip);
1239 
1240 	/*
1241 	 * Create the inode media chains but leave them detached.  We are
1242 	 * not in a flush transaction so we can't mess with media topology
1243 	 * above normal inodes (i.e. the index of the inodes themselves).
1244 	 *
1245 	 * We've already set the INODE_CREATING flag.  The inode's media
1246 	 * chains will be inserted onto the media topology on the next
1247 	 * filesystem sync.
1248 	 */
1249 	hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);
1250 
1251 	error = hammer2_xop_collect(&xop->head, 0);
1252 #if INODE_DEBUG
1253 	kprintf("create inode type %d error %d\n", nip->meta.type, error);
1254 #endif
1255 
1256 	if (error) {
1257 		*errorp = error;
1258 		goto done;
1259 	}
1260 
1261 	/*
1262 	 * Associate the media chains created by the backend with the
1263 	 * frontend inode.
1264 	 */
1265 	hammer2_inode_repoint(nip, &xop->head.cluster);
1266 done:
1267 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1268 	/*hammer2_inode_unlock(dip);*/
1269 
1270 	return (nip);
1271 }
1272 
1273 /*
1274  * Create a directory entry under dip with the specified name, inode number,
1275  * and OBJTYPE (type).
1276  *
1277  * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
1278  *
1279  * Caller must hold dip locked.
1280  */
1281 int
1282 hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
1283 		      hammer2_key_t inum, uint8_t type)
1284 {
1285 	hammer2_xop_mkdirent_t *xop;
1286 	hammer2_key_t lhc;
1287 	int error;
1288 
1289 	lhc = 0;
1290 	error = 0;
1291 
1292 	KKASSERT(name != NULL);
1293 	lhc = hammer2_dirhash(name, name_len);
1294 
1295 	/*
1296 	 * Locate the inode or indirect block to create the new
1297 	 * entry in.  At the same time check for key collisions
1298 	 * and iterate until we don't get one.
1299 	 *
1300 	 * Lock the directory exclusively for now to guarantee that
1301 	 * we can find an unused lhc for the name.  Due to collisions,
1302 	 * two different creates can end up with the same lhc so we
1303 	 * cannot depend on the OS to prevent the collision.
1304 	 */
1305 	hammer2_inode_modify(dip);
1306 
1307 	/*
1308 	 * If name specified, locate an unused key in the collision space.
1309 	 * Otherwise use the passed-in lhc directly.
1310 	 */
1311 	{
1312 		hammer2_xop_scanlhc_t *sxop;
1313 		hammer2_key_t lhcbase;
1314 
1315 		lhcbase = lhc;
1316 		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1317 		sxop->lhc = lhc;
1318 		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
1319 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
1320 			if (lhc != sxop->head.cluster.focus->bref.key)
1321 				break;
1322 			++lhc;
1323 		}
1324 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
1325 
1326 		if (error) {
1327 			if (error != HAMMER2_ERROR_ENOENT)
1328 				goto done2;
1329 			++lhc;
1330 			error = 0;
1331 		}
1332 		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
1333 			error = HAMMER2_ERROR_ENOSPC;
1334 			goto done2;
1335 		}
1336 	}
1337 
1338 	/*
1339 	 * Create the directory entry with the lhc as the key.
1340 	 */
1341 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1342 	xop->lhc = lhc;
1343 	bzero(&xop->dirent, sizeof(xop->dirent));
1344 	xop->dirent.inum = inum;
1345 	xop->dirent.type = type;
1346 	xop->dirent.namlen = name_len;
1347 
1348 	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
1349 	hammer2_xop_setname(&xop->head, name, name_len);
1350 
1351 	hammer2_xop_start(&xop->head, &hammer2_inode_mkdirent_desc);
1352 
1353 	error = hammer2_xop_collect(&xop->head, 0);
1354 
1355 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1356 done2:
1357 	error = hammer2_error_to_errno(error);
1358 
1359 	return error;
1360 }
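
/*
 * Illustrative sketch (assumption, not from the original source): a create
 * VOP typically pairs this with hammer2_inode_create_normal(), pointing
 * the new directory entry at the freshly allocated inode number:
 *
 *	nip = hammer2_inode_create_normal(dip, vap, cred, inum, &error);
 *	error = hammer2_dirent_create(dip, name, name_len,
 *				      nip->meta.inum, nip->meta.type);
 */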
1361 
1362 /*
1363  * Repoint ip->cluster's chains to cluster's chains and fixup the default
1364  * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
1365  * filters out invalid or non-matching elements.
1366  *
1367  * Caller must hold the inode and cluster exclusive locked, if not NULL,
1368  * must also be locked.
1369  *
1370  * Cluster may be NULL to clean out any chains in ip->cluster.
1371  */
1372 void
1373 hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_cluster_t *cluster)
1374 {
1375 	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
1376 	hammer2_chain_t *ochain;
1377 	hammer2_chain_t *nchain;
1378 	int i;
1379 
1380 	bzero(dropch, sizeof(dropch));
1381 
1382 	/*
1383 	 * Drop any cached (typically data) chains related to this inode
1384 	 */
1385 	hammer2_spin_ex(&ip->cluster_spin);
1386 	for (i = 0; i < ip->ccache_nchains; ++i) {
1387 		dropch[i] = ip->ccache[i].chain;
1388 		ip->ccache[i].flags = 0;
1389 		ip->ccache[i].chain = NULL;
1390 	}
1391 	ip->ccache_nchains = 0;
1392 	hammer2_spin_unex(&ip->cluster_spin);
1393 
1394 	while (--i >= 0) {
1395 		if (dropch[i]) {
1396 			hammer2_chain_drop(dropch[i]);
1397 			dropch[i] = NULL;
1398 		}
1399 	}
1400 
1401 	/*
1402 	 * Replace chains in ip->cluster with chains from cluster and
1403 	 * adjust the focus if necessary.
1404 	 *
1405 	 * NOTE: nchain and/or ochain can be NULL due to gaps
1406 	 *	 in the cluster arrays.
1407 	 */
1408 	hammer2_spin_ex(&ip->cluster_spin);
1409 	for (i = 0; cluster && i < cluster->nchains; ++i) {
1410 		/*
1411 		 * Do not replace elements which are the same.  Also handle
1412 		 * element count discrepancies.
1413 		 */
1414 		nchain = cluster->array[i].chain;
1415 		if (i < ip->cluster.nchains) {
1416 			ochain = ip->cluster.array[i].chain;
1417 			if (ochain == nchain)
1418 				continue;
1419 		} else {
1420 			ochain = NULL;
1421 		}
1422 
1423 		/*
1424 		 * Make adjustments
1425 		 */
1426 		ip->cluster.array[i].chain = nchain;
1427 		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
1428 		ip->cluster.array[i].flags |= cluster->array[i].flags &
1429 					      HAMMER2_CITEM_INVALID;
1430 		if (nchain)
1431 			hammer2_chain_ref(nchain);
1432 		dropch[i] = ochain;
1433 	}
1434 
1435 	/*
1436 	 * Release any left-over chains in ip->cluster.
1437 	 */
1438 	while (i < ip->cluster.nchains) {
1439 		nchain = ip->cluster.array[i].chain;
1440 		if (nchain) {
1441 			ip->cluster.array[i].chain = NULL;
1442 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
1443 		}
1444 		dropch[i] = nchain;
1445 		++i;
1446 	}
1447 
1448 	/*
1449 	 * Fixup fields.  Note that the inode-embedded cluster is never
1450 	 * directly locked.
1451 	 */
1452 	if (cluster) {
1453 		ip->cluster.nchains = cluster->nchains;
1454 		ip->cluster.focus = cluster->focus;
1455 		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
1456 	} else {
1457 		ip->cluster.nchains = 0;
1458 		ip->cluster.focus = NULL;
1459 		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
1460 	}
1461 
1462 	hammer2_spin_unex(&ip->cluster_spin);
1463 
1464 	/*
1465 	 * Cleanup outside of spinlock
1466 	 */
1467 	while (--i >= 0) {
1468 		if (dropch[i])
1469 			hammer2_chain_drop(dropch[i]);
1470 	}
1471 }
1472 
1473 /*
1474  * Repoint a single element from the cluster to the ip.  Used by the
1475  * synchronization threads to piecemeal update inodes.  Does not change
1476  * focus and requires inode to be re-locked to clean-up flags (XXX).
1477  */
1478 void
1479 hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1480 			  int idx)
1481 {
1482 	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
1483 	hammer2_chain_t *ochain;
1484 	hammer2_chain_t *nchain;
1485 	int i;
1486 
1487 	/*
1488 	 * Drop any cached (typically data) chains related to this inode
1489 	 */
1490 	hammer2_spin_ex(&ip->cluster_spin);
1491 	for (i = 0; i < ip->ccache_nchains; ++i) {
1492 		dropch[i] = ip->ccache[i].chain;
1493 		ip->ccache[i].chain = NULL;
1494 	}
1495 	ip->ccache_nchains = 0;
1496 	hammer2_spin_unex(&ip->cluster_spin);
1497 
1498 	while (--i >= 0) {
1499 		if (dropch[i])
1500 			hammer2_chain_drop(dropch[i]);
1501 	}
1502 
1503 	/*
1504 	 * Replace inode chain at index
1505 	 */
1506 	hammer2_spin_ex(&ip->cluster_spin);
1507 	KKASSERT(idx < cluster->nchains);
1508 	if (idx < ip->cluster.nchains) {
1509 		ochain = ip->cluster.array[idx].chain;
1510 		nchain = cluster->array[idx].chain;
1511 	} else {
1512 		ochain = NULL;
1513 		nchain = cluster->array[idx].chain;
1514 		for (i = ip->cluster.nchains; i <= idx; ++i) {
1515 			bzero(&ip->cluster.array[i],
1516 			      sizeof(ip->cluster.array[i]));
1517 			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
1518 		}
1519 		ip->cluster.nchains = idx + 1;
1520 	}
1521 	if (ochain != nchain) {
1522 		/*
1523 		 * Make adjustments.
1524 		 */
1525 		ip->cluster.array[idx].chain = nchain;
1526 		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
1527 		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
1528 						HAMMER2_CITEM_INVALID;
1529 	}
1530 	hammer2_spin_unex(&ip->cluster_spin);
1531 	if (ochain != nchain) {
1532 		if (nchain)
1533 			hammer2_chain_ref(nchain);
1534 		if (ochain)
1535 			hammer2_chain_drop(ochain);
1536 	}
1537 }
1538 
1539 hammer2_key_t
1540 hammer2_inode_data_count(const hammer2_inode_t *ip)
1541 {
1542 	hammer2_chain_t *chain;
1543 	hammer2_key_t count = 0;
1544 	int i;
1545 
1546 	for (i = 0; i < ip->cluster.nchains; ++i) {
1547 		if ((chain = ip->cluster.array[i].chain) != NULL) {
1548 			if (count < chain->bref.embed.stats.data_count)
1549 				count = chain->bref.embed.stats.data_count;
1550 		}
1551 	}
1552 	return count;
1553 }
1554 
1555 hammer2_key_t
1556 hammer2_inode_inode_count(const hammer2_inode_t *ip)
1557 {
1558 	hammer2_chain_t *chain;
1559 	hammer2_key_t count = 0;
1560 	int i;
1561 
1562 	for (i = 0; i < ip->cluster.nchains; ++i) {
1563 		if ((chain = ip->cluster.array[i].chain) != NULL) {
1564 			if (count < chain->bref.embed.stats.inode_count)
1565 				count = chain->bref.embed.stats.inode_count;
1566 		}
1567 	}
1568 	return count;
1569 }
1570 
1571 /*
1572  * Called with a locked inode to finish unlinking an inode after xop_unlink
1573  * had been run.  This function is responsible for decrementing nlinks.
1574  *
1575  * We don't bother decrementing nlinks if the file is not open and this was
1576  * the last link.
1577  *
1578  * If the inode is a hardlink target its chain has not yet been deleted;
1579  * otherwise its chain has been deleted.
1580  *
1581  * If isopen then any prior deletion was not permanent and the inode is
1582  * left intact with nlinks == 0.
1583  */
1584 int
1585 hammer2_inode_unlink_finisher(hammer2_inode_t *ip, struct m_vnode **vprecyclep)
1586 {
1587 	struct m_vnode *vp;
1588 
1589 	/*
1590 	 * Decrement nlinks.  Catch a bad nlinks count here too (e.g. 0 or
1591 	 * negative), and just assume a transition to 0.
1592 	 */
1593 	if ((int64_t)ip->meta.nlinks <= 1) {
1594 		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
1595 
1596 		/*
1597 		 * Scrap the vnode as quickly as possible.  The vp association
1598 		 * stays intact while we hold the inode locked.  However, vp
1599 		 * can be NULL here.
1600 		 */
1601 		vp = ip->vp;
1602 		cpu_ccfence();
1603 
1604 		/*
1605 		 * If no vp is associated there is no high-level state to
1606 		 * deal with and we can scrap the inode immediately.
1607 		 */
1608 		if (vp == NULL) {
1609 			if ((ip->flags & HAMMER2_INODE_DELETING) == 0) {
1610 				atomic_set_int(&ip->flags,
1611 					       HAMMER2_INODE_DELETING);
1612 				hammer2_inode_delayed_sideq(ip);
1613 			}
1614 			return 0;
1615 		}
1616 
1617 		/*
1618 		 * Because INODE_ISUNLINKED is set with the inode lock
1619 		 * held, the vnode cannot be ripped up from under us.
1620 		 * There may still be refs so knote anyone waiting for
1621 		 * a delete notification.
1622 		 *
1623 		 * The vnode is not necessarily ref'd due to the unlinking
1624 		 * itself, so we have to defer handling to the end of the
1625 		 * VOP, which will then call hammer2_inode_vprecycle().
1626 		 */
1627 		if (vprecyclep) {
1628 			vhold(vp);
1629 			*vprecyclep = vp;
1630 		}
1631 	}
1632 
1633 	/*
1634 	 * Adjust nlinks and retain the inode on the media for now
1635 	 */
1636 	hammer2_inode_modify(ip);
1637 	if ((int64_t)ip->meta.nlinks > 1)
1638 		--ip->meta.nlinks;
1639 	else
1640 		ip->meta.nlinks = 0;
1641 
1642 	return 0;
1643 }
1644 
1645 /*
1646  * Called at the end of a VOP that removes a file with a vnode that
1647  * we want to try to dispose of quickly due to a file deletion.  If
1648  * we don't do this, the vnode can hang around with 0 refs for a very
1649  * long time and prevent reclamation of the underlying file and inode
1650  * (inode remains on-media with nlinks == 0 until the vnode is recycled
1651  * due to random system activity or a umount).
1652  */
1653 void
1654 hammer2_inode_vprecycle(struct m_vnode *vp)
1655 {
1656 	if (vget(vp, LK_EXCLUSIVE) == 0) {
1657 		vfinalize(vp);
1658 		hammer2_knote(vp, NOTE_DELETE);
1659 		vdrop(vp);
1660 		vput(vp);
1661 	} else {
1662 		vdrop(vp);
1663 	}
1664 }
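
/*
 * Illustrative sketch (based on the comments in the unlink finisher): the
 * vnode disposal is deferred until after the inode lock is released:
 *
 *	struct m_vnode *vprecycle = NULL;
 *
 *	hammer2_inode_unlink_finisher(ip, &vprecycle);
 *	hammer2_inode_unlock(ip);
 *	if (vprecycle)
 *		hammer2_inode_vprecycle(vprecycle);
 */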
1665 
1666 
1667 /*
1668  * Mark an inode as being modified, meaning that the caller will modify
1669  * ip->meta.
1670  *
1671  * If a vnode is present we set the vnode dirty and the nominal filesystem
1672  * sync will also handle synchronizing the inode meta-data.  Unless NOSIDEQ
1673  * we must ensure that the inode is on pmp->sideq.
1674  *
1675  * NOTE: We must always queue the inode to the sideq.  This allows H2 to
1676  *	 shortcut vsyncscan() and flush inodes and their related vnodes
1677  *	 in two stages.  H2 still calls vfsync() for each vnode.
1678  *
1679  * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
1680  *	 only modifying the in-memory inode.  A modify_tid is synchronized
1681  *	 later when the inode gets flushed.
1682  *
1683  * NOTE: As an exception to the general rule, the inode MAY be locked
1684  *	 shared for this particular call.
1685  */
1686 void
1687 hammer2_inode_modify(hammer2_inode_t *ip)
1688 {
1689 	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1690 	if (ip->vp)
1691 		vsetisdirty(ip->vp);
1692 	if (ip->pmp && (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0)
1693 		hammer2_inode_delayed_sideq(ip);
1694 }
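
/*
 * Illustrative sketch (added commentary): callers flag the modification
 * before changing the in-memory meta-data, with the inode locked:
 *
 *	hammer2_inode_lock(ip, 0);
 *	hammer2_inode_modify(ip);
 *	ip->meta.mtime = ...;
 *	hammer2_inode_unlock(ip);
 */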
1695 
1696 /*
1697  * Synchronize the inode's frontend state with the chain state prior
1698  * to any explicit flush of the inode or any strategy write call.  This
1699  * does not flush the inode's chain or its sub-topology to media (higher
1700  * level layers are responsible for doing that).
1701  *
1702  * Called with a locked inode inside a normal transaction.
1703  *
1704  * inode must be locked.
1705  */
1706 int
1707 hammer2_inode_chain_sync(hammer2_inode_t *ip)
1708 {
1709 	int error;
1710 
1711 	error = 0;
1712 	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
1713 		hammer2_xop_fsync_t *xop;
1714 
1715 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1716 		xop->clear_directdata = 0;
1717 		if (ip->flags & HAMMER2_INODE_RESIZED) {
1718 			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
1719 			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
1720 				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
1721 				xop->clear_directdata = 1;
1722 			}
1723 			xop->osize = ip->osize;
1724 		} else {
1725 			xop->osize = ip->meta.size;	/* safety */
1726 		}
1727 		xop->ipflags = ip->flags;
1728 		xop->meta = ip->meta;
1729 
1730 		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
1731 					     HAMMER2_INODE_MODIFIED);
1732 		hammer2_xop_start(&xop->head, &hammer2_inode_chain_sync_desc);
1733 		error = hammer2_xop_collect(&xop->head, 0);
1734 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1735 		if (error == HAMMER2_ERROR_ENOENT)
1736 			error = 0;
1737 		if (error) {
1738 			kprintf("hammer2: unable to fsync inode %p\n", ip);
1739 			/*
1740 			atomic_set_int(&ip->flags,
1741 				       xop->ipflags & (HAMMER2_INODE_RESIZED |
1742 						       HAMMER2_INODE_MODIFIED));
1743 			*/
1744 			/* XXX return error somehow? */
1745 		}
1746 	}
1747 	return error;
1748 }
1749 
1750 /*
1751  * When an inode is flagged INODE_CREATING its chains have not actually
1752  * been inserted into the on-media tree yet.
1753  */
1754 int
1755 hammer2_inode_chain_ins(hammer2_inode_t *ip)
1756 {
1757 	int error;
1758 
1759 	error = 0;
1760 	if (ip->flags & HAMMER2_INODE_CREATING) {
1761 		hammer2_xop_create_t *xop;
1762 
1763 		atomic_clear_int(&ip->flags, HAMMER2_INODE_CREATING);
1764 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1765 		xop->lhc = ip->meta.inum;
1766 		xop->flags = 0;
1767 		hammer2_xop_start(&xop->head, &hammer2_inode_create_ins_desc);
1768 		error = hammer2_xop_collect(&xop->head, 0);
1769 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1770 		if (error == HAMMER2_ERROR_ENOENT)
1771 			error = 0;
1772 		if (error) {
1773 			kprintf("hammer2: backend unable to "
1774 				"insert inode %p %ld\n", ip, (long)ip->meta.inum);
1775 			/* XXX return error somehow? */
1776 		}
1777 	}
1778 	return error;
1779 }
1780 
1781 /*
1782  * When an inode is flagged INODE_DELETING it has been deleted (no directory
1783  * entry or open refs are left, though as an optimization H2 might leave
1784  * nlinks == 1 to avoid unnecessary block updates).  The backend flush then
1785  * needs to actually remove it from the topology.
1786  *
1787  * NOTE: backend flush must still sync and flush the deleted inode to clean
1788  *	 out related chains.
1789  *
1790  * NOTE: We must clear not only INODE_DELETING, but also INODE_ISUNLINKED
1791  *	 to prevent the vnode reclaim code from trying to delete it twice.
1792  */
1793 int
1794 hammer2_inode_chain_des(hammer2_inode_t *ip)
1795 {
1796 	int error;
1797 
1798 	error = 0;
1799 	if (ip->flags & HAMMER2_INODE_DELETING) {
1800 		hammer2_xop_destroy_t *xop;
1801 
1802 		atomic_clear_int(&ip->flags, HAMMER2_INODE_DELETING |
1803 					     HAMMER2_INODE_ISUNLINKED);
1804 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1805 		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
1806 		error = hammer2_xop_collect(&xop->head, 0);
1807 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1808 
1809 		if (error == HAMMER2_ERROR_ENOENT)
1810 			error = 0;
1811 		if (error) {
1812 			kprintf("hammer2: backend unable to "
1813 				"delete inode %p %ld\n", ip, (long)ip->meta.inum);
1814 			/* XXX return error somehow? */
1815 		}
1816 	}
1817 	return error;
1818 }
1819 
1820 /*
1821  * Flushes the inode's chain and its sub-topology to media.  Interlocks
1822  * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
1823  * function creating or modifying a chain under this inode will re-set the
1824  * flag.
1825  *
1826  * inode must be locked.
1827  */
1828 int
1829 hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags)
1830 {
1831 	hammer2_xop_flush_t *xop;
1832 	int error;
1833 
1834 	atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
1835 	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | flags);
1836 	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
1837 	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
1838 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1839 	if (error == HAMMER2_ERROR_ENOENT)
1840 		error = 0;
1841 
1842 	return error;
1843 }
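
/*
 * Illustrative sketch (an assumption about ordering, consistent with the
 * comments above): the flusher first synchronizes frontend state into the
 * chains, then flushes the chain topology; the flags argument is shown as
 * 0 here purely for simplicity:
 *
 *	hammer2_inode_chain_sync(ip);
 *	hammer2_inode_chain_flush(ip, 0);
 */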
1844 
1845 int
1846 vflush(struct mount *mp, int rootrefs, int flags)
1847 {
1848 	hammer2_pfs_t *pmp = MPTOPMP(mp);
1849 	struct hammer2_inode *ip, *tmp;
1850 	struct m_vnode *vp;
1851 	hammer2_key_t count_before, count_after, count_recq;
1852 	hammer2_inum_hash_t *hash;
1853 	int i;
1854 
1855 	printf("%s: total chain %ld\n", __func__, hammer2_chain_allocs);
1856 	printf("%s: total dio %d\n", __func__, hammer2_dio_count);
1857 
1858 	for (i = 0; i < HAMMER2_INUMHASH_SIZE; ++i) {
1859 		hash = &pmp->inumhash[i];
1860 		hammer2_spin_ex(&hash->spin);
1861 		count_before = 0;
1862 		for (ip = hash->base; ip; ip = ip->next)
1863 			count_before++;
1864 
1865 		for (ip = hash->base; ip;) {
1866 			tmp = ip->next;
1867 			vp = ip->vp;
1868 			assert(vp);
1869 			if (!vp->v_vflushed) {
1870 				/*
1871 				 * Not all inodes are modified and ref'd,
1872 				 * so ip->refs requirement here is the initial 1.
1873 				 */
1874 				assert(ip->refs > 0);
1875 				hammer2_inode_drop(ip);
1876 				vp->v_vflushed = 1;
1877 			}
1878 			ip = tmp;
1879 		}
1880 
1881 		count_after = 0;
1882 		for (ip = hash->base; ip; ip = ip->next)
1883 			count_after++;
1884 		hammer2_spin_unex(&hash->spin);
1885 	}
1886 
1887 	printf("%s: total inode %jd -> %jd\n",
1888 	    __func__, (intmax_t)count_before, (intmax_t)count_after);
1889 	assert(count_before >= count_after);
1890 
1891 	count_recq = 0;
1892 	TAILQ_FOREACH(ip, &pmp->recq, recq_entry)
1893 		count_recq++;
1894 	if (count_recq)
1895 		printf("%s: %jd inode in reclaim queue\n",
1896 		    __func__, (intmax_t)count_recq);
1897 
1898 	return 0;
1899 }
1900