1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.110 2008/08/09 07:04:16 dillon Exp $
35  */
36 
37 #include "hammer.h"
38 #include <vm/vm_extern.h>
39 #include <sys/buf.h>
40 #include <sys/buf2.h>
41 
42 static int	hammer_unload_inode(struct hammer_inode *ip);
43 static void	hammer_free_inode(hammer_inode_t ip);
44 static void	hammer_flush_inode_core(hammer_inode_t ip,
45 					hammer_flush_group_t flg, int flags);
46 static int	hammer_setup_child_callback(hammer_record_t rec, void *data);
47 #if 0
48 static int	hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
49 #endif
50 static int	hammer_setup_parent_inodes(hammer_inode_t ip,
51 					hammer_flush_group_t flg);
52 static int	hammer_setup_parent_inodes_helper(hammer_record_t record,
53 					hammer_flush_group_t flg);
54 static void	hammer_inode_wakereclaims(hammer_inode_t ip);
55 
56 #ifdef DEBUG_TRUNCATE
57 extern struct hammer_inode *HammerTruncIp;
58 #endif
59 
60 /*
61  * RB-Tree support for inode structures
62  */
63 int
64 hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
65 {
66 	if (ip1->obj_localization < ip2->obj_localization)
67 		return(-1);
68 	if (ip1->obj_localization > ip2->obj_localization)
69 		return(1);
70 	if (ip1->obj_id < ip2->obj_id)
71 		return(-1);
72 	if (ip1->obj_id > ip2->obj_id)
73 		return(1);
74 	if (ip1->obj_asof < ip2->obj_asof)
75 		return(-1);
76 	if (ip1->obj_asof > ip2->obj_asof)
77 		return(1);
78 	return(0);
79 }
80 
81 /*
82  * RB-Tree support for inode structures / special LOOKUP_INFO
83  */
84 static int
85 hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
86 {
87 	if (info->obj_localization < ip->obj_localization)
88 		return(-1);
89 	if (info->obj_localization > ip->obj_localization)
90 		return(1);
91 	if (info->obj_id < ip->obj_id)
92 		return(-1);
93 	if (info->obj_id > ip->obj_id)
94 		return(1);
95 	if (info->obj_asof < ip->obj_asof)
96 		return(-1);
97 	if (info->obj_asof > ip->obj_asof)
98 		return(1);
99 	return(0);
100 }
101 
102 /*
103  * Used by hammer_scan_inode_snapshots() to locate all of an object's
104  * snapshots.  Note that the asof field is not tested, which we can get
105  * away with because it is the lowest-priority field.
106  */
107 static int
108 hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
109 {
110 	hammer_inode_info_t info = data;
111 
112 	if (ip->obj_localization > info->obj_localization)
113 		return(1);
114 	if (ip->obj_localization < info->obj_localization)
115 		return(-1);
116 	if (ip->obj_id > info->obj_id)
117 		return(1);
118 	if (ip->obj_id < info->obj_id)
119 		return(-1);
120 	return(0);
121 }
122 
123 /*
124  * Used by hammer_unload_pseudofs() to locate all inodes associated with
125  * a particular PFS.
126  */
127 static int
128 hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
129 {
130 	u_int32_t localization = *(u_int32_t *)data;
131 	if (ip->obj_localization > localization)
132 		return(1);
133 	if (ip->obj_localization < localization)
134 		return(-1);
135 	return(0);
136 }
137 
138 /*
139  * RB-Tree support for pseudofs structures
140  */
141 static int
142 hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
143 {
144 	if (p1->localization < p2->localization)
145 		return(-1);
146 	if (p1->localization > p2->localization)
147 		return(1);
148 	return(0);
149 }
150 
151 
152 RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
153 RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
154 		hammer_inode_info_cmp, hammer_inode_info_t);
155 RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
156              hammer_pfs_rb_compare, u_int32_t, localization);
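
/*
 * Illustrative sketch (kept under #if 0, not compiled): the generated
 * inode lookup is keyed on (localization, obj_id, asof), in that
 * priority order.  A cached-inode lookup equivalent to the one in
 * hammer_get_inode() would look like this; the function name is
 * hypothetical.
 */
#if 0
static hammer_inode_t
example_lookup_cached_inode(hammer_mount_t hmp, int64_t obj_id,
			    hammer_tid_t asof, u_int32_t localization)
{
	struct hammer_inode_info iinfo;

	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
	return(hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo));
}
#endif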
157 
158 /*
159  * The kernel is not actively referencing this vnode but is still holding
160  * it cached.
161  *
162  * This is called from the frontend.
163  */
164 int
165 hammer_vop_inactive(struct vop_inactive_args *ap)
166 {
167 	struct hammer_inode *ip = VTOI(ap->a_vp);
168 
169 	/*
170 	 * Degenerate case
171 	 */
172 	if (ip == NULL) {
173 		vrecycle(ap->a_vp);
174 		return(0);
175 	}
176 
177 	/*
178 	 * If the inode no longer has visibility in the filesystem try to
179 	 * recycle it immediately, even if the inode is dirty.  Recycling
180 	 * it quickly allows the system to reclaim buffer cache and VM
181 	 * resources which can matter a lot in a heavily loaded system.
182 	 *
183 	 * This can deadlock in vfsync() if we aren't careful.
184 	 *
185 	 * Do not queue the inode to the flusher if we still have visibility,
186 	 * otherwise namespace calls such as chmod will unnecessarily generate
187 	 * multiple inode updates.
188 	 */
189 	hammer_inode_unloadable_check(ip, 0);
190 	if (ip->ino_data.nlinks == 0) {
191 		if (ip->flags & HAMMER_INODE_MODMASK)
192 			hammer_flush_inode(ip, 0);
193 		vrecycle(ap->a_vp);
194 	}
195 	return(0);
196 }
197 
198 /*
199  * Release the vnode association.  This is typically (but not always)
200  * the last reference on the inode.
201  *
202  * Once the association is lost we are on our own with regards to
203  * flushing the inode.
204  */
205 int
206 hammer_vop_reclaim(struct vop_reclaim_args *ap)
207 {
208 	struct hammer_inode *ip;
209 	hammer_mount_t hmp;
210 	struct vnode *vp;
211 
212 	vp = ap->a_vp;
213 
214 	if ((ip = vp->v_data) != NULL) {
215 		hmp = ip->hmp;
216 		vp->v_data = NULL;
217 		ip->vp = NULL;
218 
219 		if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
220 			++hammer_count_reclaiming;
221 			++hmp->inode_reclaims;
222 			ip->flags |= HAMMER_INODE_RECLAIM;
223 
224 			/*
225 			 * Poke the flusher.  If we don't do this programs
226 			 * will start to stall on the reclaiming count.
227 			 */
228 			if (hmp->inode_reclaims > HAMMER_RECLAIM_FLUSH &&
229 			   (hmp->inode_reclaims & 255) == 0) {
230 			       hammer_flusher_async(hmp, NULL);
231 			}
232 		}
233 		hammer_rel_inode(ip, 1);
234 	}
235 	return(0);
236 }
237 
238 /*
239  * Return a locked vnode for the specified inode.  The inode must be
240  * referenced but NOT LOCKED on entry and will remain referenced on
241  * return.
242  *
243  * Called from the frontend.
244  */
245 int
246 hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
247 {
248 	hammer_mount_t hmp;
249 	struct vnode *vp;
250 	int error = 0;
251 	u_int8_t obj_type;
252 
253 	hmp = ip->hmp;
254 
255 	for (;;) {
256 		if ((vp = ip->vp) == NULL) {
257 			error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
258 			if (error)
259 				break;
260 			hammer_lock_ex(&ip->lock);
261 			if (ip->vp != NULL) {
262 				hammer_unlock(&ip->lock);
263 				vp->v_type = VBAD;
264 				vx_put(vp);
265 				continue;
266 			}
267 			hammer_ref(&ip->lock);
268 			vp = *vpp;
269 			ip->vp = vp;
270 
271 			obj_type = ip->ino_data.obj_type;
272 			vp->v_type = hammer_get_vnode_type(obj_type);
273 
274 			hammer_inode_wakereclaims(ip);
275 
276 			switch(ip->ino_data.obj_type) {
277 			case HAMMER_OBJTYPE_CDEV:
278 			case HAMMER_OBJTYPE_BDEV:
279 				vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
280 				addaliasu(vp, ip->ino_data.rmajor,
281 					  ip->ino_data.rminor);
282 				break;
283 			case HAMMER_OBJTYPE_FIFO:
284 				vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
285 				break;
286 			default:
287 				break;
288 			}
289 
290 			/*
291 			 * Only mark as the root vnode if the ip is not
292 			 * historical, otherwise the VFS cache will get
293 			 * confused.  The other half of the special handling
294 			 * is in hammer_vop_nlookupdotdot().
295 			 *
296 			 * Pseudo-filesystem roots also do not count.
297 			 */
298 			if (ip->obj_id == HAMMER_OBJID_ROOT &&
299 			    ip->obj_asof == hmp->asof &&
300 			    ip->obj_localization == 0) {
301 				vp->v_flag |= VROOT;
302 			}
303 
304 			vp->v_data = (void *)ip;
305 			/* vnode locked by getnewvnode() */
306 			/* make related vnode dirty if inode dirty? */
307 			hammer_unlock(&ip->lock);
308 			if (vp->v_type == VREG)
309 				vinitvmio(vp, ip->ino_data.size);
310 			break;
311 		}
312 
313 		/*
314 		 * loop if the vget fails (aka races), or if the vp
315 		 * no longer matches ip->vp.
316 		 */
317 		if (vget(vp, LK_EXCLUSIVE) == 0) {
318 			if (vp == ip->vp)
319 				break;
320 			vput(vp);
321 		}
322 	}
323 	*vpp = vp;
324 	return(error);
325 }
326 
327 /*
328  * Locate all copies of the inode for obj_id compatible with the specified
329  * asof, reference each one, and issue the related call-back.  This routine
330  * is used for direct-io invalidation and does not create any new inodes.
331  */
332 void
333 hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
334 		            int (*callback)(hammer_inode_t ip, void *data),
335 			    void *data)
336 {
337 	hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
338 				   hammer_inode_info_cmp_all_history,
339 				   callback, iinfo);
340 }
341 
342 /*
343  * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
344  * do not attach or detach the related vnode (use hammer_get_vnode() for
345  * that).
346  *
347  * The flags argument is only applied for newly created inodes, and only
348  * certain flags are inherited.
349  *
350  * Called from the frontend.
351  */
352 struct hammer_inode *
353 hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
354 		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
355 		 int flags, int *errorp)
356 {
357 	hammer_mount_t hmp = trans->hmp;
358 	struct hammer_inode_info iinfo;
359 	struct hammer_cursor cursor;
360 	struct hammer_inode *ip;
361 
362 
363 	/*
364 	 * Determine if we already have an inode cached.  If we do then
365 	 * we are golden.
366 	 */
367 	iinfo.obj_id = obj_id;
368 	iinfo.obj_asof = asof;
369 	iinfo.obj_localization = localization;
370 loop:
371 	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
372 	if (ip) {
373 		hammer_ref(&ip->lock);
374 		*errorp = 0;
375 		return(ip);
376 	}
377 
378 	/*
379 	 * Allocate a new inode structure and deal with races later.
380 	 */
381 	ip = kmalloc(sizeof(*ip), M_HAMMER_INO, M_WAITOK|M_ZERO);
382 	++hammer_count_inodes;
383 	++hmp->count_inodes;
384 	ip->obj_id = obj_id;
385 	ip->obj_asof = iinfo.obj_asof;
386 	ip->obj_localization = localization;
387 	ip->hmp = hmp;
388 	ip->flags = flags & HAMMER_INODE_RO;
389 	ip->cache[0].ip = ip;
390 	ip->cache[1].ip = ip;
391 	if (hmp->ronly)
392 		ip->flags |= HAMMER_INODE_RO;
393 	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
394 		0x7FFFFFFFFFFFFFFFLL;
395 	RB_INIT(&ip->rec_tree);
396 	TAILQ_INIT(&ip->target_list);
397 	hammer_ref(&ip->lock);
398 
399 	/*
400 	 * Locate the on-disk inode.  If this is a PFS root we always
401 	 * access the current version of the root inode and (if it is not
402 	 * a master) always access information under it with a snapshot
403 	 * TID.
404 	 */
405 retry:
406 	hammer_init_cursor(trans, &cursor, (dip ? &dip->cache[0] : NULL), NULL);
407 	cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
408 	cursor.key_beg.obj_id = ip->obj_id;
409 	cursor.key_beg.key = 0;
410 	cursor.key_beg.create_tid = 0;
411 	cursor.key_beg.delete_tid = 0;
412 	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
413 	cursor.key_beg.obj_type = 0;
414 
415 	cursor.asof = iinfo.obj_asof;
416 	cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
417 		       HAMMER_CURSOR_ASOF;
418 
419 	*errorp = hammer_btree_lookup(&cursor);
420 	if (*errorp == EDEADLK) {
421 		hammer_done_cursor(&cursor);
422 		goto retry;
423 	}
424 
425 	/*
426 	 * On success the B-Tree lookup will hold the appropriate
427 	 * buffer cache buffers and provide a pointer to the requested
428 	 * information.  Copy the information to the in-memory inode
429 	 * and cache the B-Tree node to improve future operations.
430 	 */
431 	if (*errorp == 0) {
432 		ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
433 		ip->ino_data = cursor.data->inode;
434 
435 		/*
436 		 * cache[0] tries to cache the location of the object inode.
437 		 * The assumption is that it is near the directory inode.
438 		 *
439 		 * cache[1] tries to cache the location of the object data.
440 		 * The assumption is that it is near the directory data.
441 		 */
442 		hammer_cache_node(&ip->cache[0], cursor.node);
443 		if (dip && dip->cache[1].node)
444 			hammer_cache_node(&ip->cache[1], dip->cache[1].node);
445 
446 		/*
447 		 * The file should not contain any data past the file size
448 		 * stored in the inode.  Setting save_trunc_off to the
449 		 * file size instead of max reduces B-Tree lookup overheads
450 		 * on append by allowing the flusher to avoid checking for
451 		 * record overwrites.
452 		 */
453 		ip->save_trunc_off = ip->ino_data.size;
454 
455 		/*
456 		 * Locate and assign the pseudofs management structure to
457 		 * the inode.
458 		 */
459 		if (dip && dip->obj_localization == ip->obj_localization) {
460 			ip->pfsm = dip->pfsm;
461 			hammer_ref(&ip->pfsm->lock);
462 		} else {
463 			ip->pfsm = hammer_load_pseudofs(trans,
464 							ip->obj_localization,
465 							errorp);
466 			*errorp = 0;	/* ignore ENOENT */
467 		}
468 	}
469 
470 	/*
471 	 * The inode is placed on the red-black tree and will be synced to
472 	 * the media when flushed or by the filesystem sync.  If this races
473 	 * another instantiation/lookup the insertion will fail.
474 	 */
475 	if (*errorp == 0) {
476 		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
477 			hammer_free_inode(ip);
478 			hammer_done_cursor(&cursor);
479 			goto loop;
480 		}
481 		ip->flags |= HAMMER_INODE_ONDISK;
482 	} else {
483 		if (ip->flags & HAMMER_INODE_RSV_INODES) {
484 			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
485 			--hmp->rsv_inodes;
486 		}
487 
488 		hammer_free_inode(ip);
489 		ip = NULL;
490 	}
491 	hammer_done_cursor(&cursor);
492 	return (ip);
493 }
494 
495 /*
496  * Create a new filesystem object, returning the inode in *ipp.  The
497  * returned inode will be referenced.  The inode is created in-memory.
498  *
499  * If pfsm is non-NULL the caller wishes to create the root inode for
500  * a master PFS.
501  */
502 int
503 hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
504 		    struct ucred *cred, hammer_inode_t dip,
505 		    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
506 {
507 	hammer_mount_t hmp;
508 	hammer_inode_t ip;
509 	uid_t xuid;
510 	int error;
511 
512 	hmp = trans->hmp;
513 
514 	ip = kmalloc(sizeof(*ip), M_HAMMER_INO, M_WAITOK|M_ZERO);
515 	++hammer_count_inodes;
516 	++hmp->count_inodes;
517 
518 	if (pfsm) {
519 		KKASSERT(pfsm->localization != 0);
520 		ip->obj_id = HAMMER_OBJID_ROOT;
521 		ip->obj_localization = pfsm->localization;
522 	} else {
523 		KKASSERT(dip != NULL);
524 		ip->obj_id = hammer_alloc_objid(hmp, dip);
525 		ip->obj_localization = dip->obj_localization;
526 	}
527 
528 	KKASSERT(ip->obj_id != 0);
529 	ip->obj_asof = hmp->asof;
530 	ip->hmp = hmp;
531 	ip->flush_state = HAMMER_FST_IDLE;
532 	ip->flags = HAMMER_INODE_DDIRTY |
533 		    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
534 	ip->cache[0].ip = ip;
535 	ip->cache[1].ip = ip;
536 
537 	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
538 	/* ip->save_trunc_off = 0; (already zero) */
539 	RB_INIT(&ip->rec_tree);
540 	TAILQ_INIT(&ip->target_list);
541 
542 	ip->ino_data.atime = trans->time;
543 	ip->ino_data.mtime = trans->time;
544 	ip->ino_data.size = 0;
545 	ip->ino_data.nlinks = 0;
546 
547 	/*
548 	 * A nohistory designator on the parent directory is inherited by
549 	 * the child.  We will do this even for pseudo-fs creation... the
550 	 * sysad can turn it off.
551 	 */
552 	if (dip) {
553 		ip->ino_data.uflags = dip->ino_data.uflags &
554 				      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
555 	}
556 
557 	ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
558 	ip->ino_leaf.base.localization = ip->obj_localization +
559 					 HAMMER_LOCALIZE_INODE;
560 	ip->ino_leaf.base.obj_id = ip->obj_id;
561 	ip->ino_leaf.base.key = 0;
562 	ip->ino_leaf.base.create_tid = 0;
563 	ip->ino_leaf.base.delete_tid = 0;
564 	ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
565 	ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);
566 
567 	ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
568 	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
569 	ip->ino_data.mode = vap->va_mode;
570 	ip->ino_data.ctime = trans->time;
571 
572 	/*
573 	 * Setup the ".." pointer.  This only needs to be done for directories
574 	 * but we do it for all objects as a recovery aid.
575 	 */
576 	if (dip)
577 		ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;
578 #if 0
579 	/*
580 	 * The parent_obj_localization field only applies to pseudo-fs roots.
581 	 * XXX this is no longer applicable, PFSs are no longer directly
582 	 * tied into the parent's directory structure.
583 	 */
584 	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
585 	    ip->obj_id == HAMMER_OBJID_ROOT) {
586 		ip->ino_data.ext.obj.parent_obj_localization =
587 						dip->obj_localization;
588 	}
589 #endif
590 
591 	switch(ip->ino_leaf.base.obj_type) {
592 	case HAMMER_OBJTYPE_CDEV:
593 	case HAMMER_OBJTYPE_BDEV:
594 		ip->ino_data.rmajor = vap->va_rmajor;
595 		ip->ino_data.rminor = vap->va_rminor;
596 		break;
597 	default:
598 		break;
599 	}
600 
601 	/*
602 	 * Calculate default uid/gid and overwrite with information from
603 	 * the vap.
604 	 */
605 	if (dip) {
606 		xuid = hammer_to_unix_xid(&dip->ino_data.uid);
607 		xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
608 					     xuid, cred, &vap->va_mode);
609 	} else {
610 		xuid = 0;
611 	}
612 	ip->ino_data.mode = vap->va_mode;
613 
614 	if (vap->va_vaflags & VA_UID_UUID_VALID)
615 		ip->ino_data.uid = vap->va_uid_uuid;
616 	else if (vap->va_uid != (uid_t)VNOVAL)
617 		hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
618 	else
619 		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);
620 
621 	if (vap->va_vaflags & VA_GID_UUID_VALID)
622 		ip->ino_data.gid = vap->va_gid_uuid;
623 	else if (vap->va_gid != (gid_t)VNOVAL)
624 		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
625 	else if (dip)
626 		ip->ino_data.gid = dip->ino_data.gid;
627 
628 	hammer_ref(&ip->lock);
629 
630 	if (pfsm) {
631 		ip->pfsm = pfsm;
632 		hammer_ref(&pfsm->lock);
633 		error = 0;
634 	} else if (dip->obj_localization == ip->obj_localization) {
635 		ip->pfsm = dip->pfsm;
636 		hammer_ref(&ip->pfsm->lock);
637 		error = 0;
638 	} else {
639 		ip->pfsm = hammer_load_pseudofs(trans,
640 						ip->obj_localization,
641 						&error);
642 		error = 0;	/* ignore ENOENT */
643 	}
644 
645 	if (error) {
646 		hammer_free_inode(ip);
647 		ip = NULL;
648 	} else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
649 		panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
650 		/* not reached */
651 		hammer_free_inode(ip);
652 	}
653 	*ipp = ip;
654 	return(error);
655 }
656 
657 /*
658  * Final cleanup / freeing of an inode structure
659  */
660 static void
661 hammer_free_inode(hammer_inode_t ip)
662 {
663 	KKASSERT(ip->lock.refs == 1);
664 	hammer_uncache_node(&ip->cache[0]);
665 	hammer_uncache_node(&ip->cache[1]);
666 	hammer_inode_wakereclaims(ip);
667 	if (ip->objid_cache)
668 		hammer_clear_objid(ip);
669 	--hammer_count_inodes;
670 	--ip->hmp->count_inodes;
671 	if (ip->pfsm) {
672 		hammer_rel_pseudofs(ip->hmp, ip->pfsm);
673 		ip->pfsm = NULL;
674 	}
675 	kfree(ip, M_HAMMER_INO);
676 	ip = NULL;
677 }
678 
679 /*
680  * Retrieve pseudo-fs data.  NULL will never be returned.
681  *
682  * If an error occurs *errorp will be set and a default template is returned,
683  * otherwise *errorp is set to 0.  Typically when an error occurs it will
684  * be ENOENT.
685  */
686 hammer_pseudofs_inmem_t
687 hammer_load_pseudofs(hammer_transaction_t trans,
688 		     u_int32_t localization, int *errorp)
689 {
690 	hammer_mount_t hmp = trans->hmp;
691 	hammer_inode_t ip;
692 	hammer_pseudofs_inmem_t pfsm;
693 	struct hammer_cursor cursor;
694 	int bytes;
695 
696 retry:
697 	pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
698 	if (pfsm) {
699 		hammer_ref(&pfsm->lock);
700 		*errorp = 0;
701 		return(pfsm);
702 	}
703 
704 	/*
705 	 * PFS records are stored in the root inode (not the PFS root inode,
706 	 * but the real root).  Avoid an infinite recursion if loading
707 	 * the PFS for the real root.
708 	 */
709 	if (localization) {
710 		ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
711 				      HAMMER_MAX_TID,
712 				      HAMMER_DEF_LOCALIZATION, 0, errorp);
713 	} else {
714 		ip = NULL;
715 	}
716 
717 	pfsm = kmalloc(sizeof(*pfsm), M_HAMMER, M_WAITOK | M_ZERO);
718 	pfsm->localization = localization;
719 	pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
720 	pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;
721 
722 	hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
723 	cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
724 				      HAMMER_LOCALIZE_MISC;
725 	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
726 	cursor.key_beg.create_tid = 0;
727 	cursor.key_beg.delete_tid = 0;
728 	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
729 	cursor.key_beg.obj_type = 0;
730 	cursor.key_beg.key = localization;
731 	cursor.asof = HAMMER_MAX_TID;
732 	cursor.flags |= HAMMER_CURSOR_ASOF;
733 
734 	if (ip)
735 		*errorp = hammer_ip_lookup(&cursor);
736 	else
737 		*errorp = hammer_btree_lookup(&cursor);
738 	if (*errorp == 0) {
739 		*errorp = hammer_ip_resolve_data(&cursor);
740 		if (*errorp == 0) {
741 			if (cursor.data->pfsd.mirror_flags &
742 			    HAMMER_PFSD_DELETED) {
743 				*errorp = ENOENT;
744 			} else {
745 				bytes = cursor.leaf->data_len;
746 				if (bytes > sizeof(pfsm->pfsd))
747 					bytes = sizeof(pfsm->pfsd);
748 				bcopy(cursor.data, &pfsm->pfsd, bytes);
749 			}
750 		}
751 	}
752 	hammer_done_cursor(&cursor);
753 
754 	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
755 	hammer_ref(&pfsm->lock);
756 	if (ip)
757 		hammer_rel_inode(ip, 0);
758 	if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
759 		kfree(pfsm, M_HAMMER);
760 		goto retry;
761 	}
762 	return(pfsm);
763 }
764 
765 /*
766  * Store pseudo-fs data.  The backend will automatically delete any prior
767  * on-disk pseudo-fs data but we have to delete in-memory versions.
768  */
769 int
770 hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
771 {
772 	struct hammer_cursor cursor;
773 	hammer_record_t record;
774 	hammer_inode_t ip;
775 	int error;
776 
777 	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
778 			      HAMMER_DEF_LOCALIZATION, 0, &error);
779 retry:
780 	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
781 	hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
782 	cursor.key_beg.localization = ip->obj_localization +
783 				      HAMMER_LOCALIZE_MISC;
784 	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
785 	cursor.key_beg.create_tid = 0;
786 	cursor.key_beg.delete_tid = 0;
787 	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
788 	cursor.key_beg.obj_type = 0;
789 	cursor.key_beg.key = pfsm->localization;
790 	cursor.asof = HAMMER_MAX_TID;
791 	cursor.flags |= HAMMER_CURSOR_ASOF;
792 
793 	error = hammer_ip_lookup(&cursor);
794 	if (error == 0 && hammer_cursor_inmem(&cursor)) {
795 		record = cursor.iprec;
796 		if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
797 			KKASSERT(cursor.deadlk_rec == NULL);
798 			hammer_ref(&record->lock);
799 			cursor.deadlk_rec = record;
800 			error = EDEADLK;
801 		} else {
802 			record->flags |= HAMMER_RECF_DELETED_FE;
803 			error = 0;
804 		}
805 	}
806 	if (error == 0 || error == ENOENT) {
807 		record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
808 		record->type = HAMMER_MEM_RECORD_GENERAL;
809 
810 		record->leaf.base.localization = ip->obj_localization +
811 						 HAMMER_LOCALIZE_MISC;
812 		record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
813 		record->leaf.base.key = pfsm->localization;
814 		record->leaf.data_len = sizeof(pfsm->pfsd);
815 		bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
816 		error = hammer_ip_add_record(trans, record);
817 	}
818 	hammer_done_cursor(&cursor);
819 	if (error == EDEADLK)
820 		goto retry;
821 	hammer_rel_inode(ip, 0);
822 	return(error);
823 }
824 
825 /*
826  * Create a root directory for a PFS if one does not already exist.
827  *
828  * The PFS root stands alone so we must also bump the nlinks count
829  * to prevent it from being destroyed on release.
830  */
831 int
832 hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
833 		       hammer_pseudofs_inmem_t pfsm)
834 {
835 	hammer_inode_t ip;
836 	struct vattr vap;
837 	int error;
838 
839 	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
840 			      pfsm->localization, 0, &error);
841 	if (ip == NULL) {
842 		vattr_null(&vap);
843 		vap.va_mode = 0755;
844 		vap.va_type = VDIR;
845 		error = hammer_create_inode(trans, &vap, cred, NULL, pfsm, &ip);
846 		if (error == 0) {
847 			++ip->ino_data.nlinks;
848 			hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
849 		}
850 	}
851 	if (ip)
852 		hammer_rel_inode(ip, 0);
853 	return(error);
854 }
855 
856 /*
857  * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
858  * if we are unable to disassociate all the inodes.
859  */
860 static
861 int
862 hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
863 {
864 	int res;
865 
866 	hammer_ref(&ip->lock);
867 	if (ip->lock.refs == 2 && ip->vp)
868 		vclean_unlocked(ip->vp);
869 	if (ip->lock.refs == 1 && ip->vp == NULL)
870 		res = 0;
871 	else
872 		res = -1;	/* stop, someone is using the inode */
873 	hammer_rel_inode(ip, 0);
874 	return(res);
875 }
876 
877 int
878 hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
879 {
880 	int res;
881 	int try;
882 
883 	for (try = res = 0; try < 4; ++try) {
884 		res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
885 					   hammer_inode_pfs_cmp,
886 					   hammer_unload_pseudofs_callback,
887 					   &localization);
888 		if (res == 0 && try > 1)
889 			break;
890 		hammer_flusher_sync(trans->hmp);
891 	}
892 	if (res != 0)
893 		res = ENOTEMPTY;
894 	return(res);
895 }
896 
897 
898 /*
899  * Release a reference on a PFS
900  */
901 void
902 hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
903 {
904 	hammer_unref(&pfsm->lock);
905 	if (pfsm->lock.refs == 0) {
906 		RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
907 		kfree(pfsm, M_HAMMER);
908 	}
909 }
910 
911 /*
912  * Called by hammer_sync_inode().
913  */
914 static int
915 hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
916 {
917 	hammer_transaction_t trans = cursor->trans;
918 	hammer_record_t record;
919 	int error;
920 	int redirty;
921 
922 retry:
923 	error = 0;
924 
925 	/*
926 	 * If the inode has a presence on-disk then locate it and mark
927 	 * it deleted, setting DELONDISK.
928 	 *
929 	 * The record may or may not be physically deleted, depending on
930 	 * the retention policy.
931 	 */
932 	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
933 	    HAMMER_INODE_ONDISK) {
934 		hammer_normalize_cursor(cursor);
935 		cursor->key_beg.localization = ip->obj_localization +
936 					       HAMMER_LOCALIZE_INODE;
937 		cursor->key_beg.obj_id = ip->obj_id;
938 		cursor->key_beg.key = 0;
939 		cursor->key_beg.create_tid = 0;
940 		cursor->key_beg.delete_tid = 0;
941 		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
942 		cursor->key_beg.obj_type = 0;
943 		cursor->asof = ip->obj_asof;
944 		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
945 		cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
946 		cursor->flags |= HAMMER_CURSOR_BACKEND;
947 
948 		error = hammer_btree_lookup(cursor);
949 		if (hammer_debug_inode)
950 			kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
951 
952 		if (error == 0) {
953 			error = hammer_ip_delete_record(cursor, ip, trans->tid);
954 			if (hammer_debug_inode)
955 				kprintf(" error %d\n", error);
956 			if (error == 0) {
957 				ip->flags |= HAMMER_INODE_DELONDISK;
958 			}
959 			if (cursor->node)
960 				hammer_cache_node(&ip->cache[0], cursor->node);
961 		}
962 		if (error == EDEADLK) {
963 			hammer_done_cursor(cursor);
964 			error = hammer_init_cursor(trans, cursor,
965 						   &ip->cache[0], ip);
966 			if (hammer_debug_inode)
967 				kprintf("IPDED %p %d\n", ip, error);
968 			if (error == 0)
969 				goto retry;
970 		}
971 	}
972 
973 	/*
974 	 * Ok, write out the initial record or a new record (after deleting
975 	 * the old one), unless the DELETED flag is set.  This routine will
976 	 * clear DELONDISK if it writes out a record.
977 	 *
978 	 * Update our inode statistics if this is the first application of
979 	 * the inode on-disk.
980 	 */
981 	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
982 		/*
983 		 * Generate a record and write it to the media.  We clean-up
984 		 * the state before releasing so we do not have to set-up
985 		 * a flush_group.
986 		 */
987 		record = hammer_alloc_mem_record(ip, 0);
988 		record->type = HAMMER_MEM_RECORD_INODE;
989 		record->flush_state = HAMMER_FST_FLUSH;
990 		record->leaf = ip->sync_ino_leaf;
991 		record->leaf.base.create_tid = trans->tid;
992 		record->leaf.data_len = sizeof(ip->sync_ino_data);
993 		record->leaf.create_ts = trans->time32;
994 		record->data = (void *)&ip->sync_ino_data;
995 		record->flags |= HAMMER_RECF_INTERLOCK_BE;
996 
997 		/*
998 		 * If this flag is set we cannot sync the new file size
999 		 * because we haven't finished related truncations.  The
1000 		 * inode will be flushed in another flush group to finish
1001 		 * the job.
1002 		 */
1003 		if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
1004 		    ip->sync_ino_data.size != ip->ino_data.size) {
1005 			redirty = 1;
1006 			ip->sync_ino_data.size = ip->ino_data.size;
1007 		} else {
1008 			redirty = 0;
1009 		}
1010 
1011 		for (;;) {
1012 			error = hammer_ip_sync_record_cursor(cursor, record);
1013 			if (hammer_debug_inode)
1014 				kprintf("GENREC %p rec %08x %d\n",
1015 					ip, record->flags, error);
1016 			if (error != EDEADLK)
1017 				break;
1018 			hammer_done_cursor(cursor);
1019 			error = hammer_init_cursor(trans, cursor,
1020 						   &ip->cache[0], ip);
1021 			if (hammer_debug_inode)
1022 				kprintf("GENREC reinit %d\n", error);
1023 			if (error)
1024 				break;
1025 		}
1026 
1027 		/*
1028 		 * The record isn't managed by the inode's record tree;
1029 		 * destroy it whether we succeed or fail.
1030 		 */
1031 		record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
1032 		record->flags |= HAMMER_RECF_DELETED_FE | HAMMER_RECF_COMMITTED;
1033 		record->flush_state = HAMMER_FST_IDLE;
1034 		hammer_rel_mem_record(record);
1035 
1036 		/*
1037 		 * Finish up.
1038 		 */
1039 		if (error == 0) {
1040 			if (hammer_debug_inode)
1041 				kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
1042 			ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
1043 					    HAMMER_INODE_ATIME |
1044 					    HAMMER_INODE_MTIME);
1045 			ip->flags &= ~HAMMER_INODE_DELONDISK;
1046 			if (redirty)
1047 				ip->sync_flags |= HAMMER_INODE_DDIRTY;
1048 
1049 			/*
1050 			 * Root volume count of inodes
1051 			 */
1052 			hammer_sync_lock_sh(trans);
1053 			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
1054 				hammer_modify_volume_field(trans,
1055 							   trans->rootvol,
1056 							   vol0_stat_inodes);
1057 				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
1058 				hammer_modify_volume_done(trans->rootvol);
1059 				ip->flags |= HAMMER_INODE_ONDISK;
1060 				if (hammer_debug_inode)
1061 					kprintf("NOWONDISK %p\n", ip);
1062 			}
1063 			hammer_sync_unlock(trans);
1064 		}
1065 	}
1066 
1067 	/*
1068 	 * If the inode has been destroyed, clean out any left-over flags
1069 	 * that may have been set by the frontend.
1070 	 */
1071 	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
1072 		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
1073 				    HAMMER_INODE_ATIME |
1074 				    HAMMER_INODE_MTIME);
1075 	}
1076 	return(error);
1077 }
1078 
1079 /*
1080  * Update only the itimes fields.
1081  *
1082  * ATIME can be updated without generating any UNDO.  MTIME is updated
1083  * with UNDO so it is guaranteed to be synchronized properly in case of
1084  * a crash.
1085  *
1086  * Neither field is included in the B-Tree leaf element's CRC, which is how
1087  * we can get away with updating ATIME the way we do.
1088  */
1089 static int
1090 hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
1091 {
1092 	hammer_transaction_t trans = cursor->trans;
1093 	int error;
1094 
1095 retry:
1096 	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
1097 	    HAMMER_INODE_ONDISK) {
1098 		return(0);
1099 	}
1100 
1101 	hammer_normalize_cursor(cursor);
1102 	cursor->key_beg.localization = ip->obj_localization +
1103 				       HAMMER_LOCALIZE_INODE;
1104 	cursor->key_beg.obj_id = ip->obj_id;
1105 	cursor->key_beg.key = 0;
1106 	cursor->key_beg.create_tid = 0;
1107 	cursor->key_beg.delete_tid = 0;
1108 	cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
1109 	cursor->key_beg.obj_type = 0;
1110 	cursor->asof = ip->obj_asof;
1111 	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1112 	cursor->flags |= HAMMER_CURSOR_ASOF;
1113 	cursor->flags |= HAMMER_CURSOR_GET_LEAF;
1114 	cursor->flags |= HAMMER_CURSOR_GET_DATA;
1115 	cursor->flags |= HAMMER_CURSOR_BACKEND;
1116 
1117 	error = hammer_btree_lookup(cursor);
1118 	if (error == 0) {
1119 		hammer_cache_node(&ip->cache[0], cursor->node);
1120 		if (ip->sync_flags & HAMMER_INODE_MTIME) {
1121 			/*
1122 			 * Updating MTIME requires an UNDO.  Just cover
1123 			 * both atime and mtime.
1124 			 */
1125 			hammer_sync_lock_sh(trans);
1126 			hammer_modify_buffer(trans, cursor->data_buffer,
1127 				     HAMMER_ITIMES_BASE(&cursor->data->inode),
1128 				     HAMMER_ITIMES_BYTES);
1129 			cursor->data->inode.atime = ip->sync_ino_data.atime;
1130 			cursor->data->inode.mtime = ip->sync_ino_data.mtime;
1131 			hammer_modify_buffer_done(cursor->data_buffer);
1132 			hammer_sync_unlock(trans);
1133 		} else if (ip->sync_flags & HAMMER_INODE_ATIME) {
1134 			/*
1135 			 * Updating atime only can be done in-place with
1136 			 * no UNDO.
1137 			 */
1138 			hammer_sync_lock_sh(trans);
1139 			hammer_modify_buffer(trans, cursor->data_buffer,
1140 					     NULL, 0);
1141 			cursor->data->inode.atime = ip->sync_ino_data.atime;
1142 			hammer_modify_buffer_done(cursor->data_buffer);
1143 			hammer_sync_unlock(trans);
1144 		}
1145 		ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
1146 	}
1147 	if (error == EDEADLK) {
1148 		hammer_done_cursor(cursor);
1149 		error = hammer_init_cursor(trans, cursor,
1150 					   &ip->cache[0], ip);
1151 		if (error == 0)
1152 			goto retry;
1153 	}
1154 	return(error);
1155 }
1156 
1157 /*
1158  * Release a reference on an inode, flush as requested.
1159  *
1160  * On the last reference we queue the inode to the flusher for its final
1161  * disposition.
1162  */
1163 void
1164 hammer_rel_inode(struct hammer_inode *ip, int flush)
1165 {
1166 	hammer_mount_t hmp = ip->hmp;
1167 
1168 	/*
1169 	 * Handle disposition when dropping the last ref.
1170 	 */
1171 	for (;;) {
1172 		if (ip->lock.refs == 1) {
1173 			/*
1174 			 * Determine whether on-disk action is needed for
1175 			 * the inode's final disposition.
1176 			 */
1177 			KKASSERT(ip->vp == NULL);
1178 			hammer_inode_unloadable_check(ip, 0);
1179 			if (ip->flags & HAMMER_INODE_MODMASK) {
1180 				if (hmp->rsv_inodes > desiredvnodes) {
1181 					hammer_flush_inode(ip,
1182 							   HAMMER_FLUSH_SIGNAL);
1183 				} else {
1184 					hammer_flush_inode(ip, 0);
1185 				}
1186 			} else if (ip->lock.refs == 1) {
1187 				hammer_unload_inode(ip);
1188 				break;
1189 			}
1190 		} else {
1191 			if (flush)
1192 				hammer_flush_inode(ip, 0);
1193 
1194 			/*
1195 			 * The inode still has multiple refs, try to drop
1196 			 * one ref.
1197 			 */
1198 			KKASSERT(ip->lock.refs >= 1);
1199 			if (ip->lock.refs > 1) {
1200 				hammer_unref(&ip->lock);
1201 				break;
1202 			}
1203 		}
1204 	}
1205 }
1206 
1207 /*
1208  * Unload and destroy the specified inode.  Must be called with one remaining
1209  * reference.  The reference is disposed of.
1210  *
1211  * The inode must be completely clean.
1212  */
1213 static int
1214 hammer_unload_inode(struct hammer_inode *ip)
1215 {
1216 	hammer_mount_t hmp = ip->hmp;
1217 
1218 	KASSERT(ip->lock.refs == 1,
1219 		("hammer_unload_inode: %d refs\n", ip->lock.refs));
1220 	KKASSERT(ip->vp == NULL);
1221 	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
1222 	KKASSERT(ip->cursor_ip_refs == 0);
1223 	KKASSERT(ip->lock.lockcount == 0);
1224 	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);
1225 
1226 	KKASSERT(RB_EMPTY(&ip->rec_tree));
1227 	KKASSERT(TAILQ_EMPTY(&ip->target_list));
1228 
1229 	RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);
1230 
1231 	hammer_free_inode(ip);
1232 	return(0);
1233 }
1234 
1235 /*
1236  * Called during unmounting if a critical error occurred.  The in-memory
1237  * inode and all related structures are destroyed.
1238  *
1239  * If a critical error did not occur the unmount code calls the standard
1240  * release and asserts that the inode is gone.
1241  */
1242 int
1243 hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
1244 {
1245 	hammer_record_t rec;
1246 
1247 	/*
1248 	 * Get rid of the inodes in-memory records, regardless of their
1249 	 * state, and clear the mod-mask.
1250 	 */
1251 	while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
1252 		TAILQ_REMOVE(&ip->target_list, rec, target_entry);
1253 		rec->target_ip = NULL;
1254 		if (rec->flush_state == HAMMER_FST_SETUP)
1255 			rec->flush_state = HAMMER_FST_IDLE;
1256 	}
1257 	while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
1258 		if (rec->flush_state == HAMMER_FST_FLUSH)
1259 			--rec->flush_group->refs;
1260 		else
1261 			hammer_ref(&rec->lock);
1262 		KKASSERT(rec->lock.refs == 1);
1263 		rec->flush_state = HAMMER_FST_IDLE;
1264 		rec->flush_group = NULL;
1265 		rec->flags |= HAMMER_RECF_DELETED_FE;
1266 		rec->flags |= HAMMER_RECF_DELETED_BE;
1267 		hammer_rel_mem_record(rec);
1268 	}
1269 	ip->flags &= ~HAMMER_INODE_MODMASK;
1270 	ip->sync_flags &= ~HAMMER_INODE_MODMASK;
1271 	KKASSERT(ip->vp == NULL);
1272 
1273 	/*
1274 	 * Remove the inode from any flush group, force it idle.  FLUSH
1275 	 * and SETUP states have an inode ref.
1276 	 */
1277 	switch(ip->flush_state) {
1278 	case HAMMER_FST_FLUSH:
1279 		TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
1280 		--ip->flush_group->refs;
1281 		ip->flush_group = NULL;
1282 		/* fall through */
1283 	case HAMMER_FST_SETUP:
1284 		hammer_unref(&ip->lock);
1285 		ip->flush_state = HAMMER_FST_IDLE;
1286 		/* fall through */
1287 	case HAMMER_FST_IDLE:
1288 		break;
1289 	}
1290 
1291 	/*
1292 	 * There shouldn't be any associated vnode.  The unload needs at
1293 	 * least one ref; if we do have a vp, steal its ip ref.
1294 	 */
1295 	if (ip->vp) {
1296 		kprintf("hammer_destroy_inode_callback: Unexpected "
1297 			"vnode association ip %p vp %p\n", ip, ip->vp);
1298 		ip->vp->v_data = NULL;
1299 		ip->vp = NULL;
1300 	} else {
1301 		hammer_ref(&ip->lock);
1302 	}
1303 	hammer_unload_inode(ip);
1304 	return(0);
1305 }
1306 
1307 /*
1308  * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
1309  * the read-only flag for cached inodes.
1310  *
1311  * This routine is called from a RB_SCAN().
1312  */
1313 int
1314 hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
1315 {
1316 	hammer_mount_t hmp = ip->hmp;
1317 
1318 	if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
1319 		ip->flags |= HAMMER_INODE_RO;
1320 	else
1321 		ip->flags &= ~HAMMER_INODE_RO;
1322 	return(0);
1323 }
1324 
1325 /*
1326  * A transaction has modified an inode, requiring updates as specified by
1327  * the passed flags.
1328  *
1329  * HAMMER_INODE_DDIRTY: Inode data has been updated
1330  * HAMMER_INODE_XDIRTY: Dirty in-memory records
1331  * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
1332  * HAMMER_INODE_DELETED: Inode record/data must be deleted
1333  * HAMMER_INODE_ATIME/MTIME: atime/mtime has been updated
1334  */
1335 void
1336 hammer_modify_inode(hammer_inode_t ip, int flags)
1337 {
1338 	/*
1339 	 * A ronly value of 0 or 2 does not trigger the assertion;
1340 	 * 2 is a special error state.
1341 	 */
1342 	KKASSERT(ip->hmp->ronly != 1 ||
1343 		  (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
1344 			    HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
1345 			    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
1346 	if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
1347 		ip->flags |= HAMMER_INODE_RSV_INODES;
1348 		++ip->hmp->rsv_inodes;
1349 	}
1350 
1351 	ip->flags |= flags;
1352 }
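
/*
 * Illustrative sketch (kept under #if 0, not compiled): a typical
 * frontend caller updates the in-memory inode and then flags the
 * change so a later flush picks it up.
 */
#if 0
	ip->ino_data.mtime = trans->time;
	hammer_modify_inode(ip, HAMMER_INODE_MTIME);
#endif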
1353 
1354 /*
1355  * Request that an inode be flushed.  This whole mess cannot block and may
1356  * recurse (if not synchronous).  Once requested HAMMER will attempt to
1357  * actively flush the inode until the flush can be done.
1358  *
1359  * The inode may already be flushing, or may be in a setup state.  We can
1360  * place the inode in a flushing state if it is currently idle and flag it
1361  * to reflush if it is currently flushing.
1362  *
1363  * Upon return, if the inode could not be flushed due to a setup
1364  * dependency, it will be automatically flushed when the dependency
1365  * is satisfied.
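 *
 * In summary, the transitions driven below are (illustrative; the code
 * is authoritative):
 *
 *	IDLE  -> FLUSH		no dependencies, flush immediately
 *	SETUP -> FLUSH		parent connectivity resolved
 *	SETUP -> SETUP		no connectivity, CONN_DOWN|REFLUSH set
 *	FLUSH -> FLUSH		already flushing, REFLUSH set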
1366  */
1367 void
1368 hammer_flush_inode(hammer_inode_t ip, int flags)
1369 {
1370 	hammer_mount_t hmp;
1371 	hammer_flush_group_t flg;
1372 	int good;
1373 
1374 	/*
1375 	 * next_flush_group is the first flush group we can place the inode
1376 	 * in.  It may be NULL.  If it becomes full we append a new flush
1377 	 * group and make that the next_flush_group.
1378 	 */
1379 	hmp = ip->hmp;
1380 	while ((flg = hmp->next_flush_group) != NULL) {
1381 		KKASSERT(flg->running == 0);
1382 		if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit)
1383 			break;
1384 		hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
1385 		hammer_flusher_async(ip->hmp, flg);
1386 	}
1387 	if (flg == NULL) {
1388 		flg = kmalloc(sizeof(*flg), M_HAMMER, M_WAITOK|M_ZERO);
1389 		hmp->next_flush_group = flg;
1390 		TAILQ_INIT(&flg->flush_list);
1391 		TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
1392 	}
1393 
1394 	/*
1395 	 * Trivial 'nothing to flush' case.  If the inode is in a SETUP
1396 	 * state we have to put it back into an IDLE state so we can
1397 	 * drop the extra ref.
1398 	 *
1399 	 * If we have a parent dependency we must still fall through
1400 	 * so we can run it.
1401 	 */
1402 	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
1403 		if (ip->flush_state == HAMMER_FST_SETUP &&
1404 		    TAILQ_EMPTY(&ip->target_list)) {
1405 			ip->flush_state = HAMMER_FST_IDLE;
1406 			hammer_rel_inode(ip, 0);
1407 		}
1408 		if (ip->flush_state == HAMMER_FST_IDLE)
1409 			return;
1410 	}
1411 
1412 	/*
1413 	 * Our flush action will depend on the current state.
1414 	 */
1415 	switch(ip->flush_state) {
1416 	case HAMMER_FST_IDLE:
1417 		/*
1418 		 * We have no dependencies and can flush immediately.  Some of
1419 		 * our children may not be flushable, so we have to re-test
1420 		 * with that additional knowledge.
1421 		 */
1422 		hammer_flush_inode_core(ip, flg, flags);
1423 		break;
1424 	case HAMMER_FST_SETUP:
1425 		/*
1426 		 * Recurse upwards through dependencies via target_list
1427 		 * and start their flusher actions going if possible.
1428 		 *
1429 		 * 'good' is our connectivity.  -1 means we have none and
1430 		 * can't flush, 0 means there weren't any dependencies, and
1431 		 * 1 means we have good connectivity.
1432 		 */
1433 		good = hammer_setup_parent_inodes(ip, flg);
1434 
1435 		if (good >= 0) {
1436 			/*
1437 			 * We can continue if good >= 0.  Determine how
1438 			 * many records under our inode can be flushed (and
1439 			 * mark them).
1440 			 */
1441 			hammer_flush_inode_core(ip, flg, flags);
1442 		} else {
1443 			/*
1444 			 * Parent has no connectivity, tell it to flush
1445 			 * us as soon as it does.
1446 			 *
1447 			 * The REFLUSH flag is also needed to trigger
1448 			 * dependency wakeups.
1449 			 */
1450 			ip->flags |= HAMMER_INODE_CONN_DOWN |
1451 				     HAMMER_INODE_REFLUSH;
1452 			if (flags & HAMMER_FLUSH_SIGNAL) {
1453 				ip->flags |= HAMMER_INODE_RESIGNAL;
1454 				hammer_flusher_async(ip->hmp, flg);
1455 			}
1456 		}
1457 		break;
1458 	case HAMMER_FST_FLUSH:
1459 		/*
1460 		 * We are already flushing, flag the inode to reflush
1461 		 * if needed after it completes its current flush.
1462 		 *
1463 		 * The REFLUSH flag is also needed to trigger
1464 		 * dependency wakeups.
1465 		 */
1466 		if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
1467 			ip->flags |= HAMMER_INODE_REFLUSH;
1468 		if (flags & HAMMER_FLUSH_SIGNAL) {
1469 			ip->flags |= HAMMER_INODE_RESIGNAL;
1470 			hammer_flusher_async(ip->hmp, flg);
1471 		}
1472 		break;
1473 	}
1474 }
1475 
1476 /*
1477  * Scan ip->target_list, which is a list of records owned by PARENTS
1478  * of our ip which reference our ip.
1479  *
1480  * XXX This is a huge mess of recursive code, but not one bit of it blocks
1481  *     so for now do not ref/deref the structures.  Note that if we use the
1482  *     ref/rel code later, the rel CAN block.
1483  */
1484 static int
1485 hammer_setup_parent_inodes(hammer_inode_t ip, hammer_flush_group_t flg)
1486 {
1487 	hammer_record_t depend;
1488 	int good;
1489 	int r;
1490 
1491 	good = 0;
1492 	TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
1493 		r = hammer_setup_parent_inodes_helper(depend, flg);
1494 		KKASSERT(depend->target_ip == ip);
1495 		if (r < 0 && good == 0)
1496 			good = -1;
1497 		if (r > 0)
1498 			good = 1;
1499 	}
1500 	return(good);
1501 }
1502 
1503 /*
1504  * This helper function takes a record representing the dependency between
1505  * the parent inode and child inode.
1506  *
1507  * record->ip		= parent inode
1508  * record->target_ip	= child inode
1509  *
1510  * We are asked to recurse upwards and convert the record from SETUP
1511  * to FLUSH if possible.
1512  *
1513  * Return 1 if the record gives us connectivity
1514  *
1515  * Return 0 if the record is not relevant
1516  *
1517  * Return -1 if we can't resolve the dependency and there is no connectivity.
1518  */
1519 static int
1520 hammer_setup_parent_inodes_helper(hammer_record_t record,
1521 				  hammer_flush_group_t flg)
1522 {
1523 	hammer_mount_t hmp;
1524 	hammer_inode_t pip;
1525 	int good;
1526 
1527 	KKASSERT(record->flush_state != HAMMER_FST_IDLE);
1528 	pip = record->ip;
1529 	hmp = pip->hmp;
1530 
1531 	/*
1532 	 * If the record is already flushing, is it in our flush group?
1533 	 *
1534 	 * If it is in our flush group but it is a general record or a
1535 	 * delete-on-disk, it does not improve our connectivity (return 0),
1536 	 * and if the target inode is not trying to destroy itself we can't
1537 	 * allow the operation yet anyway (the second return -1).
1538 	 */
1539 	if (record->flush_state == HAMMER_FST_FLUSH) {
1540 		/*
1541 		 * If not in our flush group ask the parent to reflush
1542 		 * us as soon as possible.
1543 		 */
1544 		if (record->flush_group != flg) {
1545 			pip->flags |= HAMMER_INODE_REFLUSH;
1546 			record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1547 			return(-1);
1548 		}
1549 
1550 		/*
1551 		 * If in our flush group everything is already set up,
1552 		 * just return whether the record will improve our
1553 		 * visibility or not.
1554 		 */
1555 		if (record->type == HAMMER_MEM_RECORD_ADD)
1556 			return(1);
1557 		return(0);
1558 	}
1559 
1560 	/*
1561 	 * It must be a setup record.  Try to resolve the setup dependencies
1562 	 * by recursing upwards so we can place ip on the flush list.
1563 	 */
1564 	KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1565 
1566 	good = hammer_setup_parent_inodes(pip, flg);
1567 
1568 	/*
1569 	 * If good < 0 the parent has no connectivity and we cannot safely
1570 	 * flush the directory entry, which also means we can't flush our
1571 	 * ip.  Flag the parent and us for downward recursion once the
1572 	 * parent's connectivity is resolved.
1573 	 */
1574 	if (good < 0) {
1575 		/* pip->flags |= HAMMER_INODE_CONN_DOWN; set by recursion */
1576 		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1577 		return(good);
1578 	}
1579 
1580 	/*
1581 	 * We are go, place the parent inode in a flushing state so we can
1582 	 * place its record in a flushing state.  Note that the parent
1583 	 * may already be flushing.  The record must be in the same flush
1584 	 * group as the parent.
1585 	 */
1586 	if (pip->flush_state != HAMMER_FST_FLUSH)
1587 		hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
1588 	KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
1589 	KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1590 
1591 #if 0
1592 	if (record->type == HAMMER_MEM_RECORD_DEL &&
1593 	    (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
1594 		/*
1595 		 * Regardless of flushing state we cannot sync this path if the
1596 		 * record represents a delete-on-disk but the target inode
1597 		 * is not ready to sync its own deletion.
1598 		 *
1599 		 * XXX need to count effective nlinks to determine whether
1600 		 * the flush is ok, otherwise removing a hardlink will
1601 		 * just leave the DEL record to rot.
1602 		 */
1603 		record->target_ip->flags |= HAMMER_INODE_REFLUSH;
1604 		return(-1);
1605 	} else
1606 #endif
1607 	if (pip->flush_group == flg) {
1608 		/*
1609 		 * Because we have not calculated nlinks yet we can just
1610 		 * set records to the flush state if the parent is in
1611 		 * the same flush group as we are.
1612 		 */
1613 		record->flush_state = HAMMER_FST_FLUSH;
1614 		record->flush_group = flg;
1615 		++record->flush_group->refs;
1616 		hammer_ref(&record->lock);
1617 
1618 		/*
1619 		 * A general directory-add contributes to our visibility.
1620 		 *
1621 		 * Otherwise it is probably a directory-delete or
1622 		 * delete-on-disk record and does not contribute to our
1623 		 * visibility (but we can still flush it).
1624 		 */
1625 		if (record->type == HAMMER_MEM_RECORD_ADD)
1626 			return(1);
1627 		return(0);
1628 	} else {
1629 		/*
1630 		 * If the parent is not in our flush group we cannot
1631 		 * flush this record yet, there is no visibility.
1632 		 * We tell the parent to reflush and mark ourselves
1633 		 * so the parent knows it should flush us too.
1634 		 */
1635 		pip->flags |= HAMMER_INODE_REFLUSH;
1636 		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1637 		return(-1);
1638 	}
1639 }
1640 
1641 /*
1642  * This is the core routine placing an inode into the FST_FLUSH state.
1643  */
1644 static void
1645 hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
1646 {
1647 	int go_count;
1648 
1649 	/*
1650 	 * Set flush state and prevent the flusher from cycling into
1651 	 * the next flush group.  Do not place the ip on the list yet.
1652 	 * An inode coming from the idle state is given an extra reference.
1653 	 */
1654 	KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
1655 	if (ip->flush_state == HAMMER_FST_IDLE)
1656 		hammer_ref(&ip->lock);
1657 	ip->flush_state = HAMMER_FST_FLUSH;
1658 	ip->flush_group = flg;
1659 	++ip->hmp->flusher.group_lock;
1660 	++ip->hmp->count_iqueued;
1661 	++hammer_count_iqueued;
1662 	++flg->total_count;
1663 
1664 	/*
1665 	 * We need to be able to vfsync/truncate from the backend.
1666 	 */
1667 	KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
1668 	if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
1669 		ip->flags |= HAMMER_INODE_VHELD;
1670 		vref(ip->vp);
1671 	}
1672 
1673 	/*
1674 	 * Figure out how many in-memory records we can actually flush
1675 	 * (not including inode meta-data, buffers, etc).
1676 	 */
1677 	KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
1678 	if (flags & HAMMER_FLUSH_RECURSION) {
1679 		/*
1680 		 * If this is an upwards recursion we do not want to
1681 		 * recurse down again!
1682 		 */
1683 		go_count = 1;
1684 #if 0
1685 	} else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
1686 		/*
1687 		 * No new records are added if we must complete a flush
1688 		 * from a previous cycle, but we do have to move the records
1689 		 * from the previous cycle to the current one.
1690 		 */
1691 #if 0
1692 		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1693 				   hammer_syncgrp_child_callback, NULL);
1694 #endif
1695 		go_count = 1;
1696 #endif
1697 	} else {
1698 		/*
1699 		 * Normal flush, scan records and bring them into the flush.
1700 		 * Directory adds and deletes are usually skipped (they are
1701 		 * grouped with the related inode rather than with the
1702 		 * directory).
1703 		 *
1704 		 * go_count can be negative, which means the scan aborted
1705 		 * due to the flush group being over-full and we should
1706 		 * flush what we have.
1707 		 */
1708 		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1709 				   hammer_setup_child_callback, NULL);
1710 	}
1711 
1712 	/*
1713 	 * This is a more involved test that includes go_count.  If we
1714 	 * can't flush, flag the inode and return.  If go_count is 0 we
1715 	 * were unable to flush any records in our rec_tree and
1716 	 * must ignore the XDIRTY flag.
1717 	 */
1718 	if (go_count == 0) {
1719 		if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
1720 			--ip->hmp->count_iqueued;
1721 			--hammer_count_iqueued;
1722 
1723 			--flg->total_count;
1724 			ip->flush_state = HAMMER_FST_SETUP;
1725 			ip->flush_group = NULL;
1726 			if (ip->flags & HAMMER_INODE_VHELD) {
1727 				ip->flags &= ~HAMMER_INODE_VHELD;
1728 				vrele(ip->vp);
1729 			}
1730 
1731 			/*
1732 			 * REFLUSH is needed to trigger dependency wakeups
1733 			 * when an inode is in SETUP.
1734 			 */
1735 			ip->flags |= HAMMER_INODE_REFLUSH;
1736 			if (flags & HAMMER_FLUSH_SIGNAL) {
1737 				ip->flags |= HAMMER_INODE_RESIGNAL;
1738 				hammer_flusher_async(ip->hmp, flg);
1739 			}
1740 			if (--ip->hmp->flusher.group_lock == 0)
1741 				wakeup(&ip->hmp->flusher.group_lock);
1742 			return;
1743 		}
1744 	}
1745 
1746 	/*
1747 	 * Snapshot the state of the inode for the backend flusher.
1748 	 *
1749 	 * We continue to retain save_trunc_off even when all truncations
1750 	 * have been resolved as an optimization to determine if we can
1751 	 * skip the B-Tree lookup for overwrite deletions.
1752 	 *
1753 	 * NOTE: The DELETING flag is a mod flag, but it is also sticky,
1754 	 * and stays in ip->flags.  Once set, it stays set until the
1755 	 * inode is destroyed.
1756 	 */
1757 	if (ip->flags & HAMMER_INODE_TRUNCATED) {
1758 		KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
1759 		ip->sync_trunc_off = ip->trunc_off;
1760 		ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
1761 		ip->flags &= ~HAMMER_INODE_TRUNCATED;
1762 		ip->sync_flags |= HAMMER_INODE_TRUNCATED;
1763 
1764 		/*
1765 		 * save_trunc_off, which caches whether the B-Tree holds
1766 		 * any records past that point, is not consulted until
1767 		 * after the truncation has succeeded, so we can safely
1768 		 * set it now.
1769 		 */
1770 		if (ip->save_trunc_off > ip->sync_trunc_off)
1771 			ip->save_trunc_off = ip->sync_trunc_off;
1772 	}
1773 	ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
1774 			   ~HAMMER_INODE_TRUNCATED);
1775 	ip->sync_ino_leaf = ip->ino_leaf;
1776 	ip->sync_ino_data = ip->ino_data;
1777 	ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
1778 #ifdef DEBUG_TRUNCATE
1779 	if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
1780 		kprintf("truncateS %016llx\n", ip->sync_trunc_off);
1781 #endif
1782 
1783 	/*
1784 	 * The flusher list inherits our inode and reference.
1785 	 */
1786 	KKASSERT(flg->running == 0);
1787 	TAILQ_INSERT_TAIL(&flg->flush_list, ip, flush_entry);
1788 	if (--ip->hmp->flusher.group_lock == 0)
1789 		wakeup(&ip->hmp->flusher.group_lock);
1790 
1791 	if (flags & HAMMER_FLUSH_SIGNAL) {
1792 		hammer_flusher_async(ip->hmp, flg);
1793 	}
1794 }
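
/*
 * A minimal sketch (not compiled) of the frontend/backend snapshot idiom
 * used at the end of hammer_flush_inode_core() above.  The backend
 * flusher operates only on the sync_* copies, leaving the frontend
 * copies free to mutate.  The helper name below is hypothetical; the
 * fields and flags are the ones used in this file.
 */
#if 0
static __inline void
hammer_snapshot_for_backend(hammer_inode_t ip)
{
	/* move the modified-state flags, TRUNCATED is handled separately */
	ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
			   ~HAMMER_INODE_TRUNCATED);

	/* copy the meta-data the backend will sync to media */
	ip->sync_ino_leaf = ip->ino_leaf;
	ip->sync_ino_data = ip->ino_data;

	/* clear the frontend mod flags, retaining TRUNCATED if set */
	ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
}
#endif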
1795 
1796 /*
1797  * Callback for scan of ip->rec_tree.  Try to include each record in our
1798  * flush.  ip->flush_group has been set but the inode has not yet been
1799  * moved into a flushing state.
1800  *
1801  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
1802  * both inodes.
1803  *
1804  * We return 1 for any record placed or found in FST_FLUSH, which prevents
1805  * the caller from shortcutting the flush.
1806  */
1807 static int
1808 hammer_setup_child_callback(hammer_record_t rec, void *data)
1809 {
1810 	hammer_flush_group_t flg;
1811 	hammer_inode_t target_ip;
1812 	hammer_inode_t ip;
1813 	int r;
1814 
1815 	/*
1816 	 * Deleted records are ignored.  Note that the flush detects deleted
1817 	 * front-end records at multiple points to deal with races.  This is
1818 	 * just the first line of defense.  The only time DELETED_FE cannot
1819 	 * be set is when HAMMER_RECF_INTERLOCK_BE is set.
1820 	 *
1821 	 * Don't get confused between record deletion and, say, directory
1822 	 * entry deletion.  The deletion of a directory entry that is on
1823 	 * the media has nothing to do with the record deletion flags.
1824 	 */
1825 	if (rec->flags & (HAMMER_RECF_DELETED_FE|HAMMER_RECF_DELETED_BE)) {
1826 		if (rec->flush_state == HAMMER_FST_FLUSH) {
1827 			KKASSERT(rec->flush_group == rec->ip->flush_group);
1828 			r = 1;
1829 		} else {
1830 			r = 0;
1831 		}
1832 		return(r);
1833 	}
1834 
1835 	/*
1836 	 * If the record is in an idle state it has no dependencies and
1837 	 * can be flushed.
1838 	 */
1839 	ip = rec->ip;
1840 	flg = ip->flush_group;
1841 	r = 0;
1842 
1843 	switch(rec->flush_state) {
1844 	case HAMMER_FST_IDLE:
1845 		/*
1846 		 * The record has no setup dependency; we can flush it.
1847 		 */
1848 		KKASSERT(rec->target_ip == NULL);
1849 		rec->flush_state = HAMMER_FST_FLUSH;
1850 		rec->flush_group = flg;
1851 		++flg->refs;
1852 		hammer_ref(&rec->lock);
1853 		r = 1;
1854 		break;
1855 	case HAMMER_FST_SETUP:
1856 		/*
1857 		 * The record has a setup dependency.  These are typically
1858 		 * directory entry adds and deletes.  Such entries will be
1859 		 * flushed when their inodes are flushed so we do not
1860 		 * usually have to add them to the flush here.  However,
1861 		 * if the target_ip has set HAMMER_INODE_CONN_DOWN then
1862 		 * it is asking us to flush this record (and itself).
1863 		 */
1864 		target_ip = rec->target_ip;
1865 		KKASSERT(target_ip != NULL);
1866 		KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
1867 
1868 		/*
1869 		 * If the target IP is already flushing in our group
1870 		 * we could associate the record, but target_ip has
1871 		 * already synced ino_data to sync_ino_data and we
1872 		 * would also have to adjust nlinks.   Plus there are
1873 		 * ordering issues for adds and deletes.
1874 		 *
1875 		 * Reflush downward if this is an ADD, and upward if
1876 		 * this is a DEL.
1877 		 */
1878 		if (target_ip->flush_state == HAMMER_FST_FLUSH) {
1879 			if (rec->type == HAMMER_MEM_RECORD_ADD)
1880 				ip->flags |= HAMMER_INODE_REFLUSH;
1881 			else
1882 				target_ip->flags |= HAMMER_INODE_REFLUSH;
1883 			break;
1884 		}
1885 
1886 		/*
1887 		 * Target IP is not yet flushing.  This can get complex
1888 		 * because we have to be careful about the recursion.
1889 		 *
1890 		 * Directories create an issue for us in that if a flush
1891 		 * of a directory is requested the expectation is to flush
1892 		 * any pending directory entries, but this will cause the
1893 		 * related inodes to recursively flush as well.  We can't
1894 		 * really defer the operation, so we just pull in as many
1895 		 * records as we can and let a reflush pick up the rest.
1896 		 */
1897 #if 0
1898 		if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
1899 		    (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
1900 			/*
1901 			 * We aren't reclaiming and the target ip was not
1902 			 * previously prevented from flushing due to this
1903 			 * record dependency.  Do not flush this record.
1904 			 */
1905 			/*r = 0;*/
1906 		} else
1907 #endif
1908 		if (flg->total_count + flg->refs >
1909 			   ip->hmp->undo_rec_limit) {
1910 			/*
1911 			 * Our flush group is over-full and we risk blowing
1912 			 * out the UNDO FIFO.  Stop the scan, flush what we
1913 			 * have, then reflush the directory.
1914 			 *
1915 			 * The directory may be forced through multiple
1916 			 * flush groups before it can be completely
1917 			 * flushed.
1918 			 */
1919 			ip->flags |= HAMMER_INODE_RESIGNAL |
1920 				     HAMMER_INODE_REFLUSH;
1921 			r = -1;
1922 		} else if (rec->type == HAMMER_MEM_RECORD_ADD) {
1923 			/*
1924 			 * If the target IP is not flushing we can force
1925 			 * it to flush.  Even if it is unable to write out
1926 			 * any of its own records we have at least one in
1927 			 * hand that we CAN deal with.
1928 			 */
1929 			rec->flush_state = HAMMER_FST_FLUSH;
1930 			rec->flush_group = flg;
1931 			++flg->refs;
1932 			hammer_ref(&rec->lock);
1933 			hammer_flush_inode_core(target_ip, flg,
1934 						HAMMER_FLUSH_RECURSION);
1935 			r = 1;
1936 		} else {
1937 			/*
1938 			 * General or delete-on-disk record.
1939 			 *
1940 			 * XXX this needs help.  If this is a delete-on-disk
1941 			 * we could disconnect the target.  If the target has
1942 			 * its own dependencies they really need to be flushed.
1943 			 *
1944 			 * XXX
1945 			 */
1946 			rec->flush_state = HAMMER_FST_FLUSH;
1947 			rec->flush_group = flg;
1948 			++flg->refs;
1949 			hammer_ref(&rec->lock);
1950 			hammer_flush_inode_core(target_ip, flg,
1951 						HAMMER_FLUSH_RECURSION);
1952 			r = 1;
1953 		}
1954 		break;
1955 	case HAMMER_FST_FLUSH:
1956 		/*
1957 		 * The flush_group should already match.
1958 		 */
1959 		KKASSERT(rec->flush_group == flg);
1960 		r = 1;
1961 		break;
1962 	}
1963 	return(r);
1964 }
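
/*
 * An illustrative sketch (not compiled) of the RB_SCAN return convention
 * that hammer_setup_child_callback() relies on: non-negative callback
 * returns accumulate into the scan total (go_count), while a negative
 * return aborts the scan and is passed back to the caller.  The callback
 * name and the simplified over-full test below are assumptions.
 */
#if 0
static int
example_child_callback(hammer_record_t rec, void *data)
{
	if (rec->flags & (HAMMER_RECF_DELETED_FE|HAMMER_RECF_DELETED_BE))
		return(0);	/* skipped, not counted in go_count */
	if (rec->ip->flush_group->total_count > rec->ip->hmp->undo_rec_limit)
		return(-1);	/* abort scan, flush what we already have */
	return(1);		/* record included in the flush */
}
#endif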
1965 
1966 #if 0
1967 /*
1968  * This version just moves records already in a flush state to the new
1969  * flush group and that is it.
1970  */
1971 static int
1972 hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
1973 {
1974 	hammer_inode_t ip = rec->ip;
1975 
1976 	switch(rec->flush_state) {
1977 	case HAMMER_FST_FLUSH:
1978 		KKASSERT(rec->flush_group == ip->flush_group);
1979 		break;
1980 	default:
1981 		break;
1982 	}
1983 	return(0);
1984 }
1985 #endif
1986 
1987 /*
1988  * Wait for a previously queued flush to complete.
1989  *
1990  * If a critical error occurred we don't try to wait.
1991  */
1992 void
1993 hammer_wait_inode(hammer_inode_t ip)
1994 {
1995 	hammer_flush_group_t flg;
1996 
1997 	flg = NULL;
1998 	if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
1999 		while (ip->flush_state != HAMMER_FST_IDLE &&
2000 		       (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
2001 			if (ip->flush_state == HAMMER_FST_SETUP)
2002 				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2003 			if (ip->flush_state != HAMMER_FST_IDLE) {
2004 				ip->flags |= HAMMER_INODE_FLUSHW;
2005 				tsleep(&ip->flags, 0, "hmrwin", 0);
2006 			}
2007 		}
2008 	}
2009 }
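
/*
 * The two halves of the FLUSHW interlock used above are roughly a
 * hundred lines apart, so they are paired here for reference (not
 * compiled).  Waiter and waker must agree on the wait channel, which
 * is the address of ip->flags.
 */
#if 0
	/* frontend waiter, see hammer_wait_inode() above */
	ip->flags |= HAMMER_INODE_FLUSHW;
	tsleep(&ip->flags, 0, "hmrwin", 0);

	/* backend waker, see hammer_flush_inode_done() below */
	if (ip->flags & HAMMER_INODE_FLUSHW) {
		ip->flags &= ~HAMMER_INODE_FLUSHW;
		wakeup(&ip->flags);
	}
#endif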
2010 
2011 /*
2012  * Called by the backend code when a flush has been completed.
2013  * The inode has already been removed from the flush list.
2014  *
2015  * A pipelined flush can occur, in which case we must re-enter the
2016  * inode on the list and re-copy its fields.
2017  */
2018 void
2019 hammer_flush_inode_done(hammer_inode_t ip, int error)
2020 {
2021 	hammer_mount_t hmp;
2022 	int dorel;
2023 
2024 	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
2025 
2026 	hmp = ip->hmp;
2027 
2028 	/*
2029 	 * Merge left-over flags back into the frontend and fix the state.
2030 	 * Incomplete truncations are retained by the backend.
2031 	 */
2032 	ip->error = error;
2033 	ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
2034 	ip->sync_flags &= HAMMER_INODE_TRUNCATED;
2035 
2036 	/*
2037 	 * The backend may have adjusted nlinks, so if the adjusted nlinks
2038 	 * does not match the frontend, set the frontend's DDIRTY flag again.
2039 	 */
2040 	if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
2041 		ip->flags |= HAMMER_INODE_DDIRTY;
2042 
2043 	/*
2044 	 * Fix up the dirty buffer status.
2045 	 */
2046 	if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
2047 		ip->flags |= HAMMER_INODE_BUFS;
2048 	}
2049 
2050 	/*
2051 	 * Re-set the XDIRTY flag if some of the inode's in-memory records
2052 	 * could not be flushed.
2053 	 */
2054 	KKASSERT((RB_EMPTY(&ip->rec_tree) &&
2055 		  (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
2056 		 (!RB_EMPTY(&ip->rec_tree) &&
2057 		  (ip->flags & HAMMER_INODE_XDIRTY) != 0));
2058 
2059 	/*
2060 	 * Do not lose track of inodes which no longer have vnode
2061 	 * associations, otherwise they may never get flushed again.
2062 	 */
2063 	if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
2064 		ip->flags |= HAMMER_INODE_REFLUSH;
2065 
2066 	/*
2067 	 * Adjust the flush state.
2068 	 */
2069 	if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2070 		/*
2071 		 * We were unable to flush out all our records, leave the
2072 		 * inode in a flush state and in the current flush group.
2073 		 * The flush group will be re-run.
2074 		 *
2075 		 * This occurs if the UNDO FIFO gets too full or there is
2076 		 * too much dirty meta-data.  Leaving the inode queued lets
2077 		 * the flusher finalize the UNDO space and then re-flush.
2078 		 */
2079 		ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
2080 		dorel = 0;
2081 	} else {
2082 		/*
2083 		 * Remove from the flush_group
2084 		 */
2085 		TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
2086 		ip->flush_group = NULL;
2087 
2088 		/*
2089 		 * Clean up the vnode ref and tracking counts.
2090 		 */
2091 		if (ip->flags & HAMMER_INODE_VHELD) {
2092 			ip->flags &= ~HAMMER_INODE_VHELD;
2093 			vrele(ip->vp);
2094 		}
2095 		--hmp->count_iqueued;
2096 		--hammer_count_iqueued;
2097 
2098 		/*
2099 		 * And adjust the state.
2100 		 */
2101 		if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
2102 			ip->flush_state = HAMMER_FST_IDLE;
2103 			dorel = 1;
2104 		} else {
2105 			ip->flush_state = HAMMER_FST_SETUP;
2106 			dorel = 0;
2107 		}
2108 
2109 		/*
2110 		 * If the frontend is waiting for a flush to complete,
2111 		 * wake it up.
2112 		 */
2113 		if (ip->flags & HAMMER_INODE_FLUSHW) {
2114 			ip->flags &= ~HAMMER_INODE_FLUSHW;
2115 			wakeup(&ip->flags);
2116 		}
2117 
2118 		/*
2119 		 * If the frontend made more changes and requested another
2120 		 * flush, then try to get it running.
2121 		 *
2122 		 * Reflushes are aborted when the inode is errored out.
2123 		 */
2124 		if (ip->flags & HAMMER_INODE_REFLUSH) {
2125 			ip->flags &= ~HAMMER_INODE_REFLUSH;
2126 			if (ip->flags & HAMMER_INODE_RESIGNAL) {
2127 				ip->flags &= ~HAMMER_INODE_RESIGNAL;
2128 				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2129 			} else {
2130 				hammer_flush_inode(ip, 0);
2131 			}
2132 		}
2133 	}
2134 
2135 	/*
2136 	 * If we have no parent dependencies we can clear CONN_DOWN
2137 	 */
2138 	if (TAILQ_EMPTY(&ip->target_list))
2139 		ip->flags &= ~HAMMER_INODE_CONN_DOWN;
2140 
2141 	/*
2142 	 * If the inode is now clean drop the space reservation.
2143 	 */
2144 	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
2145 	    (ip->flags & HAMMER_INODE_RSV_INODES)) {
2146 		ip->flags &= ~HAMMER_INODE_RSV_INODES;
2147 		--hmp->rsv_inodes;
2148 	}
2149 
2150 	if (dorel)
2151 		hammer_rel_inode(ip, 0);
2152 }
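
/*
 * Worked example (illustrative) of the WOULDBLOCK pipeline handled
 * above: a large truncation overflows the UNDO FIFO mid-flush,
 * hammer_ip_delete_range() returns EWOULDBLOCK, and the backend sets
 * HAMMER_INODE_WOULDBLOCK.  hammer_flush_inode_done() then leaves the
 * inode in HAMMER_FST_FLUSH within the same flush group, the flusher
 * finalizes the UNDO space and re-runs the group, and the deletion
 * resumes from the sync_trunc_off which the aborted pass updated.
 */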
2153 
2154 /*
2155  * Called from hammer_sync_inode() to synchronize in-memory records
2156  * to the media.
2157  */
2158 static int
2159 hammer_sync_record_callback(hammer_record_t record, void *data)
2160 {
2161 	hammer_cursor_t cursor = data;
2162 	hammer_transaction_t trans = cursor->trans;
2163 	hammer_mount_t hmp = trans->hmp;
2164 	int error;
2165 
2166 	/*
2167 	 * Skip records that do not belong to the current flush.
2168 	 */
2169 	++hammer_stats_record_iterations;
2170 	if (record->flush_state != HAMMER_FST_FLUSH)
2171 		return(0);
2172 
2173 #if 1
2174 	if (record->flush_group != record->ip->flush_group) {
2175 		kprintf("sync_record %p ip %p bad flush group %p %p\n", record, record->ip, record->flush_group, record->ip->flush_group);
2176 		Debugger("blah2");
2177 		return(0);
2178 	}
2179 #endif
2180 	KKASSERT(record->flush_group == record->ip->flush_group);
2181 
2182 	/*
2183 	 * Interlock the record using the BE flag.  Once BE is set the
2184 	 * frontend cannot change the state of FE.
2185 	 *
2186 	 * NOTE: If FE is set prior to us setting BE we still sync the
2187 	 * record out, but the flush completion code converts it to
2188 	 * a delete-on-disk record instead of destroying it.
2189 	 */
2190 	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
2191 	record->flags |= HAMMER_RECF_INTERLOCK_BE;
2192 
2193 	/*
2194 	 * The backend may have already disposed of the record.
2195 	 */
2196 	if (record->flags & HAMMER_RECF_DELETED_BE) {
2197 		error = 0;
2198 		goto done;
2199 	}
2200 
2201 	/*
2202 	 * If the whole inode is being deleted all on-disk records will
2203 	 * be deleted very soon.  We can't sync any new records to disk
2204 	 * because they will be deleted in the same transaction they were
2205 	 * created in (delete_tid == create_tid), which will assert.
2206 	 *
2207 	 * XXX There may be a case with RECORD_ADD with DELETED_FE set
2208 	 * that we currently panic on.
2209 	 */
2210 	if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
2211 		switch(record->type) {
2212 		case HAMMER_MEM_RECORD_DATA:
2213 			/*
2214 			 * We don't have to do anything; if the record was
2215 			 * committed the space will have been accounted for
2216 			 * in the blockmap.
2217 			 */
2218 			/* fall through */
2219 		case HAMMER_MEM_RECORD_GENERAL:
2220 			record->flags |= HAMMER_RECF_DELETED_FE;
2221 			record->flags |= HAMMER_RECF_DELETED_BE;
2222 			error = 0;
2223 			goto done;
2224 		case HAMMER_MEM_RECORD_ADD:
2225 			panic("hammer_sync_record_callback: illegal add "
2226 			      "during inode deletion record %p", record);
2227 			break; /* NOT REACHED */
2228 		case HAMMER_MEM_RECORD_INODE:
2229 			panic("hammer_sync_record_callback: attempt to "
2230 			      "sync inode record %p?", record);
2231 			break; /* NOT REACHED */
2232 		case HAMMER_MEM_RECORD_DEL:
2233 			/*
2234 			 * Follow through and issue the on-disk deletion
2235 			 */
2236 			break;
2237 		}
2238 	}
2239 
2240 	/*
2241 	 * If DELETED_FE is set special handling is needed for directory
2242 	 * entries.  Dependent pieces related to the directory entry may
2243 	 * have already been synced to disk.  If this occurs we have to
2244 	 * sync the directory entry and then change the in-memory record
2245 	 * from an ADD to a DELETE to cover the fact that it's been
2246 	 * deleted by the frontend.
2247 	 *
2248 	 * A directory delete covering record (MEM_RECORD_DEL) can never
2249 	 * be deleted by the frontend.
2250 	 *
2251 	 * Any other record type (aka DATA) can be deleted by the frontend.
2252 	 * XXX At the moment the flusher must skip it because there may
2253 	 * be another data record in the flush group for the same block,
2254 	 * meaning that some frontend data changes can leak into the backend's
2255 	 * synchronization point.
2256 	 */
2257 	if (record->flags & HAMMER_RECF_DELETED_FE) {
2258 		if (record->type == HAMMER_MEM_RECORD_ADD) {
2259 			record->flags |= HAMMER_RECF_CONVERT_DELETE;
2260 		} else {
2261 			KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
2262 			record->flags |= HAMMER_RECF_DELETED_BE;
2263 			error = 0;
2264 			goto done;
2265 		}
2266 	}
2267 
2268 	/*
2269 	 * Assign the create_tid for new records.  Deletions already
2270 	 * have the record's entire key properly set up.
2271 	 */
2272 	if (record->type != HAMMER_MEM_RECORD_DEL) {
2273 		record->leaf.base.create_tid = trans->tid;
2274 		record->leaf.create_ts = trans->time32;
	}
2275 	for (;;) {
2276 		error = hammer_ip_sync_record_cursor(cursor, record);
2277 		if (error != EDEADLK)
2278 			break;
2279 		hammer_done_cursor(cursor);
2280 		error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
2281 					   record->ip);
2282 		if (error)
2283 			break;
2284 	}
2285 	record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
2286 
2287 	if (error)
2288 		error = -error;
2289 done:
2290 	hammer_flush_record_done(record, error);
2291 
2292 	/*
2293 	 * Do partial finalization if we have built up too many dirty
2294 	 * buffers.  Otherwise a buffer cache deadlock can occur when
2295 	 * doing things like creating tens of thousands of tiny files.
2296 	 *
2297 	 * We must release our cursor lock to avoid a 3-way deadlock
2298 	 * due to the exclusive sync lock the finalizer must get.
2299 	 */
2300 	if (hammer_flusher_meta_limit(hmp)) {
2301 		hammer_unlock_cursor(cursor, 0);
2302 		hammer_flusher_finalize(trans, 0);
2303 		hammer_lock_cursor(cursor, 0);
2304 	}
2305 
2306 	return(error);
2307 }
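
/*
 * A minimal sketch (not compiled) of the EDEADLK retry idiom used by
 * hammer_sync_record_callback() above.  A cursor-based B-Tree operation
 * may return EDEADLK; the cursor is torn down to release its locks and
 * re-initialized before the operation is retried.  The wrapper name is
 * hypothetical, the calls are the ones used above.
 */
#if 0
static int
example_sync_with_retry(hammer_cursor_t cursor, hammer_record_t record)
{
	hammer_transaction_t trans = cursor->trans;
	int error;

	for (;;) {
		error = hammer_ip_sync_record_cursor(cursor, record);
		if (error != EDEADLK)
			break;			/* done, or a hard error */
		hammer_done_cursor(cursor);	/* unwind all cursor locks */
		error = hammer_init_cursor(trans, cursor,
					   &record->ip->cache[0],
					   record->ip);
		if (error)
			break;			/* could not rebuild cursor */
	}
	return(error);
}
#endif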
2308 
2309 /*
2310  * Backend function called by the flusher to sync an inode to media.
2311  */
2312 int
2313 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
2314 {
2315 	struct hammer_cursor cursor;
2316 	hammer_node_t tmp_node;
2317 	hammer_record_t depend;
2318 	hammer_record_t next;
2319 	int error, tmp_error;
2320 	u_int64_t nlinks;
2321 
2322 	if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
2323 		return(0);
2324 
2325 	error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2326 	if (error)
2327 		goto done;
2328 
2329 	/*
2330 	 * Any directory records referencing this inode which are not in
2331 	 * our current flush group must adjust our nlink count for the
2332 	 * purposes of synchronization to disk.
2333 	 *
2334 	 * Records which are in our flush group can be unlinked from our
2335 	 * inode now, potentially allowing the inode to be physically
2336 	 * deleted.
2337 	 *
2338 	 * This cannot block.
2339 	 */
2340 	nlinks = ip->ino_data.nlinks;
2341 	next = TAILQ_FIRST(&ip->target_list);
2342 	while ((depend = next) != NULL) {
2343 		next = TAILQ_NEXT(depend, target_entry);
2344 		if (depend->flush_state == HAMMER_FST_FLUSH &&
2345 		    depend->flush_group == ip->flush_group) {
2346 			/*
2347 			 * If this is an ADD that was deleted by the frontend
2348 			 * the frontend nlinks count will have already been
2349 			 * decremented, but the backend is going to sync its
2350 			 * directory entry and must account for it.  The
2351 			 * record will be converted to a delete-on-disk when
2352 			 * it gets synced.
2353 			 *
2354 			 * If the ADD was not deleted by the frontend we
2355 			 * can remove the dependency from our target_list.
2356 			 */
2357 			if (depend->flags & HAMMER_RECF_DELETED_FE) {
2358 				++nlinks;
2359 			} else {
2360 				TAILQ_REMOVE(&ip->target_list, depend,
2361 					     target_entry);
2362 				depend->target_ip = NULL;
2363 			}
2364 		} else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
2365 			/*
2366 			 * Not part of our flush group
2367 			 */
2368 			KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
2369 			switch(depend->type) {
2370 			case HAMMER_MEM_RECORD_ADD:
2371 				--nlinks;
2372 				break;
2373 			case HAMMER_MEM_RECORD_DEL:
2374 				++nlinks;
2375 				break;
2376 			default:
2377 				break;
2378 			}
2379 		}
2380 	}
2381 
2382 	/*
2383 	 * Set dirty if we had to modify the link count.
2384 	 */
2385 	if (ip->sync_ino_data.nlinks != nlinks) {
2386 		KKASSERT((int64_t)nlinks >= 0);
2387 		ip->sync_ino_data.nlinks = nlinks;
2388 		ip->sync_flags |= HAMMER_INODE_DDIRTY;
2389 	}
2390 
2391 	/*
2392 	 * If there is a truncation queued destroy any data past the (aligned)
2393 	 * truncation point.  Userland will have dealt with the buffer
2394 	 * containing the truncation point for us.
2395 	 *
2396 	 * We don't flush pending frontend data buffers until after we've
2397 	 * dealt with the truncation.
2398 	 */
2399 	if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2400 		/*
2401 		 * Interlock trunc_off.  The VOP front-end may continue to
2402 		 * make adjustments to it while we are blocked.
2403 		 */
2404 		off_t trunc_off;
2405 		off_t aligned_trunc_off;
2406 		int blkmask;
2407 
2408 		trunc_off = ip->sync_trunc_off;
2409 		blkmask = hammer_blocksize(trunc_off) - 1;
2410 		aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
2411 
2412 		/*
2413 		 * Delete any whole blocks on-media.  The front-end has
2414 		 * already cleaned out any partial block and made it
2415 		 * pending.  The front-end may have updated trunc_off
2416 		 * while we were blocked so we only use sync_trunc_off.
2417 		 *
2418 		 * This operation can blow out the buffer cache, EWOULDBLOCK
2419 		 * means we were unable to complete the deletion.  The
2420 		 * deletion will update sync_trunc_off in that case.
2421 		 */
2422 		error = hammer_ip_delete_range(&cursor, ip,
2423 						aligned_trunc_off,
2424 						0x7FFFFFFFFFFFFFFFLL, 2);
2425 		if (error == EWOULDBLOCK) {
2426 			ip->flags |= HAMMER_INODE_WOULDBLOCK;
2427 			error = 0;
2428 			goto defer_buffer_flush;
2429 		}
2430 
2431 		if (error)
2432 			goto done;
2433 
2434 		/*
2435 		 * Clear the truncation flag on the backend after we have
2436 		 * completed the deletions.  Backend data is now good again
2437 		 * (including new records we are about to sync, below).
2438 		 *
2439 		 * Leave sync_trunc_off intact.  As we write additional
2440 		 * records the backend will update sync_trunc_off.  This
2441 		 * tells the backend whether it can skip the overwrite
2442 		 * test.  This should work properly even when the backend
2443 		 * writes full blocks where the truncation point straddles
2444 		 * the block because the comparison is against the base
2445 		 * offset of the record.
2446 		 */
2447 		ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2448 		/* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
2449 	} else {
2450 		error = 0;
2451 	}
2452 
2453 	/*
2454 	 * Now sync related records.  These will typically be directory
2455 	 * entries, records tracking direct-writes, or delete-on-disk records.
2456 	 */
2457 	if (error == 0) {
2458 		tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2459 				    hammer_sync_record_callback, &cursor);
2460 		if (tmp_error < 0)
2461 			tmp_error = -tmp_error;
2462 		if (tmp_error)
2463 			error = tmp_error;
2464 	}
2465 	hammer_cache_node(&ip->cache[1], cursor.node);
2466 
2467 	/*
2468 	 * Re-seek for inode update, assuming our cache hasn't been ripped
2469 	 * out from under us.
2470 	 */
2471 	if (error == 0) {
2472 		tmp_node = hammer_ref_node_safe(ip->hmp, &ip->cache[0], &error);
2473 		if (tmp_node) {
2474 			hammer_cursor_downgrade(&cursor);
2475 			hammer_lock_sh(&tmp_node->lock);
2476 			if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
2477 				hammer_cursor_seek(&cursor, tmp_node, 0);
2478 			hammer_unlock(&tmp_node->lock);
2479 			hammer_rel_node(tmp_node);
2480 		}
2481 		error = 0;
2482 	}
2483 
2484 	/*
2485 	 * If we are deleting the inode the frontend had better not have
2486 	 * any active references on elements making up the inode.
2487 	 *
2488 	 * The call to hammer_ip_delete_clean() cleans up auxiliary records
2489 	 * but not DB or DATA records.  Those must have already been deleted
2490 	 * by the normal truncation mechanic.
2491 	 */
2492 	if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
2493 	    RB_EMPTY(&ip->rec_tree) &&
2494 	    (ip->sync_flags & HAMMER_INODE_DELETING) &&
2495 	    (ip->flags & HAMMER_INODE_DELETED) == 0) {
2496 		int count1 = 0;
2497 
2498 		error = hammer_ip_delete_clean(&cursor, ip, &count1);
2499 		if (error == 0) {
2500 			ip->flags |= HAMMER_INODE_DELETED;
2501 			ip->sync_flags &= ~HAMMER_INODE_DELETING;
2502 			ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2503 			KKASSERT(RB_EMPTY(&ip->rec_tree));
2504 
2505 			/*
2506 			 * Set delete_tid in both the frontend and backend
2507 			 * copy of the inode record.  The DELETED flag handles
2508 			 * this, do not set DDIRTY.
2509 			 */
2510 			ip->ino_leaf.base.delete_tid = trans->tid;
2511 			ip->sync_ino_leaf.base.delete_tid = trans->tid;
2512 			ip->ino_leaf.delete_ts = trans->time32;
2513 			ip->sync_ino_leaf.delete_ts = trans->time32;
2514 
2516 			/*
2517 			 * Adjust the inode count in the volume header
2518 			 */
2519 			hammer_sync_lock_sh(trans);
2520 			if (ip->flags & HAMMER_INODE_ONDISK) {
2521 				hammer_modify_volume_field(trans,
2522 							   trans->rootvol,
2523 							   vol0_stat_inodes);
2524 				--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
2525 				hammer_modify_volume_done(trans->rootvol);
2526 			}
2527 			hammer_sync_unlock(trans);
2528 		}
2529 	}
2530 
2531 	if (error)
2532 		goto done;
2533 	ip->sync_flags &= ~HAMMER_INODE_BUFS;
2534 
2535 defer_buffer_flush:
2536 	/*
2537 	 * Now update the inode's on-disk inode-data and/or on-disk record.
2538 	 * DELETED and ONDISK are managed only in ip->flags.
2539 	 *
2540 	 * In the case of a deferred buffer flush we still update the on-disk
2541 	 * inode to satisfy visibility requirements if there happen to be
2542 	 * directory dependencies.
2543 	 */
2544 	switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
2545 	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
2546 		/*
2547 		 * If deleted and on-disk, don't set any additional flags.
2548 		 * The delete flag takes care of things.
2549 		 *
2550 		 * Clear flags which may have been set by the frontend.
2551 		 */
2552 		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2553 				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2554 				    HAMMER_INODE_DELETING);
2555 		break;
2556 	case HAMMER_INODE_DELETED:
2557 		/*
2558 		 * Take care of the case where a deleted inode was never
2559 		 * flushed to the disk in the first place.
2560 		 *
2561 		 * Clear flags which may have been set by the frontend.
2562 		 */
2563 		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2564 				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2565 				    HAMMER_INODE_DELETING);
2566 		while (RB_ROOT(&ip->rec_tree)) {
2567 			hammer_record_t record = RB_ROOT(&ip->rec_tree);
2568 			hammer_ref(&record->lock);
2569 			KKASSERT(record->lock.refs == 1);
2570 			record->flags |= HAMMER_RECF_DELETED_FE;
2571 			record->flags |= HAMMER_RECF_DELETED_BE;
2572 			hammer_rel_mem_record(record);
2573 		}
2574 		break;
2575 	case HAMMER_INODE_ONDISK:
2576 		/*
2577 		 * If already on-disk, do not set any additional flags.
2578 		 */
2579 		break;
2580 	default:
2581 		/*
2582 		 * If not on-disk and not deleted, set DDIRTY to force
2583 		 * an initial record to be written.
2584 		 *
2585 		 * Also set the create_tid in both the frontend and backend
2586 		 * copy of the inode record.
2587 		 */
2588 		ip->ino_leaf.base.create_tid = trans->tid;
2589 		ip->ino_leaf.create_ts = trans->time32;
2590 		ip->sync_ino_leaf.base.create_tid = trans->tid;
2591 		ip->sync_ino_leaf.create_ts = trans->time32;
2592 		ip->sync_flags |= HAMMER_INODE_DDIRTY;
2593 		break;
2594 	}
2595 
2596 	/*
2597  * If DDIRTY is set, write out a new record.  If the inode
2598 	 * is already on-disk the old record is marked as deleted.
2599 	 *
2600 	 * If DELETED is set hammer_update_inode() will delete the existing
2601 	 * record without writing out a new one.
2602 	 *
2603  * If *ONLY* the ATIME/MTIME flags are set we can update the record in-place.
2604 	 */
2605 	if (ip->flags & HAMMER_INODE_DELETED) {
2606 		error = hammer_update_inode(&cursor, ip);
2607 	} else
2608 	if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
2609 	    (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
2610 		error = hammer_update_itimes(&cursor, ip);
2611 	} else
2612 	if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
2613 		error = hammer_update_inode(&cursor, ip);
2614 	}
2615 done:
2616 	if (error) {
2617 		hammer_critical_error(ip->hmp, ip, error,
2618 				      "while syncing inode");
2619 	}
2620 	hammer_done_cursor(&cursor);
2621 	return(error);
2622 }
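
/*
 * Worked example (illustrative) of the nlink fixup performed in
 * hammer_sync_inode() above: suppose the frontend nlinks is 2 because
 * two in-memory directory ADDs target this inode, but only one of them
 * is in the current flush group.  The out-of-group ADD is not yet
 * visible on media, so one link is subtracted and the inode is synced
 * with nlinks == 1.  When the second ADD is flushed in a later group
 * the nlinks mismatch re-dirties the inode and it is synced again as 2.
 */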
2623 
2624 /*
2625  * This routine is called when the OS is no longer actively referencing
2626  * the inode (but might still be keeping it cached), or when releasing
2627  * the last reference to an inode.
2628  *
2629  * At this point if the inode's nlinks count is zero we want to destroy
2630  * it, which may mean destroying it on-media too.
2631  */
2632 void
2633 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
2634 {
2635 	struct vnode *vp;
2636 
2637 	/*
2638 	 * Set the DELETING flag when the link count drops to 0 and the
2639 	 * OS no longer has any opens on the inode.
2640 	 *
2641 	 * The backend will clear DELETING (a mod flag) and set DELETED
2642 	 * (a state flag) when it is actually able to perform the
2643 	 * operation.
2644 	 */
2645 	if (ip->ino_data.nlinks == 0 &&
2646 	    (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
2647 		ip->flags |= HAMMER_INODE_DELETING;
2648 		ip->flags |= HAMMER_INODE_TRUNCATED;
2649 		ip->trunc_off = 0;
2650 		vp = NULL;
2651 		if (getvp) {
2652 			if (hammer_get_vnode(ip, &vp) != 0)
2653 				return;
2654 		}
2655 
2656 		/*
2657 		 * Final cleanup
2658 		 */
2659 		if (ip->vp) {
2660 			vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
2661 			vnode_pager_setsize(ip->vp, 0);
2662 		}
2663 		if (getvp) {
2664 			vput(vp);
2665 		}
2666 	}
2667 }
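
/*
 * Illustrative usage (not compiled) of the getvp argument above: a
 * caller which already holds a vnode reference passes 0, while a caller
 * which does not passes 1 so the routine can acquire a vnode for the
 * vtruncbuf()/vnode_pager_setsize() cleanup and vput() it afterwards.
 * The call-site comments are assumptions, not verified call sites.
 */
#if 0
	hammer_inode_unloadable_check(ip, 0);	/* e.g. vp held by caller */
	hammer_inode_unloadable_check(ip, 1);	/* e.g. no vp, acquire one */
#endif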
2668 
2669 /*
2670  * After potentially resolving a dependency the inode is tested
2671  * to determine whether it needs to be reflushed.
2672  */
2673 void
2674 hammer_test_inode(hammer_inode_t ip)
2675 {
2676 	if (ip->flags & HAMMER_INODE_REFLUSH) {
2677 		ip->flags &= ~HAMMER_INODE_REFLUSH;
2678 		hammer_ref(&ip->lock);
2679 		if (ip->flags & HAMMER_INODE_RESIGNAL) {
2680 			ip->flags &= ~HAMMER_INODE_RESIGNAL;
2681 			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2682 		} else {
2683 			hammer_flush_inode(ip, 0);
2684 		}
2685 		hammer_rel_inode(ip, 0);
2686 	}
2687 }
2688 
2689 /*
2690  * Clear the RECLAIM flag on an inode.  This occurs when the inode is
2691  * reassociated with a vp or just before it gets freed.
2692  *
2693  * Wake up one thread blocked waiting on reclaims to complete.  Note that
2694  * the inode the thread is waiting on behalf of is a different inode than
2695  * the inode we are called with.  This is to create a pipeline.
2696  */
2697 static void
2698 hammer_inode_wakereclaims(hammer_inode_t ip)
2699 {
2700 	struct hammer_reclaim *reclaim;
2701 	hammer_mount_t hmp = ip->hmp;
2702 
2703 	if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
2704 		return;
2705 
2706 	--hammer_count_reclaiming;
2707 	--hmp->inode_reclaims;
2708 	ip->flags &= ~HAMMER_INODE_RECLAIM;
2709 
2710 	if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
2711 		TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
2712 		reclaim->okydoky = 1;
2713 		wakeup(reclaim);
2714 	}
2715 }
2716 
2717 /*
2718  * Set up our reclaim pipeline.  We only let so many detached (and dirty)
2719  * inodes build up before we start blocking.
2720  *
2721  * When we block we don't care *which* inode has finished reclaiming,
2722  * as long as one does.  This is somewhat heuristic... we also put a
2723  * cap on how long we are willing to wait.
2724  */
2725 void
2726 hammer_inode_waitreclaims(hammer_mount_t hmp)
2727 {
2728 	struct hammer_reclaim reclaim;
2729 	int delay;
2730 
2731 	if (hmp->inode_reclaims > HAMMER_RECLAIM_WAIT) {
2732 		reclaim.okydoky = 0;
2733 		TAILQ_INSERT_TAIL(&hmp->reclaim_list,
2734 				  &reclaim, entry);
2735 	} else {
2736 		reclaim.okydoky = 1;
2737 	}
2738 
2739 	if (reclaim.okydoky == 0) {
2740 		delay = (hmp->inode_reclaims - HAMMER_RECLAIM_WAIT) * hz /
2741 			HAMMER_RECLAIM_WAIT;
2742 		if (delay >= 0)
2743 			tsleep(&reclaim, 0, "hmrrcm", delay + 1);
2744 		if (reclaim.okydoky == 0)
2745 			TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
2746 	}
2747 }
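
/*
 * Worked example (illustrative numbers, not from this file) of the
 * delay computed above: if HAMMER_RECLAIM_WAIT were 4000 and 6000
 * inodes were reclaiming, the delay would be
 * (6000 - 4000) * hz / 4000 == hz / 2, so the waiter sleeps at most
 * half a second before re-checking, unless a completed reclaim wakes
 * it sooner.
 */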
2748 
2749