xref: /dragonfly/sys/vfs/hammer/hammer_ondisk.c (revision 299d9671)
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
35  */
36 /*
37  * Manage HAMMER's on-disk structures.  These routines are primarily
38  * responsible for interfacing with the kernel's I/O subsystem and for
39  * managing in-memory structures.
40  */
41 
42 #include "hammer.h"
43 #include <sys/fcntl.h>
44 #include <sys/nlookup.h>
45 #include <sys/buf.h>
46 #include <sys/buf2.h>
47 
48 static void hammer_free_volume(hammer_volume_t volume);
49 static int hammer_load_volume(hammer_volume_t volume);
50 static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
51 static int hammer_load_node(hammer_node_t node, int isnew);
52 
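/*
 * hammer_volume structures are indexed via their vol_no.
 */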
53 static int
54 hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
55 {
56 	if (vol1->vol_no < vol2->vol_no)
57 		return(-1);
58 	if (vol1->vol_no > vol2->vol_no)
59 		return(1);
60 	return(0);
61 }
62 
63 /*
64  * hammer_buffer structures are indexed via their zoneX_offset, not
65  * their zone2_offset.
66  */
67 static int
68 hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
69 {
70 	if (buf1->zoneX_offset < buf2->zoneX_offset)
71 		return(-1);
72 	if (buf1->zoneX_offset > buf2->zoneX_offset)
73 		return(1);
74 	return(0);
75 }
76 
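/*
 * hammer_node structures are indexed via their node_offset.
 */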
77 static int
78 hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
79 {
80 	if (node1->node_offset < node2->node_offset)
81 		return(-1);
82 	if (node1->node_offset > node2->node_offset)
83 		return(1);
84 	return(0);
85 }
86 
87 RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
88 	     hammer_vol_rb_compare, int32_t, vol_no);
89 RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
90 	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
91 RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
92 	     hammer_nod_rb_compare, hammer_off_t, node_offset);
93 
94 /************************************************************************
95  *				VOLUMES					*
96  ************************************************************************
97  *
98  * Load a HAMMER volume by name.  Returns 0 on success or a positive error
99  * code on failure.  Volumes must be loaded at mount time; get_volume() will
100  * not load a new volume.
101  *
102  * Calls made to hammer_load_volume() are single-threaded.
103  */
104 int
105 hammer_install_volume(struct hammer_mount *hmp, const char *volname,
106 		      struct vnode *devvp)
107 {
108 	struct mount *mp;
109 	hammer_volume_t volume;
110 	struct hammer_volume_ondisk *ondisk;
111 	struct nlookupdata nd;
112 	struct buf *bp = NULL;
113 	int error;
114 	int ronly;
115 	int setmp = 0;
116 
117 	mp = hmp->mp;
118 	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
119 
120 	/*
121 	 * Allocate a volume structure
122 	 */
123 	++hammer_count_volumes;
124 	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
125 	volume->vol_name = kstrdup(volname, hmp->m_misc);
126 	volume->io.hmp = hmp;	/* bootstrap */
127 	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
128 	volume->io.offset = 0LL;
129 	volume->io.bytes = HAMMER_BUFSIZE;
130 
131 	/*
132 	 * Get the device vnode
133 	 */
134 	if (devvp == NULL) {
135 		error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
136 		if (error == 0)
137 			error = nlookup(&nd);
138 		if (error == 0)
139 			error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
140 		nlookup_done(&nd);
141 	} else {
142 		error = 0;
143 		volume->devvp = devvp;
144 	}
145 
146 	if (error == 0) {
147 		if (vn_isdisk(volume->devvp, &error)) {
148 			error = vfs_mountedon(volume->devvp);
149 		}
150 	}
151 	if (error == 0 &&
152 	    count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
153 		error = EBUSY;
154 	}
155 	if (error == 0) {
156 		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
157 		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
158 		if (error == 0) {
159 			error = VOP_OPEN(volume->devvp,
160 					 (ronly ? FREAD : FREAD|FWRITE),
161 					 FSCRED, NULL);
162 		}
163 		vn_unlock(volume->devvp);
164 	}
165 	if (error) {
166 		hammer_free_volume(volume);
167 		return(error);
168 	}
169 	volume->devvp->v_rdev->si_mountpoint = mp;
170 	setmp = 1;
171 
172 	/*
173 	 * Extract the volume number from the volume header and do various
174 	 * sanity checks.
175 	 */
176 	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
177 	if (error)
178 		goto late_failure;
179 	ondisk = (void *)bp->b_data;
180 	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
181 		kprintf("hammer_mount: volume %s has an invalid header\n",
182 			volume->vol_name);
183 		error = EFTYPE;
184 		goto late_failure;
185 	}
186 	volume->vol_no = ondisk->vol_no;
187 	volume->buffer_base = ondisk->vol_buf_beg;
188 	volume->vol_flags = ondisk->vol_flags;
189 	volume->nblocks = ondisk->vol_nblocks;
190 	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
191 				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
192 	volume->maxraw_off = ondisk->vol_buf_end;
193 
194 	if (RB_EMPTY(&hmp->rb_vols_root)) {
195 		hmp->fsid = ondisk->vol_fsid;
196 	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
197 		kprintf("hammer_mount: volume %s's fsid does not match "
198 			"other volumes\n", volume->vol_name);
199 		error = EFTYPE;
200 		goto late_failure;
201 	}
202 
203 	/*
204 	 * Insert the volume structure into the red-black tree.
205 	 */
206 	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
207 		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
208 			volume->vol_name, volume->vol_no);
209 		error = EEXIST;
210 	}
211 
212 	/*
213 	 * Set the root volume.  HAMMER special-cases the rootvol structure.
214 	 * We do not hold a ref because this would prevent related I/O
215 	 * from being flushed.
216 	 */
217 	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
218 		hmp->rootvol = volume;
219 		hmp->nvolumes = ondisk->vol_count;
220 		if (bp) {
221 			brelse(bp);
222 			bp = NULL;
223 		}
224 		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
225 			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
226 		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
227 			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
228 	}
229 late_failure:
230 	if (bp)
231 		brelse(bp);
232 	if (error) {
233 		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
234 		if (setmp)
235 			volume->devvp->v_rdev->si_mountpoint = NULL;
236 		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
237 		hammer_free_volume(volume);
238 	}
239 	return (error);
240 }
241 
242 /*
243  * This is called for each volume when updating the mount point from
244  * read-write to read-only or vice versa.
245  */
246 int
247 hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
248 {
249 	if (volume->devvp) {
250 		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
251 		if (volume->io.hmp->ronly) {
252 			/* do not call vinvalbuf */
253 			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
254 			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
255 		} else {
256 			/* do not call vinvalbuf */
257 			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
258 			VOP_CLOSE(volume->devvp, FREAD);
259 		}
260 		vn_unlock(volume->devvp);
261 	}
262 	return(0);
263 }
264 
265 /*
266  * Unload and free a HAMMER volume.  As an RB_SCAN callback this must
267  * return >= 0 to continue the scan; -1 would abort it on failure.
268  */
269 int
270 hammer_unload_volume(hammer_volume_t volume, void *data __unused)
271 {
272 	hammer_mount_t hmp = volume->io.hmp;
273 	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
274 	struct buf *bp;
275 
276 	/*
277 	 * Clean up the root volume pointer, which is held unlocked in hmp.
278 	 */
279 	if (hmp->rootvol == volume)
280 		hmp->rootvol = NULL;
281 
282 	/*
283 	 * We must not flush a dirty buffer to disk on umount.  It should
284 	 * have already been dealt with by the flusher, or we may be in
285 	 * catastrophic failure.
286 	 */
287 	hammer_io_clear_modify(&volume->io, 1);
288 	volume->io.waitdep = 1;
289 	bp = hammer_io_release(&volume->io, 1);
290 
291 	/*
292 	 * Clean up the persistent ref ioerror might have on the volume
293 	 */
294 	if (volume->io.ioerror) {
295 		volume->io.ioerror = 0;
296 		hammer_unref(&volume->io.lock);
297 	}
298 
299 	/*
300 	 * There should be no references on the volume, no clusters, and
301 	 * no super-clusters.
302 	 */
303 	KKASSERT(volume->io.lock.refs == 0);
304 	if (bp)
305 		brelse(bp);
306 
307 	volume->ondisk = NULL;
308 	if (volume->devvp) {
309 		if (volume->devvp->v_rdev &&
310 		    volume->devvp->v_rdev->si_mountpoint == hmp->mp
311 		) {
312 			volume->devvp->v_rdev->si_mountpoint = NULL;
313 		}
314 		if (ronly) {
315 			/*
316 			 * Make sure we don't sync anything to disk if we
317 			 * are in read-only mode (1) or critically-errored
318 			 * (2).  Note that there may be dirty buffers in
319 			 * normal read-only mode from crash recovery.
320 			 */
321 			vinvalbuf(volume->devvp, 0, 0, 0);
322 			VOP_CLOSE(volume->devvp, FREAD);
323 		} else {
324 			/*
325 			 * Normal termination, save any dirty buffers
326 			 * (XXX there really shouldn't be any).
327 			 */
328 			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
329 			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
330 		}
331 	}
332 
333 	/*
334 	 * Destroy the structure
335 	 */
336 	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
337 	hammer_free_volume(volume);
338 	return(0);
339 }
340 
341 static
342 void
343 hammer_free_volume(hammer_volume_t volume)
344 {
345 	hammer_mount_t hmp = volume->io.hmp;
346 
347 	if (volume->vol_name) {
348 		kfree(volume->vol_name, hmp->m_misc);
349 		volume->vol_name = NULL;
350 	}
351 	if (volume->devvp) {
352 		vrele(volume->devvp);
353 		volume->devvp = NULL;
354 	}
355 	--hammer_count_volumes;
356 	kfree(volume, hmp->m_misc);
357 }
358 
359 /*
360  * Get a HAMMER volume.  The volume must already exist.
361  */
362 hammer_volume_t
363 hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
364 {
365 	struct hammer_volume *volume;
366 
367 	/*
368 	 * Locate the volume structure
369 	 */
370 	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
371 	if (volume == NULL) {
372 		*errorp = ENOENT;
373 		return(NULL);
374 	}
375 	hammer_ref(&volume->io.lock);
376 
377 	/*
378 	 * Deal with on-disk info
379 	 */
380 	if (volume->ondisk == NULL || volume->io.loading) {
381 		*errorp = hammer_load_volume(volume);
382 		if (*errorp) {
383 			hammer_rel_volume(volume, 1);
384 			volume = NULL;
385 		}
386 	} else {
387 		*errorp = 0;
388 	}
389 	return(volume);
390 }
391 
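/*
 * Add a reference to an already-installed volume, loading its on-disk
 * info if necessary.  On error the reference is dropped and a non-zero
 * error code is returned.
 */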
392 int
393 hammer_ref_volume(hammer_volume_t volume)
394 {
395 	int error;
396 
397 	hammer_ref(&volume->io.lock);
398 
399 	/*
400 	 * Deal with on-disk info
401 	 */
402 	if (volume->ondisk == NULL || volume->io.loading) {
403 		error = hammer_load_volume(volume);
404 		if (error)
405 			hammer_rel_volume(volume, 1);
406 	} else {
407 		error = 0;
408 	}
409 	return (error);
410 }
411 
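/*
 * Return the root volume with an additional reference, loading its
 * on-disk info if necessary.  The root volume is cached in hmp->rootvol
 * at mount time and must exist.
 */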
412 hammer_volume_t
413 hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
414 {
415 	hammer_volume_t volume;
416 
417 	volume = hmp->rootvol;
418 	KKASSERT(volume != NULL);
419 	hammer_ref(&volume->io.lock);
420 
421 	/*
422 	 * Deal with on-disk info
423 	 */
424 	if (volume->ondisk == NULL || volume->io.loading) {
425 		*errorp = hammer_load_volume(volume);
426 		if (*errorp) {
427 			hammer_rel_volume(volume, 1);
428 			volume = NULL;
429 		}
430 	} else {
431 		*errorp = 0;
432 	}
433 	return (volume);
434 }
435 
436 /*
437  * Load a volume's on-disk information.  The volume must be referenced and
438  * not locked.  We temporarily acquire an exclusive lock to interlock
439  * against releases or multiple gets.
440  */
441 static int
442 hammer_load_volume(hammer_volume_t volume)
443 {
444 	int error;
445 
446 	++volume->io.loading;
447 	hammer_lock_ex(&volume->io.lock);
448 
449 	if (volume->ondisk == NULL) {
450 		error = hammer_io_read(volume->devvp, &volume->io,
451 				       volume->maxraw_off);
452 		if (error == 0)
453 			volume->ondisk = (void *)volume->io.bp->b_data;
454 	} else {
455 		error = 0;
456 	}
457 	--volume->io.loading;
458 	hammer_unlock(&volume->io.lock);
459 	return(error);
460 }
461 
462 /*
463  * Release a volume.  Call hammer_io_release on the last reference.  We have
464  * to acquire an exclusive lock to interlock against volume->ondisk tests
465  * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
466  * lock to be held.
467  *
468  * Volumes are not unloaded from memory during normal operation.
469  */
470 void
471 hammer_rel_volume(hammer_volume_t volume, int flush)
472 {
473 	struct buf *bp = NULL;
474 
475 	crit_enter();
476 	if (volume->io.lock.refs == 1) {
477 		++volume->io.loading;
478 		hammer_lock_ex(&volume->io.lock);
479 		if (volume->io.lock.refs == 1) {
480 			volume->ondisk = NULL;
481 			bp = hammer_io_release(&volume->io, flush);
482 		}
483 		--volume->io.loading;
484 		hammer_unlock(&volume->io.lock);
485 	}
486 	hammer_unref(&volume->io.lock);
487 	if (bp)
488 		brelse(bp);
489 	crit_exit();
490 }
491 
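/*
 * Check that all volumes making up the filesystem (vol_no 0 through
 * nvolumes - 1) have been installed.  Returns 0 on success or EINVAL
 * if a volume is missing.
 */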
492 int
493 hammer_mountcheck_volumes(struct hammer_mount *hmp)
494 {
495 	hammer_volume_t vol;
496 	int i;
497 
498 	for (i = 0; i < hmp->nvolumes; ++i) {
499 		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
500 		if (vol == NULL)
501 			return(EINVAL);
502 	}
503 	return(0);
504 }
505 
506 /************************************************************************
507  *				BUFFERS					*
508  ************************************************************************
509  *
510  * Manage buffers.  Currently all blockmap-backed zones are direct-mapped
511  * to zone-2 buffer offsets, without a translation stage.  However, the
512  * hammer_buffer structure is indexed by its zoneX_offset, not its
513  * zone2_offset.
514  *
515  * The proper zone must be maintained throughout the code-base all the way
516  * through to the big-block allocator, or routines like hammer_del_buffers()
517  * will not be able to locate all potentially conflicting buffers.
518  */
519 hammer_buffer_t
520 hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
521 		  int bytes, int isnew, int *errorp)
522 {
523 	hammer_buffer_t buffer;
524 	hammer_volume_t volume;
525 	hammer_off_t	zone2_offset;
526 	hammer_io_type_t iotype;
527 	int vol_no;
528 	int zone;
529 
530 	buf_offset &= ~HAMMER_BUFMASK64;
531 again:
532 	/*
533 	 * Shortcut if the buffer is already cached
534 	 */
535 	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
536 	if (buffer) {
537 		if (buffer->io.lock.refs == 0)
538 			++hammer_count_refedbufs;
539 		hammer_ref(&buffer->io.lock);
540 
541 		/*
542 		 * Once refed the ondisk field will not be cleared by
543 		 * any other action.
544 		 */
545 		if (buffer->ondisk && buffer->io.loading == 0) {
546 			*errorp = 0;
547 			return(buffer);
548 		}
549 
550 		/*
551 		 * The buffer is no longer loose if it has a ref, and
552 		 * cannot become loose once it gains a ref.  Loose
553 		 * buffers will never be in a modified state.  This should
554 		 * only occur on the 0->1 transition of refs.
555 		 *
556 		 * lose_list can be modified via a biodone() interrupt.
557 		 */
558 		if (buffer->io.mod_list == &hmp->lose_list) {
559 			crit_enter();	/* biodone race against list */
560 			TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
561 				     mod_entry);
562 			crit_exit();
563 			buffer->io.mod_list = NULL;
564 			KKASSERT(buffer->io.modified == 0);
565 		}
566 		goto found;
567 	}
568 
569 	/*
570 	 * What is the buffer class?
571 	 */
572 	zone = HAMMER_ZONE_DECODE(buf_offset);
573 
574 	switch(zone) {
575 	case HAMMER_ZONE_LARGE_DATA_INDEX:
576 	case HAMMER_ZONE_SMALL_DATA_INDEX:
577 		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
578 		break;
579 	case HAMMER_ZONE_UNDO_INDEX:
580 		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
581 		break;
582 	case HAMMER_ZONE_META_INDEX:
583 	default:
584 		/*
585 		 * NOTE: inode data and directory entries are placed in this
586 		 * zone.  inode atime/mtime is updated in-place and thus
587 		 * buffers containing inodes must be synchronized as
588 		 * meta-buffers, same as buffers containing B-Tree info.
589 		 */
590 		iotype = HAMMER_STRUCTURE_META_BUFFER;
591 		break;
592 	}
593 
594 	/*
595 	 * Handle blockmap offset translations
596 	 */
597 	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
598 		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
599 	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
600 		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
601 	} else {
602 		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
603 		zone2_offset = buf_offset;
604 		*errorp = 0;
605 	}
606 	if (*errorp)
607 		return(NULL);
608 
609 	/*
610 	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
611 	 * specifications.
612 	 */
613 	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
614 		 HAMMER_ZONE_RAW_BUFFER);
615 	vol_no = HAMMER_VOL_DECODE(zone2_offset);
616 	volume = hammer_get_volume(hmp, vol_no, errorp);
617 	if (volume == NULL)
618 		return(NULL);
619 
620 	KKASSERT(zone2_offset < volume->maxbuf_off);
621 
622 	/*
623 	 * Allocate a new buffer structure.  We will check for races later.
624 	 */
625 	++hammer_count_buffers;
626 	buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
627 			 M_WAITOK|M_ZERO|M_USE_RESERVE);
628 	buffer->zone2_offset = zone2_offset;
629 	buffer->zoneX_offset = buf_offset;
630 
631 	hammer_io_init(&buffer->io, volume, iotype);
632 	buffer->io.offset = volume->ondisk->vol_buf_beg +
633 			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
634 	buffer->io.bytes = bytes;
635 	TAILQ_INIT(&buffer->clist);
636 	hammer_ref(&buffer->io.lock);
637 
638 	/*
639 	 * Insert the buffer into the RB tree and handle late collisions.
640 	 */
641 	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
642 		hammer_unref(&buffer->io.lock);
643 		--hammer_count_buffers;
644 		kfree(buffer, hmp->m_misc);
645 		goto again;
646 	}
647 	++hammer_count_refedbufs;
648 found:
649 
650 	/*
651 	 * Deal with on-disk info and loading races.
652 	 */
653 	if (buffer->ondisk == NULL || buffer->io.loading) {
654 		*errorp = hammer_load_buffer(buffer, isnew);
655 		if (*errorp) {
656 			hammer_rel_buffer(buffer, 1);
657 			buffer = NULL;
658 		}
659 	} else {
660 		*errorp = 0;
661 	}
662 	return(buffer);
663 }
664 
665 /*
666  * This is used by the direct-read code to deal with large-data buffers
667  * created by the reblocker and mirror-write code.  The direct-read code
668  * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
669  * running hammer buffers must be fully synced to disk before we can issue
670  * the direct-read.
671  *
672  * This code path is not considered critical as only the reblocker and
673  * mirror-write code will create large-data buffers via the HAMMER buffer
674  * subsystem.  They do that because they operate at the B-Tree level and
675  * do not access the vnode/inode structures.
676  */
677 void
678 hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
679 {
680 	hammer_buffer_t buffer;
681 	int error;
682 
683 	KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
684 		 HAMMER_ZONE_LARGE_DATA);
685 
686 	while (bytes > 0) {
687 		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
688 				   base_offset);
689 		if (buffer && (buffer->io.modified || buffer->io.running)) {
690 			error = hammer_ref_buffer(buffer);
691 			if (error == 0) {
692 				hammer_io_wait(&buffer->io);
693 				if (buffer->io.modified) {
694 					hammer_io_write_interlock(&buffer->io);
695 					hammer_io_flush(&buffer->io);
696 					hammer_io_done_interlock(&buffer->io);
697 					hammer_io_wait(&buffer->io);
698 				}
699 				hammer_rel_buffer(buffer, 0);
700 			}
701 		}
702 		base_offset += HAMMER_BUFSIZE;
703 		bytes -= HAMMER_BUFSIZE;
704 	}
705 }
706 
707 /*
708  * Destroy all buffers covering the specified zoneX offset range.  This
709  * is called when the related blockmap layer2 entry is freed or when
710  * a direct write bypasses our buffer/buffer-cache subsystem.
711  *
712  * The buffers may be referenced by the caller itself.  Setting reclaim
713  * will cause the buffer to be destroyed when its ref count reaches zero.
714  *
715  * Return 0 on success, EAGAIN if some buffers could not be destroyed due
716  * to additional references held by other threads, or some other (typically
717  * fatal) error.
718  */
719 int
720 hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
721 		   hammer_off_t zone2_offset, int bytes,
722 		   int report_conflicts)
723 {
724 	hammer_buffer_t buffer;
725 	hammer_volume_t volume;
726 	int vol_no;
727 	int error;
728 	int ret_error;
729 
730 	vol_no = HAMMER_VOL_DECODE(zone2_offset);
731 	volume = hammer_get_volume(hmp, vol_no, &ret_error);
732 	KKASSERT(ret_error == 0);
733 
734 	while (bytes > 0) {
735 		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
736 				   base_offset);
737 		if (buffer) {
738 			error = hammer_ref_buffer(buffer);
739 			if (error == 0 && buffer->io.lock.refs != 1) {
740 				error = EAGAIN;
741 				hammer_rel_buffer(buffer, 0);
742 			}
743 			if (error == 0) {
744 				KKASSERT(buffer->zone2_offset == zone2_offset);
745 				hammer_io_clear_modify(&buffer->io, 1);
746 				buffer->io.reclaim = 1;
747 				buffer->io.waitdep = 1;
748 				KKASSERT(buffer->io.volume == volume);
749 				hammer_rel_buffer(buffer, 0);
750 			}
751 		} else {
752 			error = hammer_io_inval(volume, zone2_offset);
753 		}
754 		if (error) {
755 			ret_error = error;
756 			if (report_conflicts || (hammer_debug_general & 0x8000))
757 				kprintf("hammer_del_buffers: unable to invalidate %016llx buffer=%p rep=%d\n", base_offset, buffer, report_conflicts);
758 		}
759 		base_offset += HAMMER_BUFSIZE;
760 		zone2_offset += HAMMER_BUFSIZE;
761 		bytes -= HAMMER_BUFSIZE;
762 	}
763 	hammer_rel_volume(volume, 0);
764 	return (ret_error);
765 }
766 
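/*
 * Load the on-disk data for a buffer, or instantiate a fresh buffer
 * cache buffer when isnew is set.  An exclusive lock is held for the
 * duration of the load.
 */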
767 static int
768 hammer_load_buffer(hammer_buffer_t buffer, int isnew)
769 {
770 	hammer_volume_t volume;
771 	int error;
772 
773 	/*
774 	 * Load the buffer's on-disk info
775 	 */
776 	volume = buffer->io.volume;
777 	++buffer->io.loading;
778 	hammer_lock_ex(&buffer->io.lock);
779 
780 	if (hammer_debug_io & 0x0001) {
781 		kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
782 			buffer->zoneX_offset, buffer->zone2_offset, isnew,
783 			buffer->ondisk);
784 	}
785 
786 	if (buffer->ondisk == NULL) {
787 		if (isnew) {
788 			error = hammer_io_new(volume->devvp, &buffer->io);
789 		} else {
790 			error = hammer_io_read(volume->devvp, &buffer->io,
791 					       volume->maxraw_off);
792 		}
793 		if (error == 0)
794 			buffer->ondisk = (void *)buffer->io.bp->b_data;
795 	} else if (isnew) {
796 		error = hammer_io_new(volume->devvp, &buffer->io);
797 	} else {
798 		error = 0;
799 	}
800 	--buffer->io.loading;
801 	hammer_unlock(&buffer->io.lock);
802 	return (error);
803 }
804 
805 /*
806  * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
807  * This routine is only called during unmount.
808  */
809 int
810 hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
811 {
812 	/*
813 	 * Clean up the persistent ref ioerror might have on the buffer
814 	 * and acquire a ref (steal ioerror's if we can).
815 	 */
816 	if (buffer->io.ioerror) {
817 		buffer->io.ioerror = 0;
818 	} else {
819 		if (buffer->io.lock.refs == 0)
820 			++hammer_count_refedbufs;
821 		hammer_ref(&buffer->io.lock);
822 	}
823 
824 	/*
825 	 * We must not flush a dirty buffer to disk on umount.  It should
826 	 * have already been dealt with by the flusher, or we may be in
827 	 * catastrophic failure.
828 	 */
829 	hammer_io_clear_modify(&buffer->io, 1);
830 	hammer_flush_buffer_nodes(buffer);
831 	KKASSERT(buffer->io.lock.refs == 1);
832 	hammer_rel_buffer(buffer, 2);
833 	return(0);
834 }
835 
836 /*
837  * Reference a buffer that is either already referenced or reachable via
838  * a specially handled pointer (aka cursor->buffer).
839  */
840 int
841 hammer_ref_buffer(hammer_buffer_t buffer)
842 {
843 	int error;
844 
845 	if (buffer->io.lock.refs == 0)
846 		++hammer_count_refedbufs;
847 	hammer_ref(&buffer->io.lock);
848 
849 	/*
850 	 * At this point a biodone() will not touch the buffer other than
851 	 * incidental bits.  However, lose_list can be modified via
852 	 * a biodone() interrupt.
853 	 *
854 	 * No longer loose
855 	 */
856 	if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
857 		crit_enter();
858 		TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
859 		buffer->io.mod_list = NULL;
860 		crit_exit();
861 	}
862 
863 	if (buffer->ondisk == NULL || buffer->io.loading) {
864 		error = hammer_load_buffer(buffer, 0);
865 		if (error) {
866 			hammer_rel_buffer(buffer, 1);
867 			/*
868 			 * NOTE: buffer pointer can become stale after
869 			 * the above release.
870 			 */
871 		}
872 	} else {
873 		error = 0;
874 	}
875 	return(error);
876 }
877 
878 /*
879  * Release a buffer.  We have to deal with several places where
880  * another thread can ref the buffer.
881  *
882  * Only destroy the structure itself if the related buffer cache buffer
883  * was disassociated from it.  This ties the management of the structure
884  * to the buffer cache subsystem.  buffer->ondisk determines whether the
885  * embedded io is referenced or not.
886  */
887 void
888 hammer_rel_buffer(hammer_buffer_t buffer, int flush)
889 {
890 	hammer_volume_t volume;
891 	hammer_mount_t hmp;
892 	struct buf *bp = NULL;
893 	int freeme = 0;
894 
895 	hmp = buffer->io.hmp;
896 
897 	crit_enter();
898 	if (buffer->io.lock.refs == 1) {
899 		++buffer->io.loading;	/* force interlock check */
900 		hammer_lock_ex(&buffer->io.lock);
901 		if (buffer->io.lock.refs == 1) {
902 			bp = hammer_io_release(&buffer->io, flush);
903 
904 			if (buffer->io.lock.refs == 1)
905 				--hammer_count_refedbufs;
906 
907 			if (buffer->io.bp == NULL &&
908 			    buffer->io.lock.refs == 1) {
909 				/*
910 				 * Final cleanup
911 				 *
912 				 * NOTE: It is impossible for any associated
913 				 * B-Tree nodes to have refs if the buffer
914 				 * has no additional refs.
915 				 */
916 				RB_REMOVE(hammer_buf_rb_tree,
917 					  &buffer->io.hmp->rb_bufs_root,
918 					  buffer);
919 				volume = buffer->io.volume;
920 				buffer->io.volume = NULL; /* sanity */
921 				hammer_rel_volume(volume, 0);
922 				hammer_io_clear_modlist(&buffer->io);
923 				hammer_flush_buffer_nodes(buffer);
924 				KKASSERT(TAILQ_EMPTY(&buffer->clist));
925 				freeme = 1;
926 			}
927 		}
928 		--buffer->io.loading;
929 		hammer_unlock(&buffer->io.lock);
930 	}
931 	hammer_unref(&buffer->io.lock);
932 	crit_exit();
933 	if (bp)
934 		brelse(bp);
935 	if (freeme) {
936 		--hammer_count_buffers;
937 		kfree(buffer, hmp->m_misc);
938 	}
939 }
940 
941 /*
942  * Access the filesystem buffer containing the specified hammer offset.
943  * buf_offset is a conglomeration of the volume number and vol_buf_beg
944  * relative buffer offset.  It must also have bit 55 set to be valid.
945  * (see hammer_off_t in hammer_disk.h).
946  *
947  * Any prior buffer in *bufferp will be released and replaced by the
948  * requested buffer.
949  *
950  * NOTE: The buffer is indexed via its zoneX_offset but we allow the
951  * passed cached *bufferp to match against either zoneX or zone2.
952  */
953 static __inline
954 void *
955 _hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
956 	     int *errorp, struct hammer_buffer **bufferp)
957 {
958 	hammer_buffer_t buffer;
959 	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;
960 
961 	buf_offset &= ~HAMMER_BUFMASK64;
962 	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);
963 
964 	buffer = *bufferp;
965 	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
966 			       buffer->zoneX_offset != buf_offset)) {
967 		if (buffer)
968 			hammer_rel_buffer(buffer, 0);
969 		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
970 		*bufferp = buffer;
971 	} else {
972 		*errorp = 0;
973 	}
974 
975 	/*
976 	 * Return a pointer to the buffer data.
977 	 */
978 	if (buffer == NULL)
979 		return(NULL);
980 	else
981 		return((char *)buffer->ondisk + xoff);
982 }
983 
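/*
 * Wrappers around _hammer_bread().  hammer_bread() accesses a single
 * HAMMER_BUFSIZE buffer, hammer_bread_ext() rounds the requested size
 * up to a multiple of HAMMER_BUFSIZE.
 */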
984 void *
985 hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
986 	     int *errorp, struct hammer_buffer **bufferp)
987 {
988 	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
989 }
990 
991 void *
992 hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
993 	         int *errorp, struct hammer_buffer **bufferp)
994 {
995 	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
996 	return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
997 }
998 
999 /*
1000  * Access the filesystem buffer containing the specified hammer offset.
1001  * No disk read operation occurs.  The result buffer may contain garbage.
1002  *
1003  * Any prior buffer in *bufferp will be released and replaced by the
1004  * requested buffer.
1005  *
1006  * This function marks the buffer dirty but does not increment its
1007  * modify_refs count.
1008  */
1009 static __inline
1010 void *
1011 _hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
1012 	     int *errorp, struct hammer_buffer **bufferp)
1013 {
1014 	hammer_buffer_t buffer;
1015 	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;
1016 
1017 	buf_offset &= ~HAMMER_BUFMASK64;
1018 
1019 	buffer = *bufferp;
1020 	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
1021 			       buffer->zoneX_offset != buf_offset)) {
1022 		if (buffer)
1023 			hammer_rel_buffer(buffer, 0);
1024 		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
1025 		*bufferp = buffer;
1026 	} else {
1027 		*errorp = 0;
1028 	}
1029 
1030 	/*
1031 	 * Return a pointer to the buffer data.
1032 	 */
1033 	if (buffer == NULL)
1034 		return(NULL);
1035 	else
1036 		return((char *)buffer->ondisk + xoff);
1037 }
1038 
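/*
 * Wrappers around _hammer_bnew().  hammer_bnew() accesses a single
 * HAMMER_BUFSIZE buffer, hammer_bnew_ext() rounds the requested size
 * up to a multiple of HAMMER_BUFSIZE.
 */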
1039 void *
1040 hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
1041 	     int *errorp, struct hammer_buffer **bufferp)
1042 {
1043 	return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
1044 }
1045 
1046 void *
1047 hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
1048 		int *errorp, struct hammer_buffer **bufferp)
1049 {
1050 	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
1051 	return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
1052 }
1053 
1054 /************************************************************************
1055  *				NODES					*
1056  ************************************************************************
1057  *
1058  * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
1059  * method used by the HAMMER filesystem.
1060  *
1061  * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
1062  * associated with its buffer, and will only referenced the buffer while
1063  * the node itself is referenced.
1064  *
1065  * A hammer_node can also be passively associated with other HAMMER
1066  * structures, such as inodes, while retaining 0 references.  These
1067  * associations can be cleared backwards using a pointer-to-pointer in
1068  * the hammer_node.
1069  *
1070  * This allows the HAMMER implementation to cache hammer_nodes long-term
1071  * and short-cut a great deal of the infrastructure's complexity.  In
1072  * most cases a cached node can be reacquired without having to dip into
1073  * either the buffer or cluster management code.
1074  *
1075  * The caller must pass a referenced cluster on call and will retain
1076  * ownership of the reference on return.  The node will acquire its own
1077  * additional references, if necessary.
1078  */
1079 hammer_node_t
1080 hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
1081 		int isnew, int *errorp)
1082 {
1083 	hammer_mount_t hmp = trans->hmp;
1084 	hammer_node_t node;
1085 
1086 	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);
1087 
1088 	/*
1089 	 * Locate the structure, allocating one if necessary.
1090 	 */
1091 again:
1092 	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
1093 	if (node == NULL) {
1094 		++hammer_count_nodes;
1095 		node = kmalloc(sizeof(*node), hmp->m_misc, M_WAITOK|M_ZERO|M_USE_RESERVE);
1096 		node->node_offset = node_offset;
1097 		node->hmp = hmp;
1098 		TAILQ_INIT(&node->cursor_list);
1099 		TAILQ_INIT(&node->cache_list);
1100 		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
1101 			--hammer_count_nodes;
1102 			kfree(node, hmp->m_misc);
1103 			goto again;
1104 		}
1105 	}
1106 	hammer_ref(&node->lock);
1107 	if (node->ondisk) {
1108 		*errorp = 0;
1109 	} else {
1110 		*errorp = hammer_load_node(node, isnew);
1111 		trans->flags |= HAMMER_TRANSF_DIDIO;
1112 	}
1113 	if (*errorp) {
1114 		hammer_rel_node(node);
1115 		node = NULL;
1116 	}
1117 	return(node);
1118 }
1119 
1120 /*
1121  * Reference an already-referenced node.
1122  */
1123 void
1124 hammer_ref_node(hammer_node_t node)
1125 {
1126 	KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
1127 	hammer_ref(&node->lock);
1128 }
1129 
1130 /*
1131  * Load a node's on-disk data reference.
1132  */
1133 static int
1134 hammer_load_node(hammer_node_t node, int isnew)
1135 {
1136 	hammer_buffer_t buffer;
1137 	hammer_off_t buf_offset;
1138 	int error;
1139 
1140 	error = 0;
1141 	++node->loading;
1142 	hammer_lock_ex(&node->lock);
1143 	if (node->ondisk == NULL) {
1144 		/*
1145 		 * This is a little confusing but the gist is that
1146 		 * node->buffer determines whether the node is on
1147 		 * the buffer's clist and node->ondisk determines
1148 		 * whether the buffer is referenced.
1149 		 *
1150 		 * We could be racing a buffer release, in which case
1151 		 * node->buffer may become NULL while we are blocked
1152 		 * referencing the buffer.
1153 		 */
1154 		if ((buffer = node->buffer) != NULL) {
1155 			error = hammer_ref_buffer(buffer);
1156 			if (error == 0 && node->buffer == NULL) {
1157 				TAILQ_INSERT_TAIL(&buffer->clist,
1158 						  node, entry);
1159 				node->buffer = buffer;
1160 			}
1161 		} else {
1162 			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
1163 			buffer = hammer_get_buffer(node->hmp, buf_offset,
1164 						   HAMMER_BUFSIZE, 0, &error);
1165 			if (buffer) {
1166 				KKASSERT(error == 0);
1167 				TAILQ_INSERT_TAIL(&buffer->clist,
1168 						  node, entry);
1169 				node->buffer = buffer;
1170 			}
1171 		}
1172 		if (error)
1173 			goto failed;
1174 		node->ondisk = (void *)((char *)buffer->ondisk +
1175 				        (node->node_offset & HAMMER_BUFMASK));
1176 		if (isnew == 0 &&
1177 		    (node->flags & HAMMER_NODE_CRCGOOD) == 0) {
1178 			if (hammer_crc_test_btree(node->ondisk) == 0)
1179 				Debugger("CRC FAILED: B-TREE NODE");
1180 			node->flags |= HAMMER_NODE_CRCGOOD;
1181 		}
1182 	}
1183 failed:
1184 	--node->loading;
1185 	hammer_unlock(&node->lock);
1186 	return (error);
1187 }
1188 
1189 /*
1190  * Safely reference a node, interlock against flushes via the IO subsystem.
1191  */
1192 hammer_node_t
1193 hammer_ref_node_safe(struct hammer_mount *hmp, hammer_node_cache_t cache,
1194 		     int *errorp)
1195 {
1196 	hammer_node_t node;
1197 
1198 	node = cache->node;
1199 	if (node != NULL) {
1200 		hammer_ref(&node->lock);
1201 		if (node->ondisk)
1202 			*errorp = 0;
1203 		else
1204 			*errorp = hammer_load_node(node, 0);
1205 		if (*errorp) {
1206 			hammer_rel_node(node);
1207 			node = NULL;
1208 		}
1209 	} else {
1210 		*errorp = ENOENT;
1211 	}
1212 	return(node);
1213 }
1214 
1215 /*
1216  * Release a hammer_node.  On the last release the node dereferences
1217  * its underlying buffer and may or may not be destroyed.
1218  */
1219 void
1220 hammer_rel_node(hammer_node_t node)
1221 {
1222 	hammer_buffer_t buffer;
1223 
1224 	/*
1225 	 * If this isn't the last ref just decrement the ref count and
1226 	 * return.
1227 	 */
1228 	if (node->lock.refs > 1) {
1229 		hammer_unref(&node->lock);
1230 		return;
1231 	}
1232 
1233 	/*
1234 	 * If there is no ondisk info or no buffer the node failed to load,
1235 	 * remove the last reference and destroy the node.
1236 	 */
1237 	if (node->ondisk == NULL) {
1238 		hammer_unref(&node->lock);
1239 		hammer_flush_node(node);
1240 		/* node is stale now */
1241 		return;
1242 	}
1243 
1244 	/*
1245 	 * Do not disassociate the node from the buffer if it represents
1246 	 * a modified B-Tree node that still needs its crc to be generated.
1247 	 */
1248 	if (node->flags & HAMMER_NODE_NEEDSCRC)
1249 		return;
1250 
1251 	/*
1252 	 * Do final cleanups and then either destroy the node or leave it
1253 	 * passively cached.  The buffer reference is removed regardless.
1254 	 */
1255 	buffer = node->buffer;
1256 	node->ondisk = NULL;
1257 
1258 	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
1259 		hammer_unref(&node->lock);
1260 		hammer_rel_buffer(buffer, 0);
1261 		return;
1262 	}
1263 
1264 	/*
1265 	 * Destroy the node.
1266 	 */
1267 	hammer_unref(&node->lock);
1268 	hammer_flush_node(node);
1269 	/* node is stale */
1270 	hammer_rel_buffer(buffer, 0);
1271 }
1272 
1273 /*
1274  * Free space on-media associated with a B-Tree node.
1275  */
1276 void
1277 hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
1278 {
1279 	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
1280 	node->flags |= HAMMER_NODE_DELETED;
1281 	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
1282 }
1283 
1284 /*
1285  * Passively cache a referenced hammer_node.  The caller may release
1286  * the node on return.
1287  */
1288 void
1289 hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
1290 {
1291 	/*
1292 	 * If the node doesn't exist, or is being deleted, don't cache it!
1293 	 *
1294 	 * The node can only ever be NULL in the I/O failure path.
1295 	 */
1296 	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
1297 		return;
1298 	if (cache->node == node)
1299 		return;
1300 	while (cache->node)
1301 		hammer_uncache_node(cache);
1302 	if (node->flags & HAMMER_NODE_DELETED)
1303 		return;
1304 	cache->node = node;
1305 	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
1306 }
1307 
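/*
 * Remove a passive cache association.  If this was the last cache
 * entry the node may be destroyed via hammer_flush_node().
 */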
1308 void
1309 hammer_uncache_node(hammer_node_cache_t cache)
1310 {
1311 	hammer_node_t node;
1312 
1313 	if ((node = cache->node) != NULL) {
1314 		TAILQ_REMOVE(&node->cache_list, cache, entry);
1315 		cache->node = NULL;
1316 		if (TAILQ_EMPTY(&node->cache_list))
1317 			hammer_flush_node(node);
1318 	}
1319 }
1320 
1321 /*
1322  * Remove a node's cache references and destroy the node if it has no
1323  * other references or backing store.
1324  */
1325 void
1326 hammer_flush_node(hammer_node_t node)
1327 {
1328 	hammer_node_cache_t cache;
1329 	hammer_buffer_t buffer;
1330 	hammer_mount_t hmp = node->hmp;
1331 
1332 	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
1333 		TAILQ_REMOVE(&node->cache_list, cache, entry);
1334 		cache->node = NULL;
1335 	}
1336 	if (node->lock.refs == 0 && node->ondisk == NULL) {
1337 		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
1338 		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
1339 		if ((buffer = node->buffer) != NULL) {
1340 			node->buffer = NULL;
1341 			TAILQ_REMOVE(&buffer->clist, node, entry);
1342 			/* buffer is unreferenced because ondisk is NULL */
1343 		}
1344 		--hammer_count_nodes;
1345 		kfree(node, hmp->m_misc);
1346 	}
1347 }
1348 
1349 /*
1350  * Flush passively cached B-Tree nodes associated with this buffer.
1351  * This is only called when the buffer is about to be destroyed, so
1352  * none of the nodes should have any references.  The buffer is locked.
1353  *
1354  * We may be interlocked with the buffer.
1355  */
1356 void
1357 hammer_flush_buffer_nodes(hammer_buffer_t buffer)
1358 {
1359 	hammer_node_t node;
1360 
1361 	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
1362 		KKASSERT(node->ondisk == NULL);
1363 		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
1364 
1365 		if (node->lock.refs == 0) {
1366 			hammer_ref(&node->lock);
1367 			node->flags |= HAMMER_NODE_FLUSH;
1368 			hammer_rel_node(node);
1369 		} else {
1370 			KKASSERT(node->loading != 0);
1371 			KKASSERT(node->buffer != NULL);
1372 			buffer = node->buffer;
1373 			node->buffer = NULL;
1374 			TAILQ_REMOVE(&buffer->clist, node, entry);
1375 			/* buffer is unreferenced because ondisk is NULL */
1376 		}
1377 	}
1378 }
1379 
1380 
1381 /************************************************************************
1382  *				ALLOCATORS				*
1383  ************************************************************************/
1384 
1385 /*
1386  * Allocate a B-Tree node.
1387  */
1388 hammer_node_t
1389 hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
1390 {
1391 	hammer_buffer_t buffer = NULL;
1392 	hammer_node_t node = NULL;
1393 	hammer_off_t node_offset;
1394 
1395 	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
1396 					    sizeof(struct hammer_node_ondisk),
1397 					    errorp);
1398 	if (*errorp == 0) {
1399 		node = hammer_get_node(trans, node_offset, 1, errorp);
1400 		hammer_modify_node_noundo(trans, node);
1401 		bzero(node->ondisk, sizeof(*node->ondisk));
1402 		hammer_modify_node_done(node);
1403 	}
1404 	if (buffer)
1405 		hammer_rel_buffer(buffer, 0);
1406 	return(node);
1407 }
1408 
1409 /*
1410  * Allocate data.  If the address of a data buffer is supplied then
1411  * any prior non-NULL *data_bufferp will be released and *data_bufferp
1412  * will be set to the related buffer.  The caller must release it when
1413  * finally done.  The initial *data_bufferp should be set to NULL by
1414  * the caller.
1415  *
1416  * The caller is responsible for making hammer_modify*() calls on the
1417  * *data_bufferp.
1418  */
1419 void *
1420 hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
1421 		  u_int16_t rec_type, hammer_off_t *data_offsetp,
1422 		  struct hammer_buffer **data_bufferp, int *errorp)
1423 {
1424 	void *data;
1425 	int zone;
1426 
1427 	/*
1428 	 * Allocate data
1429 	 */
1430 	if (data_len) {
1431 		switch(rec_type) {
1432 		case HAMMER_RECTYPE_INODE:
1433 		case HAMMER_RECTYPE_DIRENTRY:
1434 		case HAMMER_RECTYPE_EXT:
1435 		case HAMMER_RECTYPE_FIX:
1436 		case HAMMER_RECTYPE_PFS:
1437 			zone = HAMMER_ZONE_META_INDEX;
1438 			break;
1439 		case HAMMER_RECTYPE_DATA:
1440 		case HAMMER_RECTYPE_DB:
1441 			if (data_len <= HAMMER_BUFSIZE / 2) {
1442 				zone = HAMMER_ZONE_SMALL_DATA_INDEX;
1443 			} else {
1444 				data_len = (data_len + HAMMER_BUFMASK) &
1445 					   ~HAMMER_BUFMASK;
1446 				zone = HAMMER_ZONE_LARGE_DATA_INDEX;
1447 			}
1448 			break;
1449 		default:
1450 			panic("hammer_alloc_data: rec_type %04x unknown",
1451 			      rec_type);
1452 			zone = 0;	/* NOT REACHED */
1453 			break;
1454 		}
1455 		*data_offsetp = hammer_blockmap_alloc(trans, zone,
1456 						      data_len, errorp);
1457 	} else {
1458 		*data_offsetp = 0;
1459 	}
1460 	if (*errorp == 0 && data_bufferp) {
1461 		if (data_len) {
1462 			data = hammer_bread_ext(trans->hmp, *data_offsetp,
1463 						data_len, errorp, data_bufferp);
1464 		} else {
1465 			data = NULL;
1466 		}
1467 	} else {
1468 		data = NULL;
1469 	}
1470 	return(data);
1471 }
1472 
1473 /*
1474  * Sync dirty buffers to the media and clean-up any loose ends.
1475  * Sync dirty buffers to the media and clean up any loose ends.
1476  * These functions do not start the flusher going, they simply
1477  * queue everything up to the flusher.
1478  */
1479 static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
1480 static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);
1481 
1482 int
1483 hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
1484 {
1485 	struct hammer_sync_info info;
1486 
1487 	info.error = 0;
1488 	info.waitfor = waitfor;
1489 	if (waitfor == MNT_WAIT) {
1490 		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
1491 			      hammer_sync_scan1, hammer_sync_scan2, &info);
1492 	} else {
1493 		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
1494 			      hammer_sync_scan1, hammer_sync_scan2, &info);
1495 	}
1496 	return(info.error);
1497 }
1498 
1499 /*
1500  * Filesystem sync.  If doing a synchronous sync make a second pass on
1501  * the vnodes in case any were already flushing during the first pass,
1502  * and activate the flusher twice (the second time brings the UNDO FIFO's
1503  * start position up to the end position after the first call).
1504  */
1505 int
1506 hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
1507 {
1508 	struct hammer_sync_info info;
1509 
1510 	info.error = 0;
1511 	info.waitfor = MNT_NOWAIT;
1512 	vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
1513 		      hammer_sync_scan1, hammer_sync_scan2, &info);
1514 	if (info.error == 0 && waitfor == MNT_WAIT) {
1515 		info.waitfor = waitfor;
1516 		vmntvnodescan(hmp->mp, VMSC_GETVP,
1517 			      hammer_sync_scan1, hammer_sync_scan2, &info);
1518 	}
1519 	if (waitfor == MNT_WAIT) {
1520 		hammer_flusher_sync(hmp);
1521 		hammer_flusher_sync(hmp);
1522 	} else {
1523 		hammer_flusher_async(hmp, NULL);
1524 		hammer_flusher_async(hmp, NULL);
1525 	}
1526 	return(info.error);
1527 }
1528 
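/*
 * Pre-check callback for vmntvnodescan().  Returns -1 to skip vnodes
 * which are clean (no modified inode flags and no dirty buffers),
 * 0 to pass them on to hammer_sync_scan2().
 */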
1529 static int
1530 hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
1531 {
1532 	struct hammer_inode *ip;
1533 
1534 	ip = VTOI(vp);
1535 	if (vp->v_type == VNON || ip == NULL ||
1536 	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1537 	     RB_EMPTY(&vp->v_rbdirty_tree))) {
1538 		return(-1);
1539 	}
1540 	return(0);
1541 }
1542 
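/*
 * Per-vnode callback for vmntvnodescan().  Issues a non-blocking
 * VOP_FSYNC() on each remaining dirty vnode and records any error
 * in the sync info.
 */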
1543 static int
1544 hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
1545 {
1546 	struct hammer_sync_info *info = data;
1547 	struct hammer_inode *ip;
1548 	int error;
1549 
1550 	ip = VTOI(vp);
1551 	if (vp->v_type == VNON || vp->v_type == VBAD ||
1552 	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1553 	     RB_EMPTY(&vp->v_rbdirty_tree))) {
1554 		return(0);
1555 	}
1556 	error = VOP_FSYNC(vp, MNT_NOWAIT);
1557 	if (error)
1558 		info->error = error;
1559 	return(0);
1560 }
1561 
1562