xref: /dragonfly/sys/vfs/hammer/hammer_ondisk.c (revision c541a65d)
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
35  */
36 /*
37  * Manage HAMMER's on-disk structures.  These routines are primarily
38  * responsible for interfacing with the kernel's I/O subsystem and for
39  * managing in-memory structures.
40  */
41 
42 #include "hammer.h"
43 #include <sys/fcntl.h>
44 #include <sys/nlookup.h>
45 #include <sys/buf.h>
46 #include <sys/buf2.h>
47 
48 static void hammer_free_volume(hammer_volume_t volume);
49 static int hammer_load_volume(hammer_volume_t volume);
50 static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
51 static int hammer_load_node(hammer_node_t node, int isnew);
52 
53 static int
54 hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
55 {
56 	if (vol1->vol_no < vol2->vol_no)
57 		return(-1);
58 	if (vol1->vol_no > vol2->vol_no)
59 		return(1);
60 	return(0);
61 }
62 
63 static int
64 hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
65 {
66 	if (buf1->zoneX_offset < buf2->zoneX_offset)
67 		return(-1);
68 	if (buf1->zoneX_offset > buf2->zoneX_offset)
69 		return(1);
70 	return(0);
71 }
72 
73 static int
74 hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
75 {
76 	if (node1->node_offset < node2->node_offset)
77 		return(-1);
78 	if (node1->node_offset > node2->node_offset)
79 		return(1);
80 	return(0);
81 }
82 
83 RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
84 	     hammer_vol_rb_compare, int32_t, vol_no);
85 RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
86 	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
87 RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
88 	     hammer_nod_rb_compare, hammer_off_t, node_offset);
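
/*
 * Example sketch (illustrative only, excluded from compilation):
 * RB_GENERATE2 emits keyed lookup/insert functions for each tree, so
 * callers can search directly by the key field named in the macro
 * (e.g. vol_no) instead of filling out a dummy structure, as the
 * lookup paths later in this file do.
 */
#if 0
static hammer_volume_t
example_lookup_volume(struct hammer_mount *hmp, int32_t vol_no)
{
	/* keyed lookup generated by RB_GENERATE2 above */
	return(RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no));
}
#endif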
89 
90 /************************************************************************
91  *				VOLUMES					*
92  ************************************************************************
93  *
94  * Load a HAMMER volume by name.  Returns 0 on success or a positive error
95  * code on failure.  Volumes must be loaded at mount time; get_volume()
96  * will not load a new volume.
97  *
98  * Calls made to hammer_load_volume() are single-threaded.
99  */
100 int
101 hammer_install_volume(struct hammer_mount *hmp, const char *volname,
102 		      struct vnode *devvp)
103 {
104 	struct mount *mp;
105 	hammer_volume_t volume;
106 	struct hammer_volume_ondisk *ondisk;
107 	struct nlookupdata nd;
108 	struct buf *bp = NULL;
109 	int error;
110 	int ronly;
111 	int setmp = 0;
112 
113 	mp = hmp->mp;
114 	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
115 
116 	/*
117 	 * Allocate a volume structure
118 	 */
119 	++hammer_count_volumes;
120 	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
121 	volume->vol_name = kstrdup(volname, hmp->m_misc);
122 	volume->io.hmp = hmp;	/* bootstrap */
123 	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
124 	volume->io.offset = 0LL;
125 	volume->io.bytes = HAMMER_BUFSIZE;
126 
127 	/*
128 	 * Get the device vnode
129 	 */
130 	if (devvp == NULL) {
131 		error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
132 		if (error == 0)
133 			error = nlookup(&nd);
134 		if (error == 0)
135 			error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
136 		nlookup_done(&nd);
137 	} else {
138 		error = 0;
139 		volume->devvp = devvp;
140 	}
141 
142 	if (error == 0) {
143 		if (vn_isdisk(volume->devvp, &error)) {
144 			error = vfs_mountedon(volume->devvp);
145 		}
146 	}
147 	if (error == 0 &&
148 	    count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
149 		error = EBUSY;
150 	}
151 	if (error == 0) {
152 		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
153 		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
154 		if (error == 0) {
155 			error = VOP_OPEN(volume->devvp,
156 					 (ronly ? FREAD : FREAD|FWRITE),
157 					 FSCRED, NULL);
158 		}
159 		vn_unlock(volume->devvp);
160 	}
161 	if (error) {
162 		hammer_free_volume(volume);
163 		return(error);
164 	}
165 	volume->devvp->v_rdev->si_mountpoint = mp;
166 	setmp = 1;
167 
168 	/*
169 	 * Extract the volume number from the volume header and do various
170 	 * sanity checks.
171 	 */
172 	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
173 	if (error)
174 		goto late_failure;
175 	ondisk = (void *)bp->b_data;
176 	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
177 		kprintf("hammer_mount: volume %s has an invalid header\n",
178 			volume->vol_name);
179 		error = EFTYPE;
180 		goto late_failure;
181 	}
182 	volume->vol_no = ondisk->vol_no;
183 	volume->buffer_base = ondisk->vol_buf_beg;
184 	volume->vol_flags = ondisk->vol_flags;
185 	volume->nblocks = ondisk->vol_nblocks;
186 	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
187 				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
188 	volume->maxraw_off = ondisk->vol_buf_end;
189 
190 	if (RB_EMPTY(&hmp->rb_vols_root)) {
191 		hmp->fsid = ondisk->vol_fsid;
192 	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
193 		kprintf("hammer_mount: volume %s's fsid does not match "
194 			"other volumes\n", volume->vol_name);
195 		error = EFTYPE;
196 		goto late_failure;
197 	}
198 
199 	/*
200 	 * Insert the volume structure into the red-black tree.
201 	 */
202 	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
203 		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
204 			volume->vol_name, volume->vol_no);
205 		error = EEXIST;
206 	}
207 
208 	/*
209  * Set the root volume.  HAMMER special-cases the rootvol structure.
210 	 * We do not hold a ref because this would prevent related I/O
211 	 * from being flushed.
212 	 */
213 	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
214 		hmp->rootvol = volume;
215 		hmp->nvolumes = ondisk->vol_count;
216 		if (bp) {
217 			brelse(bp);
218 			bp = NULL;
219 		}
220 		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
221 			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
222 		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
223 			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
224 	}
225 late_failure:
226 	if (bp)
227 		brelse(bp);
228 	if (error) {
229 		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
230 		if (setmp)
231 			volume->devvp->v_rdev->si_mountpoint = NULL;
232 		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
233 		hammer_free_volume(volume);
234 	}
235 	return (error);
236 }
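
/*
 * Example sketch (illustrative only, excluded from compilation): the
 * mount path is expected to call hammer_install_volume() once per
 * volume before any hammer_get_volume() calls are made.  The volname
 * array and count below are hypothetical stand-ins for mount argument
 * parsing.
 */
#if 0
static int
example_install_volumes(struct hammer_mount *hmp, char **volnames, int nvols)
{
	int error = 0;
	int i;

	for (i = 0; i < nvols && error == 0; ++i)
		error = hammer_install_volume(hmp, volnames[i], NULL);
	return(error);
}
#endif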
237 
238 /*
239  * This is called for each volume when updating the mount point from
240  * read-write to read-only or vice-versa.
241  */
242 int
243 hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
244 {
245 	if (volume->devvp) {
246 		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
247 		if (volume->io.hmp->ronly) {
248 			/* do not call vinvalbuf */
249 			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
250 			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
251 		} else {
252 			/* do not call vinvalbuf */
253 			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
254 			VOP_CLOSE(volume->devvp, FREAD);
255 		}
256 		vn_unlock(volume->devvp);
257 	}
258 	return(0);
259 }
260 
261 /*
262  * Unload and free a HAMMER volume.  Must return >= 0 to continue the
263  * scan (returning -1 would abort it).
264  */
265 int
266 hammer_unload_volume(hammer_volume_t volume, void *data __unused)
267 {
268 	hammer_mount_t hmp = volume->io.hmp;
269 	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
270 	struct buf *bp;
271 
272 	/*
273 	 * Clean up the root volume pointer, which is held unlocked in hmp.
274 	 */
275 	if (hmp->rootvol == volume)
276 		hmp->rootvol = NULL;
277 
278 	/*
279 	 * We must not flush a dirty buffer to disk on umount.  It should
280 	 * have already been dealt with by the flusher, or we may be in
281 	 * catastrophic failure.
282 	 */
283 	hammer_io_clear_modify(&volume->io, 1);
284 	volume->io.waitdep = 1;
285 	bp = hammer_io_release(&volume->io, 1);
286 
287 	/*
288 	 * Clean up the persistent ref that ioerror might have on the volume.
289 	 */
290 	if (volume->io.ioerror) {
291 		volume->io.ioerror = 0;
292 		hammer_unref(&volume->io.lock);
293 	}
294 
295 	/*
296 	 * There should be no references on the volume, no clusters, and
297 	 * no super-clusters.
298 	 */
299 	KKASSERT(volume->io.lock.refs == 0);
300 	if (bp)
301 		brelse(bp);
302 
303 	volume->ondisk = NULL;
304 	if (volume->devvp) {
305 		if (volume->devvp->v_rdev &&
306 		    volume->devvp->v_rdev->si_mountpoint == hmp->mp
307 		) {
308 			volume->devvp->v_rdev->si_mountpoint = NULL;
309 		}
310 		if (ronly) {
311 			/*
312 			 * Make sure we don't sync anything to disk if we
313 			 * are in read-only mode (1) or critically-errored
314 			 * (2).  Note that there may be dirty buffers in
315 			 * normal read-only mode from crash recovery.
316 			 */
317 			vinvalbuf(volume->devvp, 0, 0, 0);
318 			VOP_CLOSE(volume->devvp, FREAD);
319 		} else {
320 			/*
321 			 * Normal termination, save any dirty buffers
322 			 * (XXX there really shouldn't be any).
323 			 */
324 			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
325 			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
326 		}
327 	}
328 
329 	/*
330 	 * Destroy the structure
331 	 */
332 	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
333 	hammer_free_volume(volume);
334 	return(0);
335 }
336 
337 static
338 void
339 hammer_free_volume(hammer_volume_t volume)
340 {
341 	hammer_mount_t hmp = volume->io.hmp;
342 
343 	if (volume->vol_name) {
344 		kfree(volume->vol_name, hmp->m_misc);
345 		volume->vol_name = NULL;
346 	}
347 	if (volume->devvp) {
348 		vrele(volume->devvp);
349 		volume->devvp = NULL;
350 	}
351 	--hammer_count_volumes;
352 	kfree(volume, hmp->m_misc);
353 }
354 
355 /*
356  * Get a HAMMER volume.  The volume must already exist.
357  */
358 hammer_volume_t
359 hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
360 {
361 	struct hammer_volume *volume;
362 
363 	/*
364 	 * Locate the volume structure
365 	 */
366 	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
367 	if (volume == NULL) {
368 		*errorp = ENOENT;
369 		return(NULL);
370 	}
371 	hammer_ref(&volume->io.lock);
372 
373 	/*
374 	 * Deal with on-disk info
375 	 */
376 	if (volume->ondisk == NULL || volume->io.loading) {
377 		*errorp = hammer_load_volume(volume);
378 		if (*errorp) {
379 			hammer_rel_volume(volume, 1);
380 			volume = NULL;
381 		}
382 	} else {
383 		*errorp = 0;
384 	}
385 	return(volume);
386 }
387 
388 int
389 hammer_ref_volume(hammer_volume_t volume)
390 {
391 	int error;
392 
393 	hammer_ref(&volume->io.lock);
394 
395 	/*
396 	 * Deal with on-disk info
397 	 */
398 	if (volume->ondisk == NULL || volume->io.loading) {
399 		error = hammer_load_volume(volume);
400 		if (error)
401 			hammer_rel_volume(volume, 1);
402 	} else {
403 		error = 0;
404 	}
405 	return (error);
406 }
407 
408 hammer_volume_t
409 hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
410 {
411 	hammer_volume_t volume;
412 
413 	volume = hmp->rootvol;
414 	KKASSERT(volume != NULL);
415 	hammer_ref(&volume->io.lock);
416 
417 	/*
418 	 * Deal with on-disk info
419 	 */
420 	if (volume->ondisk == NULL || volume->io.loading) {
421 		*errorp = hammer_load_volume(volume);
422 		if (*errorp) {
423 			hammer_rel_volume(volume, 1);
424 			volume = NULL;
425 		}
426 	} else {
427 		*errorp = 0;
428 	}
429 	return (volume);
430 }
431 
432 /*
433  * Load a volume's on-disk information.  The volume must be referenced and
434  * not locked.  We temporarily acquire an exclusive lock to interlock
435  * against releases or multiple get's.
436  */
437 static int
438 hammer_load_volume(hammer_volume_t volume)
439 {
440 	int error;
441 
442 	++volume->io.loading;
443 	hammer_lock_ex(&volume->io.lock);
444 
445 	if (volume->ondisk == NULL) {
446 		error = hammer_io_read(volume->devvp, &volume->io,
447 				       volume->maxraw_off);
448 		if (error == 0)
449 			volume->ondisk = (void *)volume->io.bp->b_data;
450 	} else {
451 		error = 0;
452 	}
453 	--volume->io.loading;
454 	hammer_unlock(&volume->io.lock);
455 	return(error);
456 }
457 
458 /*
459  * Release a volume.  Call hammer_io_release on the last reference.  We have
460  * to acquire an exclusive lock to interlock against volume->ondisk tests
461  * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
462  * lock to be held.
463  *
464  * Volumes are not unloaded from memory during normal operation.
465  */
466 void
467 hammer_rel_volume(hammer_volume_t volume, int flush)
468 {
469 	struct buf *bp = NULL;
470 
471 	crit_enter();
472 	if (volume->io.lock.refs == 1) {
473 		++volume->io.loading;
474 		hammer_lock_ex(&volume->io.lock);
475 		if (volume->io.lock.refs == 1) {
476 			volume->ondisk = NULL;
477 			bp = hammer_io_release(&volume->io, flush);
478 		}
479 		--volume->io.loading;
480 		hammer_unlock(&volume->io.lock);
481 	}
482 	hammer_unref(&volume->io.lock);
483 	if (bp)
484 		brelse(bp);
485 	crit_exit();
486 }
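
/*
 * Example sketch (illustrative only, excluded from compilation): the
 * usual get/release pairing for volumes.  hammer_get_volume() returns
 * a referenced volume, or NULL with *errorp set, and the reference
 * must be balanced with hammer_rel_volume().
 */
#if 0
static void
example_touch_volume(struct hammer_mount *hmp, int32_t vol_no)
{
	hammer_volume_t volume;
	int error;

	volume = hammer_get_volume(hmp, vol_no, &error);
	if (volume) {
		/* ... inspect volume->ondisk ... */
		hammer_rel_volume(volume, 0);
	}
}
#endif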
487 
488 int
489 hammer_mountcheck_volumes(struct hammer_mount *hmp)
490 {
491 	hammer_volume_t vol;
492 	int i;
493 
494 	for (i = 0; i < hmp->nvolumes; ++i) {
495 		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
496 		if (vol == NULL)
497 			return(EINVAL);
498 	}
499 	return(0);
500 }
501 
502 /************************************************************************
503  *				BUFFERS					*
504  ************************************************************************
505  *
506  * Manage buffers.  Currently all blockmap-backed zones are translated
507  * to zone-2 buffer offsets.
508  */
509 hammer_buffer_t
510 hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
511 		  int bytes, int isnew, int *errorp)
512 {
513 	hammer_buffer_t buffer;
514 	hammer_volume_t volume;
515 	hammer_off_t	zone2_offset;
516 	hammer_io_type_t iotype;
517 	int vol_no;
518 	int zone;
519 
520 	buf_offset &= ~HAMMER_BUFMASK64;
521 again:
522 	/*
523 	 * Shortcut if the buffer is already cached
524 	 */
525 	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
526 	if (buffer) {
527 		if (buffer->io.lock.refs == 0)
528 			++hammer_count_refedbufs;
529 		hammer_ref(&buffer->io.lock);
530 
531 		/*
532 		 * Once refed the ondisk field will not be cleared by
533 		 * any other action.
534 		 */
535 		if (buffer->ondisk && buffer->io.loading == 0) {
536 			*errorp = 0;
537 			return(buffer);
538 		}
539 
540 		/*
541 		 * The buffer is no longer loose if it has a ref, and
542 		 * cannot become loose once it gains a ref.  Loose
543 		 * buffers will never be in a modified state.  This should
544 		 * only occur on the 0->1 transition of refs.
545 		 *
546 		 * lose_list can be modified via a biodone() interrupt.
547 		 */
548 		if (buffer->io.mod_list == &hmp->lose_list) {
549 			crit_enter();	/* biodone race against list */
550 			TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
551 				     mod_entry);
552 			crit_exit();
553 			buffer->io.mod_list = NULL;
554 			KKASSERT(buffer->io.modified == 0);
555 		}
556 		goto found;
557 	}
558 
559 	/*
560 	 * What is the buffer class?
561 	 */
562 	zone = HAMMER_ZONE_DECODE(buf_offset);
563 
564 	switch(zone) {
565 	case HAMMER_ZONE_LARGE_DATA_INDEX:
566 	case HAMMER_ZONE_SMALL_DATA_INDEX:
567 		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
568 		break;
569 	case HAMMER_ZONE_UNDO_INDEX:
570 		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
571 		break;
572 	case HAMMER_ZONE_META_INDEX:
573 	default:
574 		/*
575 		 * NOTE: inode data and directory entries are placed in this
576 		 * zone.  inode atime/mtime is updated in-place and thus
577 		 * buffers containing inodes must be synchronized as
578 		 * meta-buffers, same as buffers containing B-Tree info.
579 		 */
580 		iotype = HAMMER_STRUCTURE_META_BUFFER;
581 		break;
582 	}
583 
584 	/*
585 	 * Handle blockmap offset translations
586 	 */
587 	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
588 		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
589 	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
590 		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
591 	} else {
592 		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
593 		zone2_offset = buf_offset;
594 		*errorp = 0;
595 	}
596 	if (*errorp)
597 		return(NULL);
598 
599 	/*
600 	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
601 	 * specifications.
602 	 */
603 	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
604 		 HAMMER_ZONE_RAW_BUFFER);
605 	vol_no = HAMMER_VOL_DECODE(zone2_offset);
606 	volume = hammer_get_volume(hmp, vol_no, errorp);
607 	if (volume == NULL)
608 		return(NULL);
609 
610 	KKASSERT(zone2_offset < volume->maxbuf_off);
611 
612 	/*
613 	 * Allocate a new buffer structure.  We will check for races later.
614 	 */
615 	++hammer_count_buffers;
616 	buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
617 			 M_WAITOK|M_ZERO|M_USE_RESERVE);
618 	buffer->zone2_offset = zone2_offset;
619 	buffer->zoneX_offset = buf_offset;
620 
621 	hammer_io_init(&buffer->io, volume, iotype);
622 	buffer->io.offset = volume->ondisk->vol_buf_beg +
623 			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
624 	buffer->io.bytes = bytes;
625 	TAILQ_INIT(&buffer->clist);
626 	hammer_ref(&buffer->io.lock);
627 
628 	/*
629 	 * Insert the buffer into the RB tree and handle late collisions.
630 	 */
631 	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
632 		hammer_unref(&buffer->io.lock);
633 		--hammer_count_buffers;
634 		kfree(buffer, hmp->m_misc);
635 		goto again;
636 	}
637 	++hammer_count_refedbufs;
638 found:
639 
640 	/*
641 	 * Deal with on-disk info and loading races.
642 	 */
643 	if (buffer->ondisk == NULL || buffer->io.loading) {
644 		*errorp = hammer_load_buffer(buffer, isnew);
645 		if (*errorp) {
646 			hammer_rel_buffer(buffer, 1);
647 			buffer = NULL;
648 		}
649 	} else {
650 		*errorp = 0;
651 	}
652 	return(buffer);
653 }
654 
655 /*
656  * This is used by the direct-read code to deal with large-data buffers
657  * created by the reblocker and mirror-write code.  The direct-read code
658  * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
659  * running hammer buffers must be fully synced to disk before we can issue
660  * the direct-read.
661  *
662  * This code path is not considered critical as only the reblocker and
663  * mirror-write code will create large-data buffers via the HAMMER buffer
664  * subsystem.  They do that because they operate at the B-Tree level and
665  * do not access the vnode/inode structures.
666  */
667 void
668 hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
669 {
670 	hammer_buffer_t buffer;
671 	int error;
672 
673 	KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
674 		 HAMMER_ZONE_LARGE_DATA);
675 
676 	while (bytes > 0) {
677 		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
678 				   base_offset);
679 		if (buffer && (buffer->io.modified || buffer->io.running)) {
680 			error = hammer_ref_buffer(buffer);
681 			if (error == 0) {
682 				hammer_io_wait(&buffer->io);
683 				if (buffer->io.modified) {
684 					hammer_io_write_interlock(&buffer->io);
685 					hammer_io_flush(&buffer->io);
686 					hammer_io_done_interlock(&buffer->io);
687 					hammer_io_wait(&buffer->io);
688 				}
689 				hammer_rel_buffer(buffer, 0);
690 			}
691 		}
692 		base_offset += HAMMER_BUFSIZE;
693 		bytes -= HAMMER_BUFSIZE;
694 	}
695 }
696 
697 /*
698  * Destroy all buffers covering the specified zoneX offset range.  This
699  * is called when the related blockmap layer2 entry is freed or when
700  * a direct write bypasses our buffer/buffer-cache subsystem.
701  *
702  * The buffers may be referenced by the caller itself.  Setting reclaim
703  * will cause the buffer to be destroyed when its ref count reaches zero.
704  */
705 void
706 hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
707 		   hammer_off_t zone2_offset, int bytes)
708 {
709 	hammer_buffer_t buffer;
710 	hammer_volume_t volume;
711 	int vol_no;
712 	int error;
713 
714 	vol_no = HAMMER_VOL_DECODE(zone2_offset);
715 	volume = hammer_get_volume(hmp, vol_no, &error);
716 	KKASSERT(error == 0);
717 
718 	while (bytes > 0) {
719 		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
720 				   base_offset);
721 		if (buffer) {
722 			error = hammer_ref_buffer(buffer);
723 			if (error == 0) {
724 				KKASSERT(buffer->zone2_offset == zone2_offset);
725 				hammer_io_clear_modify(&buffer->io, 1);
726 				buffer->io.reclaim = 1;
727 				buffer->io.waitdep = 1;
728 				KKASSERT(buffer->io.volume == volume);
729 				hammer_rel_buffer(buffer, 0);
730 			}
731 		} else {
732 			hammer_io_inval(volume, zone2_offset);
733 		}
734 		base_offset += HAMMER_BUFSIZE;
735 		zone2_offset += HAMMER_BUFSIZE;
736 		bytes -= HAMMER_BUFSIZE;
737 	}
738 	hammer_rel_volume(volume, 0);
739 }
740 
741 static int
742 hammer_load_buffer(hammer_buffer_t buffer, int isnew)
743 {
744 	hammer_volume_t volume;
745 	int error;
746 
747 	/*
748 	 * Load the buffer's on-disk info
749 	 */
750 	volume = buffer->io.volume;
751 	++buffer->io.loading;
752 	hammer_lock_ex(&buffer->io.lock);
753 
754 	if (hammer_debug_io & 0x0001) {
755 		kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
756 			buffer->zoneX_offset, buffer->zone2_offset, isnew,
757 			buffer->ondisk);
758 	}
759 
760 	if (buffer->ondisk == NULL) {
761 		if (isnew) {
762 			error = hammer_io_new(volume->devvp, &buffer->io);
763 		} else {
764 			error = hammer_io_read(volume->devvp, &buffer->io,
765 					       volume->maxraw_off);
766 		}
767 		if (error == 0)
768 			buffer->ondisk = (void *)buffer->io.bp->b_data;
769 	} else if (isnew) {
770 		error = hammer_io_new(volume->devvp, &buffer->io);
771 	} else {
772 		error = 0;
773 	}
774 	--buffer->io.loading;
775 	hammer_unlock(&buffer->io.lock);
776 	return (error);
777 }
778 
779 /*
780  * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
781  * This routine is only called during unmount.
782  */
783 int
784 hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
785 {
786 	/*
787 	 * Clean up the persistent ref that ioerror might have on the buffer
788 	 * and acquire a ref (steal ioerror's if we can).
789 	 */
790 	if (buffer->io.ioerror) {
791 		buffer->io.ioerror = 0;
792 	} else {
793 		if (buffer->io.lock.refs == 0)
794 			++hammer_count_refedbufs;
795 		hammer_ref(&buffer->io.lock);
796 	}
797 
798 	/*
799 	 * We must not flush a dirty buffer to disk on umount.  It should
800 	 * have already been dealt with by the flusher, or we may be in
801 	 * catastrophic failure.
802 	 */
803 	hammer_io_clear_modify(&buffer->io, 1);
804 	hammer_flush_buffer_nodes(buffer);
805 	KKASSERT(buffer->io.lock.refs == 1);
806 	hammer_rel_buffer(buffer, 2);
807 	return(0);
808 }
809 
810 /*
811  * Reference a buffer that is either already referenced or via a specially
812  * handled pointer (aka cursor->buffer).
813  */
814 int
815 hammer_ref_buffer(hammer_buffer_t buffer)
816 {
817 	int error;
818 
819 	if (buffer->io.lock.refs == 0)
820 		++hammer_count_refedbufs;
821 	hammer_ref(&buffer->io.lock);
822 
823 	/*
824 	 * At this point a biodone() will not touch the buffer other than
825 	 * incidental bits.  However, lose_list can be modified via
826 	 * a biodone() interrupt.
827 	 *
828 	 * No longer loose
829 	 */
830 	if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
831 		crit_enter();
832 		TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
833 		buffer->io.mod_list = NULL;
834 		crit_exit();
835 	}
836 
837 	if (buffer->ondisk == NULL || buffer->io.loading) {
838 		error = hammer_load_buffer(buffer, 0);
839 		if (error) {
840 			hammer_rel_buffer(buffer, 1);
841 			/*
842 			 * NOTE: buffer pointer can become stale after
843 			 * the above release.
844 			 */
845 		}
846 	} else {
847 		error = 0;
848 	}
849 	return(error);
850 }
851 
852 /*
853  * Release a buffer.  We have to deal with several places where
854  * another thread can ref the buffer.
855  *
856  * Only destroy the structure itself if the related buffer cache buffer
857  * was disassociated from it.  This ties the management of the structure
858  * to the buffer cache subsystem.  buffer->ondisk determines whether the
859  * embedded io is referenced or not.
860  */
861 void
862 hammer_rel_buffer(hammer_buffer_t buffer, int flush)
863 {
864 	hammer_volume_t volume;
865 	hammer_mount_t hmp;
866 	struct buf *bp = NULL;
867 	int freeme = 0;
868 
869 	hmp = buffer->io.hmp;
870 
871 	crit_enter();
872 	if (buffer->io.lock.refs == 1) {
873 		++buffer->io.loading;	/* force interlock check */
874 		hammer_lock_ex(&buffer->io.lock);
875 		if (buffer->io.lock.refs == 1) {
876 			bp = hammer_io_release(&buffer->io, flush);
877 
878 			if (buffer->io.lock.refs == 1)
879 				--hammer_count_refedbufs;
880 
881 			if (buffer->io.bp == NULL &&
882 			    buffer->io.lock.refs == 1) {
883 				/*
884 				 * Final cleanup
885 				 *
886 				 * NOTE: It is impossible for any associated
887 				 * B-Tree nodes to have refs if the buffer
888 				 * has no additional refs.
889 				 */
890 				RB_REMOVE(hammer_buf_rb_tree,
891 					  &buffer->io.hmp->rb_bufs_root,
892 					  buffer);
893 				volume = buffer->io.volume;
894 				buffer->io.volume = NULL; /* sanity */
895 				hammer_rel_volume(volume, 0);
896 				hammer_io_clear_modlist(&buffer->io);
897 				hammer_flush_buffer_nodes(buffer);
898 				KKASSERT(TAILQ_EMPTY(&buffer->clist));
899 				freeme = 1;
900 			}
901 		}
902 		--buffer->io.loading;
903 		hammer_unlock(&buffer->io.lock);
904 	}
905 	hammer_unref(&buffer->io.lock);
906 	crit_exit();
907 	if (bp)
908 		brelse(bp);
909 	if (freeme) {
910 		--hammer_count_buffers;
911 		kfree(buffer, hmp->m_misc);
912 	}
913 }
914 
915 /*
916  * Access the filesystem buffer containing the specified hammer offset.
917  * buf_offset is a conglomeration of the volume number and vol_buf_beg
918  * relative buffer offset.  It must also have bit 55 set to be valid.
919  * (see hammer_off_t in hammer_disk.h).
920  *
921  * Any prior buffer in *bufferp will be released and replaced by the
922  * requested buffer.
923  */
924 static __inline
925 void *
926 _hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
927 	     int *errorp, struct hammer_buffer **bufferp)
928 {
929 	hammer_buffer_t buffer;
930 	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;
931 
932 	buf_offset &= ~HAMMER_BUFMASK64;
933 	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);
934 
935 	buffer = *bufferp;
936 	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
937 			       buffer->zoneX_offset != buf_offset)) {
938 		if (buffer)
939 			hammer_rel_buffer(buffer, 0);
940 		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
941 		*bufferp = buffer;
942 	} else {
943 		*errorp = 0;
944 	}
945 
946 	/*
947 	 * Return a pointer to the buffer data.
948 	 */
949 	if (buffer == NULL)
950 		return(NULL);
951 	else
952 		return((char *)buffer->ondisk + xoff);
953 }
954 
955 void *
956 hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
957 	     int *errorp, struct hammer_buffer **bufferp)
958 {
959 	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
960 }
961 
962 void *
963 hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
964 	         int *errorp, struct hammer_buffer **bufferp)
965 {
966 	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
967 	return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
968 }
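
/*
 * Example sketch (illustrative only, excluded from compilation): the
 * *bufferp cursor pattern described above.  The caller starts with a
 * NULL buffer pointer, passes the same pointer to consecutive
 * hammer_bread() calls so the underlying buffer can be reused or
 * swapped, and releases whatever buffer is left at the end.
 */
#if 0
static void
example_read_offsets(hammer_mount_t hmp, hammer_off_t off1, hammer_off_t off2)
{
	hammer_buffer_t buffer = NULL;
	void *data;
	int error;

	data = hammer_bread(hmp, off1, &error, &buffer);
	/* ... use data if non-NULL ... */
	data = hammer_bread(hmp, off2, &error, &buffer);
	/* ... use data if non-NULL ... */
	if (buffer)
		hammer_rel_buffer(buffer, 0);
}
#endif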
969 
970 /*
971  * Access the filesystem buffer containing the specified hammer offset.
972  * No disk read operation occurs.  The result buffer may contain garbage.
973  *
974  * Any prior buffer in *bufferp will be released and replaced by the
975  * requested buffer.
976  *
977  * This function marks the buffer dirty but does not increment its
978  * modify_refs count.
979  */
980 static __inline
981 void *
982 _hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
983 	     int *errorp, struct hammer_buffer **bufferp)
984 {
985 	hammer_buffer_t buffer;
986 	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;
987 
988 	buf_offset &= ~HAMMER_BUFMASK64;
989 
990 	buffer = *bufferp;
991 	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
992 			       buffer->zoneX_offset != buf_offset)) {
993 		if (buffer)
994 			hammer_rel_buffer(buffer, 0);
995 		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
996 		*bufferp = buffer;
997 	} else {
998 		*errorp = 0;
999 	}
1000 
1001 	/*
1002 	 * Return a pointer to the buffer data.
1003 	 */
1004 	if (buffer == NULL)
1005 		return(NULL);
1006 	else
1007 		return((char *)buffer->ondisk + xoff);
1008 }
1009 
1010 void *
1011 hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
1012 	     int *errorp, struct hammer_buffer **bufferp)
1013 {
1014 	return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
1015 }
1016 
1017 void *
1018 hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
1019 		int *errorp, struct hammer_buffer **bufferp)
1020 {
1021 	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
1022 	return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
1023 }
1024 
1025 /************************************************************************
1026  *				NODES					*
1027  ************************************************************************
1028  *
1029  * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
1030  * method used by the HAMMER filesystem.
1031  *
1032  * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
1033  * associated with its buffer, and will only reference the buffer while
1034  * the node itself is referenced.
1035  *
1036  * A hammer_node can also be passively associated with other HAMMER
1037  * structures, such as inodes, while retaining 0 references.  These
1038  * associations can be cleared backwards using a pointer-to-pointer in
1039  * the hammer_node.
1040  *
1041  * This allows the HAMMER implementation to cache hammer_nodes long-term
1042  * and short-cut a great deal of the infrastructure's complexity.  In
1043  * most cases a cached node can be reacquired without having to dip into
1044  * either the buffer or cluster management code.
1045  *
1046  * The caller must pass a referenced transaction on call and will retain
1047  * ownership of the reference on return.  The node will acquire its own
1048  * additional references, if necessary.
1049  */
1050 hammer_node_t
1051 hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
1052 		int isnew, int *errorp)
1053 {
1054 	hammer_mount_t hmp = trans->hmp;
1055 	hammer_node_t node;
1056 
1057 	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);
1058 
1059 	/*
1060 	 * Locate the structure, allocating one if necessary.
1061 	 */
1062 again:
1063 	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
1064 	if (node == NULL) {
1065 		++hammer_count_nodes;
1066 		node = kmalloc(sizeof(*node), hmp->m_misc, M_WAITOK|M_ZERO|M_USE_RESERVE);
1067 		node->node_offset = node_offset;
1068 		node->hmp = hmp;
1069 		TAILQ_INIT(&node->cursor_list);
1070 		TAILQ_INIT(&node->cache_list);
1071 		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
1072 			--hammer_count_nodes;
1073 			kfree(node, hmp->m_misc);
1074 			goto again;
1075 		}
1076 	}
1077 	hammer_ref(&node->lock);
1078 	if (node->ondisk) {
1079 		*errorp = 0;
1080 	} else {
1081 		*errorp = hammer_load_node(node, isnew);
1082 		trans->flags |= HAMMER_TRANSF_DIDIO;
1083 	}
1084 	if (*errorp) {
1085 		hammer_rel_node(node);
1086 		node = NULL;
1087 	}
1088 	return(node);
1089 }
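
/*
 * Example sketch (illustrative only, excluded from compilation): a
 * typical get/release pairing for B-Tree nodes.  hammer_get_node()
 * returns a referenced node, or NULL with *errorp set, and the caller
 * balances the reference with hammer_rel_node().
 */
#if 0
static void
example_touch_node(hammer_transaction_t trans, hammer_off_t node_offset)
{
	hammer_node_t node;
	int error;

	node = hammer_get_node(trans, node_offset, 0, &error);
	if (node) {
		/* ... inspect node->ondisk ... */
		hammer_rel_node(node);
	}
}
#endif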
1090 
1091 /*
1092  * Reference an already-referenced node.
1093  */
1094 void
1095 hammer_ref_node(hammer_node_t node)
1096 {
1097 	KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
1098 	hammer_ref(&node->lock);
1099 }
1100 
1101 /*
1102  * Load a node's on-disk data reference.
1103  */
1104 static int
1105 hammer_load_node(hammer_node_t node, int isnew)
1106 {
1107 	hammer_buffer_t buffer;
1108 	hammer_off_t buf_offset;
1109 	int error;
1110 
1111 	error = 0;
1112 	++node->loading;
1113 	hammer_lock_ex(&node->lock);
1114 	if (node->ondisk == NULL) {
1115 		/*
1116 		 * This is a little confusing but the gist is that
1117 		 * node->buffer determines whether the node is on
1118 		 * the buffer's clist and node->ondisk determines
1119 		 * whether the buffer is referenced.
1120 		 *
1121 		 * We could be racing a buffer release, in which case
1122 		 * node->buffer may become NULL while we are blocked
1123 		 * referencing the buffer.
1124 		 */
1125 		if ((buffer = node->buffer) != NULL) {
1126 			error = hammer_ref_buffer(buffer);
1127 			if (error == 0 && node->buffer == NULL) {
1128 				TAILQ_INSERT_TAIL(&buffer->clist,
1129 						  node, entry);
1130 				node->buffer = buffer;
1131 			}
1132 		} else {
1133 			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
1134 			buffer = hammer_get_buffer(node->hmp, buf_offset,
1135 						   HAMMER_BUFSIZE, 0, &error);
1136 			if (buffer) {
1137 				KKASSERT(error == 0);
1138 				TAILQ_INSERT_TAIL(&buffer->clist,
1139 						  node, entry);
1140 				node->buffer = buffer;
1141 			}
1142 		}
1143 		if (error)
1144 			goto failed;
1145 		node->ondisk = (void *)((char *)buffer->ondisk +
1146 				        (node->node_offset & HAMMER_BUFMASK));
1147 		if (isnew == 0 &&
1148 		    (node->flags & HAMMER_NODE_CRCGOOD) == 0) {
1149 			if (hammer_crc_test_btree(node->ondisk) == 0)
1150 				Debugger("CRC FAILED: B-TREE NODE");
1151 			node->flags |= HAMMER_NODE_CRCGOOD;
1152 		}
1153 	}
1154 failed:
1155 	--node->loading;
1156 	hammer_unlock(&node->lock);
1157 	return (error);
1158 }
1159 
1160 /*
1161  * Safely reference a node, interlock against flushes via the IO subsystem.
1162  */
1163 hammer_node_t
1164 hammer_ref_node_safe(struct hammer_mount *hmp, hammer_node_cache_t cache,
1165 		     int *errorp)
1166 {
1167 	hammer_node_t node;
1168 
1169 	node = cache->node;
1170 	if (node != NULL) {
1171 		hammer_ref(&node->lock);
1172 		if (node->ondisk)
1173 			*errorp = 0;
1174 		else
1175 			*errorp = hammer_load_node(node, 0);
1176 		if (*errorp) {
1177 			hammer_rel_node(node);
1178 			node = NULL;
1179 		}
1180 	} else {
1181 		*errorp = ENOENT;
1182 	}
1183 	return(node);
1184 }
1185 
1186 /*
1187  * Release a hammer_node.  On the last release the node dereferences
1188  * its underlying buffer and may or may not be destroyed.
1189  */
1190 void
1191 hammer_rel_node(hammer_node_t node)
1192 {
1193 	hammer_buffer_t buffer;
1194 
1195 	/*
1196 	 * If this isn't the last ref just decrement the ref count and
1197 	 * return.
1198 	 */
1199 	if (node->lock.refs > 1) {
1200 		hammer_unref(&node->lock);
1201 		return;
1202 	}
1203 
1204 	/*
1205 	 * If there is no ondisk info or no buffer the node failed to load,
1206 	 * remove the last reference and destroy the node.
1207 	 */
1208 	if (node->ondisk == NULL) {
1209 		hammer_unref(&node->lock);
1210 		hammer_flush_node(node);
1211 		/* node is stale now */
1212 		return;
1213 	}
1214 
1215 	/*
1216 	 * Do not disassociate the node from the buffer if it represents
1217 	 * a modified B-Tree node that still needs its crc to be generated.
1218 	 */
1219 	if (node->flags & HAMMER_NODE_NEEDSCRC)
1220 		return;
1221 
1222 	/*
1223 	 * Do final cleanups and then either destroy the node or leave it
1224 	 * passively cached.  The buffer reference is removed regardless.
1225 	 */
1226 	buffer = node->buffer;
1227 	node->ondisk = NULL;
1228 
1229 	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
1230 		hammer_unref(&node->lock);
1231 		hammer_rel_buffer(buffer, 0);
1232 		return;
1233 	}
1234 
1235 	/*
1236 	 * Destroy the node.
1237 	 */
1238 	hammer_unref(&node->lock);
1239 	hammer_flush_node(node);
1240 	/* node is stale */
1241 	hammer_rel_buffer(buffer, 0);
1242 }
1243 
1244 /*
1245  * Free space on-media associated with a B-Tree node.
1246  */
1247 void
1248 hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
1249 {
1250 	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
1251 	node->flags |= HAMMER_NODE_DELETED;
1252 	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
1253 }
1254 
1255 /*
1256  * Passively cache a referenced hammer_node.  The caller may release
1257  * the node on return.
1258  */
1259 void
1260 hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
1261 {
1262 	/*
1263 	 * If the node doesn't exist, or is being deleted, don't cache it!
1264 	 *
1265 	 * The node can only ever be NULL in the I/O failure path.
1266 	 */
1267 	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
1268 		return;
1269 	if (cache->node == node)
1270 		return;
1271 	while (cache->node)
1272 		hammer_uncache_node(cache);
1273 	if (node->flags & HAMMER_NODE_DELETED)
1274 		return;
1275 	cache->node = node;
1276 	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
1277 }
1278 
1279 void
1280 hammer_uncache_node(hammer_node_cache_t cache)
1281 {
1282 	hammer_node_t node;
1283 
1284 	if ((node = cache->node) != NULL) {
1285 		TAILQ_REMOVE(&node->cache_list, cache, entry);
1286 		cache->node = NULL;
1287 		if (TAILQ_EMPTY(&node->cache_list))
1288 			hammer_flush_node(node);
1289 	}
1290 }
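
/*
 * Example sketch (illustrative only, excluded from compilation):
 * passive caching lets a long-lived structure remember a node without
 * holding a reference.  The cache is primed while the node is
 * referenced and later revalidated via hammer_ref_node_safe().
 */
#if 0
static void
example_cache_cycle(struct hammer_mount *hmp, hammer_node_cache_t cache,
		    hammer_node_t node)
{
	int error;

	hammer_cache_node(cache, node);		/* node still referenced */
	hammer_rel_node(node);			/* association persists */
	/* ... later ... */
	node = hammer_ref_node_safe(hmp, cache, &error);
	if (node)
		hammer_rel_node(node);
}
#endif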
1291 
1292 /*
1293  * Remove a node's cache references and destroy the node if it has no
1294  * other references or backing store.
1295  */
1296 void
1297 hammer_flush_node(hammer_node_t node)
1298 {
1299 	hammer_node_cache_t cache;
1300 	hammer_buffer_t buffer;
1301 	hammer_mount_t hmp = node->hmp;
1302 
1303 	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
1304 		TAILQ_REMOVE(&node->cache_list, cache, entry);
1305 		cache->node = NULL;
1306 	}
1307 	if (node->lock.refs == 0 && node->ondisk == NULL) {
1308 		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
1309 		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
1310 		if ((buffer = node->buffer) != NULL) {
1311 			node->buffer = NULL;
1312 			TAILQ_REMOVE(&buffer->clist, node, entry);
1313 			/* buffer is unreferenced because ondisk is NULL */
1314 		}
1315 		--hammer_count_nodes;
1316 		kfree(node, hmp->m_misc);
1317 	}
1318 }
1319 
1320 /*
1321  * Flush passively cached B-Tree nodes associated with this buffer.
1322  * This is only called when the buffer is about to be destroyed, so
1323  * none of the nodes should have any references.  The buffer is locked.
1324  *
1325  * We may be interlocked with the buffer.
1326  */
1327 void
1328 hammer_flush_buffer_nodes(hammer_buffer_t buffer)
1329 {
1330 	hammer_node_t node;
1331 
1332 	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
1333 		KKASSERT(node->ondisk == NULL);
1334 		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
1335 
1336 		if (node->lock.refs == 0) {
1337 			hammer_ref(&node->lock);
1338 			node->flags |= HAMMER_NODE_FLUSH;
1339 			hammer_rel_node(node);
1340 		} else {
1341 			KKASSERT(node->loading != 0);
1342 			KKASSERT(node->buffer != NULL);
1343 			buffer = node->buffer;
1344 			node->buffer = NULL;
1345 			TAILQ_REMOVE(&buffer->clist, node, entry);
1346 			/* buffer is unreferenced because ondisk is NULL */
1347 		}
1348 	}
1349 }
1350 
1351 
1352 /************************************************************************
1353  *				ALLOCATORS				*
1354  ************************************************************************/
1355 
1356 /*
1357  * Allocate a B-Tree node.
1358  */
1359 hammer_node_t
1360 hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
1361 {
1362 	hammer_buffer_t buffer = NULL;
1363 	hammer_node_t node = NULL;
1364 	hammer_off_t node_offset;
1365 
1366 	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
1367 					    sizeof(struct hammer_node_ondisk),
1368 					    errorp);
1369 	if (*errorp == 0) {
1370 		node = hammer_get_node(trans, node_offset, 1, errorp);
1371 		hammer_modify_node_noundo(trans, node);
1372 		bzero(node->ondisk, sizeof(*node->ondisk));
1373 		hammer_modify_node_done(node);
1374 	}
1375 	if (buffer)
1376 		hammer_rel_buffer(buffer, 0);
1377 	return(node);
1378 }
1379 
1380 /*
1381  * Allocate data.  If the address of a data buffer is supplied then
1382  * any prior non-NULL *data_bufferp will be released and *data_bufferp
1383  * will be set to the related buffer.  The caller must release it when
1384  * finally done.  The initial *data_bufferp should be set to NULL by
1385  * the caller.
1386  *
1387  * The caller is responsible for making hammer_modify*() calls on the
1388  * *data_bufferp.
1389  */
1390 void *
1391 hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
1392 		  u_int16_t rec_type, hammer_off_t *data_offsetp,
1393 		  struct hammer_buffer **data_bufferp, int *errorp)
1394 {
1395 	void *data;
1396 	int zone;
1397 
1398 	/*
1399 	 * Allocate data
1400 	 */
1401 	if (data_len) {
1402 		switch(rec_type) {
1403 		case HAMMER_RECTYPE_INODE:
1404 		case HAMMER_RECTYPE_DIRENTRY:
1405 		case HAMMER_RECTYPE_EXT:
1406 		case HAMMER_RECTYPE_FIX:
1407 		case HAMMER_RECTYPE_PFS:
1408 			zone = HAMMER_ZONE_META_INDEX;
1409 			break;
1410 		case HAMMER_RECTYPE_DATA:
1411 		case HAMMER_RECTYPE_DB:
1412 			if (data_len <= HAMMER_BUFSIZE / 2) {
1413 				zone = HAMMER_ZONE_SMALL_DATA_INDEX;
1414 			} else {
1415 				data_len = (data_len + HAMMER_BUFMASK) &
1416 					   ~HAMMER_BUFMASK;
1417 				zone = HAMMER_ZONE_LARGE_DATA_INDEX;
1418 			}
1419 			break;
1420 		default:
1421 			panic("hammer_alloc_data: rec_type %04x unknown",
1422 			      rec_type);
1423 			zone = 0;	/* NOT REACHED */
1424 			break;
1425 		}
1426 		*data_offsetp = hammer_blockmap_alloc(trans, zone,
1427 						      data_len, errorp);
1428 	} else {
1429 		*data_offsetp = 0;
1430 	}
1431 	if (*errorp == 0 && data_bufferp) {
1432 		if (data_len) {
1433 			data = hammer_bread_ext(trans->hmp, *data_offsetp,
1434 						data_len, errorp, data_bufferp);
1435 		} else {
1436 			data = NULL;
1437 		}
1438 	} else {
1439 		data = NULL;
1440 	}
1441 	return(data);
1442 }
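
/*
 * Example sketch (illustrative only, excluded from compilation):
 * allocating record data.  The data buffer pointer starts NULL,
 * receives the buffer backing the allocation, and must be released by
 * the caller, who also brackets writes with the hammer_modify*()
 * calls as noted above.  The hammer_modify_buffer() bracketing shown
 * is an assumption about the usual calling convention, not taken from
 * this file.
 */
#if 0
static void
example_alloc_record_data(hammer_transaction_t trans, int32_t data_len)
{
	struct hammer_buffer *data_buffer = NULL;
	hammer_off_t data_offset;
	void *data;
	int error;

	data = hammer_alloc_data(trans, data_len, HAMMER_RECTYPE_DATA,
				 &data_offset, &data_buffer, &error);
	if (data) {
		hammer_modify_buffer(trans, data_buffer, NULL, 0);
		/* ... copy record payload into data ... */
		hammer_modify_buffer_done(data_buffer);
	}
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
}
#endif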
1443 
1444 /*
1445  * Sync dirty buffers to the media and clean-up any loose ends.
1446  *
1447  * These functions do not start the flusher going, they simply
1448  * queue everything up to the flusher.
1449  */
1450 static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
1451 static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);
1452 
1453 int
1454 hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
1455 {
1456 	struct hammer_sync_info info;
1457 
1458 	info.error = 0;
1459 	info.waitfor = waitfor;
1460 	if (waitfor == MNT_WAIT) {
1461 		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
1462 			      hammer_sync_scan1, hammer_sync_scan2, &info);
1463 	} else {
1464 		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
1465 			      hammer_sync_scan1, hammer_sync_scan2, &info);
1466 	}
1467 	return(info.error);
1468 }
1469 
1470 /*
1471  * Filesystem sync.  If doing a synchronous sync make a second pass on
1472  * the vnodes in case any were already flushing during the first pass,
1473  * and activate the flusher twice (the second time brings the UNDO FIFO's
1474  * start position up to the end position after the first call).
1475  */
1476 int
1477 hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
1478 {
1479 	struct hammer_sync_info info;
1480 
1481 	info.error = 0;
1482 	info.waitfor = MNT_NOWAIT;
1483 	vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
1484 		      hammer_sync_scan1, hammer_sync_scan2, &info);
1485 	if (info.error == 0 && waitfor == MNT_WAIT) {
1486 		info.waitfor = waitfor;
1487 		vmntvnodescan(hmp->mp, VMSC_GETVP,
1488 			      hammer_sync_scan1, hammer_sync_scan2, &info);
1489 	}
1490 	if (waitfor == MNT_WAIT) {
1491 		hammer_flusher_sync(hmp);
1492 		hammer_flusher_sync(hmp);
1493 	} else {
1494 		hammer_flusher_async(hmp, NULL);
1495 		hammer_flusher_async(hmp, NULL);
1496 	}
1497 	return(info.error);
1498 }
1499 
1500 static int
1501 hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
1502 {
1503 	struct hammer_inode *ip;
1504 
1505 	ip = VTOI(vp);
1506 	if (vp->v_type == VNON || ip == NULL ||
1507 	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1508 	     RB_EMPTY(&vp->v_rbdirty_tree))) {
1509 		return(-1);
1510 	}
1511 	return(0);
1512 }
1513 
1514 static int
1515 hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
1516 {
1517 	struct hammer_sync_info *info = data;
1518 	struct hammer_inode *ip;
1519 	int error;
1520 
1521 	ip = VTOI(vp);
1522 	if (vp->v_type == VNON || vp->v_type == VBAD || ip == NULL ||
1523 	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1524 	     RB_EMPTY(&vp->v_rbdirty_tree))) {
1525 		return(0);
1526 	}
1527 	error = VOP_FSYNC(vp, MNT_NOWAIT);
1528 	if (error)
1529 		info->error = error;
1530 	return(0);
1531 }
1532 
1533