xref: /dragonfly/sys/vfs/hammer/hammer_ondisk.c (revision 927da715)
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
35  */
36 /*
37  * Manage HAMMER's on-disk structures.  These routines are primarily
38  * responsible for interfacing with the kernel's I/O subsystem and for
39  * managing in-memory structures.
40  */
41 
42 #include "hammer.h"
43 #include <sys/fcntl.h>
44 #include <sys/nlookup.h>
45 #include <sys/buf.h>
46 #include <sys/buf2.h>
47 
48 static void hammer_free_volume(hammer_volume_t volume);
49 static int hammer_load_volume(hammer_volume_t volume);
50 static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
51 static int hammer_load_node(hammer_node_t node, int isnew);
52 
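/*
 * Red-black tree comparison functions.  Volumes are indexed by volume
 * number, buffers by their zone-X offset, and B-Tree nodes by their
 * node offset.
 */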
53 static int
54 hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
55 {
56 	if (vol1->vol_no < vol2->vol_no)
57 		return(-1);
58 	if (vol1->vol_no > vol2->vol_no)
59 		return(1);
60 	return(0);
61 }
62 
63 static int
64 hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
65 {
66 	if (buf1->zoneX_offset < buf2->zoneX_offset)
67 		return(-1);
68 	if (buf1->zoneX_offset > buf2->zoneX_offset)
69 		return(1);
70 	return(0);
71 }
72 
73 static int
74 hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
75 {
76 	if (node1->node_offset < node2->node_offset)
77 		return(-1);
78 	if (node1->node_offset > node2->node_offset)
79 		return(1);
80 	return(0);
81 }
82 
83 RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
84 	     hammer_vol_rb_compare, int32_t, vol_no);
85 RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
86 	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
87 RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
88 	     hammer_nod_rb_compare, hammer_off_t, node_offset);
89 
90 /************************************************************************
91  *				VOLUMES					*
92  ************************************************************************
93  *
94  * Load a HAMMER volume by name.  Returns 0 on success or a positive error
95  * code on failure.  Volumes must be loaded at mount time; get_volume() will
96  * not load a new volume.
97  *
98  * Calls made to hammer_load_volume() are single-threaded.
99  */
100 int
101 hammer_install_volume(struct hammer_mount *hmp, const char *volname,
102 		      struct vnode *devvp)
103 {
104 	struct mount *mp;
105 	hammer_volume_t volume;
106 	struct hammer_volume_ondisk *ondisk;
107 	struct nlookupdata nd;
108 	struct buf *bp = NULL;
109 	int error;
110 	int ronly;
111 	int setmp = 0;
112 
113 	mp = hmp->mp;
114 	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
115 
116 	/*
117 	 * Allocate a volume structure
118 	 */
119 	++hammer_count_volumes;
120 	volume = kmalloc(sizeof(*volume), M_HAMMER, M_WAITOK|M_ZERO);
121 	volume->vol_name = kstrdup(volname, M_HAMMER);
122 	volume->io.hmp = hmp;	/* bootstrap */
123 	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
124 	volume->io.offset = 0LL;
125 	volume->io.bytes = HAMMER_BUFSIZE;
126 
127 	/*
128 	 * Get the device vnode
129 	 */
130 	if (devvp == NULL) {
131 		error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
132 		if (error == 0)
133 			error = nlookup(&nd);
134 		if (error == 0)
135 			error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
136 		nlookup_done(&nd);
137 	} else {
138 		error = 0;
139 		volume->devvp = devvp;
140 	}
141 
142 	if (error == 0) {
143 		if (vn_isdisk(volume->devvp, &error)) {
144 			error = vfs_mountedon(volume->devvp);
145 		}
146 	}
147 	if (error == 0 &&
148 	    count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
149 		error = EBUSY;
150 	}
151 	if (error == 0) {
152 		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
153 		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
154 		if (error == 0) {
155 			error = VOP_OPEN(volume->devvp,
156 					 (ronly ? FREAD : FREAD|FWRITE),
157 					 FSCRED, NULL);
158 		}
159 		vn_unlock(volume->devvp);
160 	}
161 	if (error) {
162 		hammer_free_volume(volume);
163 		return(error);
164 	}
165 	volume->devvp->v_rdev->si_mountpoint = mp;
166 	setmp = 1;
167 
168 	/*
169 	 * Extract the volume number from the volume header and do various
170 	 * sanity checks.
171 	 */
172 	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
173 	if (error)
174 		goto late_failure;
175 	ondisk = (void *)bp->b_data;
176 	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
177 		kprintf("hammer_mount: volume %s has an invalid header\n",
178 			volume->vol_name);
179 		error = EFTYPE;
180 		goto late_failure;
181 	}
182 	volume->vol_no = ondisk->vol_no;
183 	volume->buffer_base = ondisk->vol_buf_beg;
184 	volume->vol_flags = ondisk->vol_flags;
185 	volume->nblocks = ondisk->vol_nblocks;
186 	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
187 				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
188 	volume->maxraw_off = ondisk->vol_buf_end;
189 
190 	if (RB_EMPTY(&hmp->rb_vols_root)) {
191 		hmp->fsid = ondisk->vol_fsid;
192 	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
193 		kprintf("hammer_mount: volume %s's fsid does not match "
194 			"other volumes\n", volume->vol_name);
195 		error = EFTYPE;
196 		goto late_failure;
197 	}
198 
199 	/*
200 	 * Insert the volume structure into the red-black tree.
201 	 */
202 	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
203 		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
204 			volume->vol_name, volume->vol_no);
205 		error = EEXIST;
206 	}
207 
208 	/*
209 	 * Set the root volume.  HAMMER special cases the rootvol structure;
210 	 * we do not hold a ref because this would prevent related I/O
211 	 * from being flushed.
212 	 */
213 	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
214 		hmp->rootvol = volume;
215 		hmp->nvolumes = ondisk->vol_count;
216 		if (bp) {
217 			brelse(bp);
218 			bp = NULL;
219 		}
220 		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
221 			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
222 		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
223 			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
224 	}
225 late_failure:
226 	if (bp)
227 		brelse(bp);
228 	if (error) {
229 		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
230 		if (setmp)
231 			volume->devvp->v_rdev->si_mountpoint = NULL;
232 		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
233 		hammer_free_volume(volume);
234 	}
235 	return (error);
236 }
237 
238 /*
239  * This is called for each volume when updating the mount point from
240  * read-write to read-only or vice-versa.
241  */
242 int
243 hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
244 {
245 	if (volume->devvp) {
246 		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
247 		if (volume->io.hmp->ronly) {
248 			/* do not call vinvalbuf */
249 			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
250 			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
251 		} else {
252 			/* do not call vinvalbuf */
253 			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
254 			VOP_CLOSE(volume->devvp, FREAD);
255 		}
256 		vn_unlock(volume->devvp);
257 	}
258 	return(0);
259 }
260 
261 /*
262  * Unload and free a HAMMER volume.  Must return >= 0 to continue the
263  * scan regardless of how errored the volume is.
264  */
265 int
266 hammer_unload_volume(hammer_volume_t volume, void *data __unused)
267 {
268 	struct hammer_mount *hmp = volume->io.hmp;
269 	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
270 	struct buf *bp;
271 
272 	/*
273 	 * Clean up the root volume pointer, which is held unlocked in hmp.
274 	 */
275 	if (hmp->rootvol == volume)
276 		hmp->rootvol = NULL;
277 
278 	/*
279 	 * We must not flush a dirty buffer to disk on umount.  It should
280 	 * have already been dealt with by the flusher, or we may be in
281 	 * catastrophic failure.
282 	 */
283 	hammer_io_clear_modify(&volume->io, 1);
284 	volume->io.waitdep = 1;
285 	bp = hammer_io_release(&volume->io, 1);
286 
287 	/*
288 	 * Clean up the persistent ref ioerror might have on the volume
289 	 */
290 	if (volume->io.ioerror) {
291 		volume->io.ioerror = 0;
292 		hammer_unref(&volume->io.lock);
293 	}
294 
295 	/*
296 	 * There should be no references on the volume, no clusters, and
297 	 * no super-clusters.
298 	 */
299 	KKASSERT(volume->io.lock.refs == 0);
300 	if (bp)
301 		brelse(bp);
302 
303 	volume->ondisk = NULL;
304 	if (volume->devvp) {
305 		if (volume->devvp->v_rdev &&
306 		    volume->devvp->v_rdev->si_mountpoint == hmp->mp
307 		) {
308 			volume->devvp->v_rdev->si_mountpoint = NULL;
309 		}
310 		if (ronly) {
311 			/*
312 			 * Make sure we don't sync anything to disk if we
313 			 * are in read-only mode (1) or critically-errored
314 			 * (2).  Note that there may be dirty buffers in
315 			 * normal read-only mode from crash recovery.
316 			 */
317 			vinvalbuf(volume->devvp, 0, 0, 0);
318 			VOP_CLOSE(volume->devvp, FREAD);
319 		} else {
320 			/*
321 			 * Normal termination, save any dirty buffers
322 			 * (XXX there really shouldn't be any).
323 			 */
324 			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
325 			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
326 		}
327 	}
328 
329 	/*
330 	 * Destroy the structure
331 	 */
332 	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
333 	hammer_free_volume(volume);
334 	return(0);
335 }
336 
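/*
 * Free a volume structure, releasing the volume name and dropping any
 * device vnode reference still held.
 */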
337 static
338 void
339 hammer_free_volume(hammer_volume_t volume)
340 {
341 	if (volume->vol_name) {
342 		kfree(volume->vol_name, M_HAMMER);
343 		volume->vol_name = NULL;
344 	}
345 	if (volume->devvp) {
346 		vrele(volume->devvp);
347 		volume->devvp = NULL;
348 	}
349 	--hammer_count_volumes;
350 	kfree(volume, M_HAMMER);
351 }
352 
353 /*
354  * Get a HAMMER volume.  The volume must already exist.
355  */
356 hammer_volume_t
357 hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
358 {
359 	struct hammer_volume *volume;
360 
361 	/*
362 	 * Locate the volume structure
363 	 */
364 	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
365 	if (volume == NULL) {
366 		*errorp = ENOENT;
367 		return(NULL);
368 	}
369 	hammer_ref(&volume->io.lock);
370 
371 	/*
372 	 * Deal with on-disk info
373 	 */
374 	if (volume->ondisk == NULL || volume->io.loading) {
375 		*errorp = hammer_load_volume(volume);
376 		if (*errorp) {
377 			hammer_rel_volume(volume, 1);
378 			volume = NULL;
379 		}
380 	} else {
381 		*errorp = 0;
382 	}
383 	return(volume);
384 }
385 
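/*
 * Add a reference to a volume the caller already has a pointer to,
 * loading the on-disk header if necessary.  The reference is dropped
 * again if the load fails.
 */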
386 int
387 hammer_ref_volume(hammer_volume_t volume)
388 {
389 	int error;
390 
391 	hammer_ref(&volume->io.lock);
392 
393 	/*
394 	 * Deal with on-disk info
395 	 */
396 	if (volume->ondisk == NULL || volume->io.loading) {
397 		error = hammer_load_volume(volume);
398 		if (error)
399 			hammer_rel_volume(volume, 1);
400 	} else {
401 		error = 0;
402 	}
403 	return (error);
404 }
405 
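/*
 * Return the root volume with a new reference.  The root volume pointer
 * is cached in the mount structure and is always present on a mounted
 * filesystem.
 */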
406 hammer_volume_t
407 hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
408 {
409 	hammer_volume_t volume;
410 
411 	volume = hmp->rootvol;
412 	KKASSERT(volume != NULL);
413 	hammer_ref(&volume->io.lock);
414 
415 	/*
416 	 * Deal with on-disk info
417 	 */
418 	if (volume->ondisk == NULL || volume->io.loading) {
419 		*errorp = hammer_load_volume(volume);
420 		if (*errorp) {
421 			hammer_rel_volume(volume, 1);
422 			volume = NULL;
423 		}
424 	} else {
425 		*errorp = 0;
426 	}
427 	return (volume);
428 }
429 
430 /*
431  * Load a volume's on-disk information.  The volume must be referenced and
432  * not locked.  We temporarily acquire an exclusive lock to interlock
433  * against releases or multiple gets.
434  */
435 static int
436 hammer_load_volume(hammer_volume_t volume)
437 {
438 	int error;
439 
440 	++volume->io.loading;
441 	hammer_lock_ex(&volume->io.lock);
442 
443 	if (volume->ondisk == NULL) {
444 		error = hammer_io_read(volume->devvp, &volume->io,
445 				       volume->maxraw_off);
446 		if (error == 0)
447 			volume->ondisk = (void *)volume->io.bp->b_data;
448 	} else {
449 		error = 0;
450 	}
451 	--volume->io.loading;
452 	hammer_unlock(&volume->io.lock);
453 	return(error);
454 }
455 
456 /*
457  * Release a volume.  Call hammer_io_release on the last reference.  We have
458  * to acquire an exclusive lock to interlock against volume->ondisk tests
459  * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
460  * lock to be held.
461  *
462  * Volumes are not unloaded from memory during normal operation.
463  */
464 void
465 hammer_rel_volume(hammer_volume_t volume, int flush)
466 {
467 	struct buf *bp = NULL;
468 
469 	crit_enter();
470 	if (volume->io.lock.refs == 1) {
471 		++volume->io.loading;
472 		hammer_lock_ex(&volume->io.lock);
473 		if (volume->io.lock.refs == 1) {
474 			volume->ondisk = NULL;
475 			bp = hammer_io_release(&volume->io, flush);
476 		}
477 		--volume->io.loading;
478 		hammer_unlock(&volume->io.lock);
479 	}
480 	hammer_unref(&volume->io.lock);
481 	if (bp)
482 		brelse(bp);
483 	crit_exit();
484 }
485 
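/*
 * Verify that every volume in the volume set (vol_no 0 through
 * nvolumes - 1) was installed at mount time.  Returns EINVAL if any
 * volume is missing.
 */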
486 int
487 hammer_mountcheck_volumes(struct hammer_mount *hmp)
488 {
489 	hammer_volume_t vol;
490 	int i;
491 
492 	for (i = 0; i < hmp->nvolumes; ++i) {
493 		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
494 		if (vol == NULL)
495 			return(EINVAL);
496 	}
497 	return(0);
498 }
499 
500 /************************************************************************
501  *				BUFFERS					*
502  ************************************************************************
503  *
504  * Manage buffers.  Currently all blockmap-backed zones are translated
505  * to zone-2 buffer offsets.
506  */
507 hammer_buffer_t
508 hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
509 		  int bytes, int isnew, int *errorp)
510 {
511 	hammer_buffer_t buffer;
512 	hammer_volume_t volume;
513 	hammer_off_t	zone2_offset;
514 	hammer_io_type_t iotype;
515 	int vol_no;
516 	int zone;
517 
518 	buf_offset &= ~HAMMER_BUFMASK64;
519 again:
520 	/*
521 	 * Shortcut if the buffer is already cached
522 	 */
523 	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
524 	if (buffer) {
525 		if (buffer->io.lock.refs == 0)
526 			++hammer_count_refedbufs;
527 		hammer_ref(&buffer->io.lock);
528 
529 		/*
530 		 * Once refed the ondisk field will not be cleared by
531 		 * any other action.
532 		 */
533 		if (buffer->ondisk && buffer->io.loading == 0) {
534 			*errorp = 0;
535 			return(buffer);
536 		}
537 
538 		/*
539 		 * The buffer is no longer loose if it has a ref, and
540 		 * cannot become loose once it gains a ref.  Loose
541 		 * buffers will never be in a modified state.  This should
542 		 * only occur on the 0->1 transition of refs.
543 		 *
544 		 * lose_list can be modified via a biodone() interrupt.
545 		 */
546 		if (buffer->io.mod_list == &hmp->lose_list) {
547 			crit_enter();	/* biodone race against list */
548 			TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
549 				     mod_entry);
550 			crit_exit();
551 			buffer->io.mod_list = NULL;
552 			KKASSERT(buffer->io.modified == 0);
553 		}
554 		goto found;
555 	}
556 
557 	/*
558 	 * What is the buffer class?
559 	 */
560 	zone = HAMMER_ZONE_DECODE(buf_offset);
561 
562 	switch(zone) {
563 	case HAMMER_ZONE_LARGE_DATA_INDEX:
564 	case HAMMER_ZONE_SMALL_DATA_INDEX:
565 		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
566 		break;
567 	case HAMMER_ZONE_UNDO_INDEX:
568 		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
569 		break;
570 	case HAMMER_ZONE_META_INDEX:
571 	default:
572 		/*
573 		 * NOTE: inode data and directory entries are placed in this
574 		 * zone.  inode atime/mtime is updated in-place and thus
575 		 * buffers containing inodes must be synchronized as
576 		 * meta-buffers, same as buffers containing B-Tree info.
577 		 */
578 		iotype = HAMMER_STRUCTURE_META_BUFFER;
579 		break;
580 	}
581 
582 	/*
583 	 * Handle blockmap offset translations
584 	 */
585 	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
586 		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
587 	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
588 		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
589 	} else {
590 		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
591 		zone2_offset = buf_offset;
592 		*errorp = 0;
593 	}
594 	if (*errorp)
595 		return(NULL);
596 
597 	/*
598 	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
599 	 * specifications.
600 	 */
601 	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
602 		 HAMMER_ZONE_RAW_BUFFER);
603 	vol_no = HAMMER_VOL_DECODE(zone2_offset);
604 	volume = hammer_get_volume(hmp, vol_no, errorp);
605 	if (volume == NULL)
606 		return(NULL);
607 
608 	KKASSERT(zone2_offset < volume->maxbuf_off);
609 
610 	/*
611 	 * Allocate a new buffer structure.  We will check for races later.
612 	 */
613 	++hammer_count_buffers;
614 	buffer = kmalloc(sizeof(*buffer), M_HAMMER,
615 			 M_WAITOK|M_ZERO|M_USE_RESERVE);
616 	buffer->zone2_offset = zone2_offset;
617 	buffer->zoneX_offset = buf_offset;
618 
619 	hammer_io_init(&buffer->io, volume, iotype);
620 	buffer->io.offset = volume->ondisk->vol_buf_beg +
621 			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
622 	buffer->io.bytes = bytes;
623 	TAILQ_INIT(&buffer->clist);
624 	hammer_ref(&buffer->io.lock);
625 
626 	/*
627 	 * Insert the buffer into the RB tree and handle late collisions.
628 	 */
629 	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
630 		hammer_unref(&buffer->io.lock);
631 		--hammer_count_buffers;
632 		kfree(buffer, M_HAMMER);
633 		goto again;
634 	}
635 	++hammer_count_refedbufs;
636 found:
637 
638 	/*
639 	 * Deal with on-disk info and loading races.
640 	 */
641 	if (buffer->ondisk == NULL || buffer->io.loading) {
642 		*errorp = hammer_load_buffer(buffer, isnew);
643 		if (*errorp) {
644 			hammer_rel_buffer(buffer, 1);
645 			buffer = NULL;
646 		}
647 	} else {
648 		*errorp = 0;
649 	}
650 	return(buffer);
651 }
652 
653 /*
654  * This is used by the direct-read code to deal with large-data buffers
655  * created by the reblocker and mirror-write code.  The direct-read code
656  * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
657  * running hammer buffers must be fully synced to disk before we can issue
658  * the direct-read.
659  *
660  * This code path is not considered critical as only the reblocker and
661  * mirror-write code will create large-data buffers via the HAMMER buffer
662  * subsystem.  They do that because they operate at the B-Tree level and
663  * do not access the vnode/inode structures.
664  */
665 void
666 hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
667 {
668 	hammer_buffer_t buffer;
669 	int error;
670 
671 	KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
672 		 HAMMER_ZONE_LARGE_DATA);
673 
674 	while (bytes > 0) {
675 		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
676 				   base_offset);
677 		if (buffer && (buffer->io.modified || buffer->io.running)) {
678 			error = hammer_ref_buffer(buffer);
679 			if (error == 0) {
680 				hammer_io_wait(&buffer->io);
681 				if (buffer->io.modified) {
682 					hammer_io_write_interlock(&buffer->io);
683 					hammer_io_flush(&buffer->io);
684 					hammer_io_done_interlock(&buffer->io);
685 					hammer_io_wait(&buffer->io);
686 				}
687 				hammer_rel_buffer(buffer, 0);
688 			}
689 		}
690 		base_offset += HAMMER_BUFSIZE;
691 		bytes -= HAMMER_BUFSIZE;
692 	}
693 }
694 
695 /*
696  * Destroy all buffers covering the specified zoneX offset range.  This
697  * is called when the related blockmap layer2 entry is freed or when
698  * a direct write bypasses our buffer/buffer-cache subsystem.
699  *
700  * The buffers may be referenced by the caller itself.  Setting reclaim
701  * will cause the buffer to be destroyed when its ref count reaches zero.
702  */
703 void
704 hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
705 		   hammer_off_t zone2_offset, int bytes)
706 {
707 	hammer_buffer_t buffer;
708 	hammer_volume_t volume;
709 	int vol_no;
710 	int error;
711 
712 	vol_no = HAMMER_VOL_DECODE(zone2_offset);
713 	volume = hammer_get_volume(hmp, vol_no, &error);
714 	KKASSERT(error == 0);
715 
716 	while (bytes > 0) {
717 		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
718 				   base_offset);
719 		if (buffer) {
720 			error = hammer_ref_buffer(buffer);
721 			if (error == 0) {
722 				KKASSERT(buffer->zone2_offset == zone2_offset);
723 				hammer_io_clear_modify(&buffer->io, 1);
724 				buffer->io.reclaim = 1;
725 				buffer->io.waitdep = 1;
726 				KKASSERT(buffer->io.volume == volume);
727 				hammer_rel_buffer(buffer, 0);
728 			}
729 		} else {
730 			hammer_io_inval(volume, zone2_offset);
731 		}
732 		base_offset += HAMMER_BUFSIZE;
733 		zone2_offset += HAMMER_BUFSIZE;
734 		bytes -= HAMMER_BUFSIZE;
735 	}
736 	hammer_rel_volume(volume, 0);
737 }
738 
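/*
 * Load a buffer's on-disk data.  If isnew is set the underlying
 * buffer cache buffer is acquired without a media read.
 */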
739 static int
740 hammer_load_buffer(hammer_buffer_t buffer, int isnew)
741 {
742 	hammer_volume_t volume;
743 	int error;
744 
745 	/*
746 	 * Load the buffer's on-disk info
747 	 */
748 	volume = buffer->io.volume;
749 	++buffer->io.loading;
750 	hammer_lock_ex(&buffer->io.lock);
751 
752 	if (hammer_debug_io & 0x0001) {
753 		kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
754 			buffer->zoneX_offset, buffer->zone2_offset, isnew,
755 			buffer->ondisk);
756 	}
757 
758 	if (buffer->ondisk == NULL) {
759 		if (isnew) {
760 			error = hammer_io_new(volume->devvp, &buffer->io);
761 		} else {
762 			error = hammer_io_read(volume->devvp, &buffer->io,
763 					       volume->maxraw_off);
764 		}
765 		if (error == 0)
766 			buffer->ondisk = (void *)buffer->io.bp->b_data;
767 	} else if (isnew) {
768 		error = hammer_io_new(volume->devvp, &buffer->io);
769 	} else {
770 		error = 0;
771 	}
772 	--buffer->io.loading;
773 	hammer_unlock(&buffer->io.lock);
774 	return (error);
775 }
776 
777 /*
778  * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
779  * This routine is only called during unmount.
780  */
781 int
782 hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
783 {
784 	/*
785 	 * Clean up the persistent ref ioerror might have on the buffer
786 	 * and acquire a ref (steal ioerror's if we can).
787 	 */
788 	if (buffer->io.ioerror) {
789 		buffer->io.ioerror = 0;
790 	} else {
791 		if (buffer->io.lock.refs == 0)
792 			++hammer_count_refedbufs;
793 		hammer_ref(&buffer->io.lock);
794 	}
795 
796 	/*
797 	 * We must not flush a dirty buffer to disk on umount.  It should
798 	 * have already been dealt with by the flusher, or we may be in
799 	 * catastrophic failure.
800 	 */
801 	hammer_io_clear_modify(&buffer->io, 1);
802 	hammer_flush_buffer_nodes(buffer);
803 	KKASSERT(buffer->io.lock.refs == 1);
804 	hammer_rel_buffer(buffer, 2);
805 	return(0);
806 }
807 
808 /*
809  * Reference a buffer that is either already referenced or reachable via
810  * a specially handled pointer (aka cursor->buffer).
811  */
812 int
813 hammer_ref_buffer(hammer_buffer_t buffer)
814 {
815 	int error;
816 
817 	if (buffer->io.lock.refs == 0)
818 		++hammer_count_refedbufs;
819 	hammer_ref(&buffer->io.lock);
820 
821 	/*
822 	 * At this point a biodone() will not touch the buffer other than
823 	 * incidental bits.  However, lose_list can be modified via
824 	 * a biodone() interrupt.
825 	 *
826 	 * No longer loose
827 	 */
828 	if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
829 		crit_enter();
830 		TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
831 		buffer->io.mod_list = NULL;
832 		crit_exit();
833 	}
834 
835 	if (buffer->ondisk == NULL || buffer->io.loading) {
836 		error = hammer_load_buffer(buffer, 0);
837 		if (error) {
838 			hammer_rel_buffer(buffer, 1);
839 			/*
840 			 * NOTE: buffer pointer can become stale after
841 			 * the above release.
842 			 */
843 		}
844 	} else {
845 		error = 0;
846 	}
847 	return(error);
848 }
849 
850 /*
851  * Release a buffer.  We have to deal with several places where
852  * another thread can ref the buffer.
853  *
854  * Only destroy the structure itself if the related buffer cache buffer
855  * was disassociated from it.  This ties the management of the structure
856  * to the buffer cache subsystem.  buffer->ondisk determines whether the
857  * embedded io is referenced or not.
858  */
859 void
860 hammer_rel_buffer(hammer_buffer_t buffer, int flush)
861 {
862 	hammer_volume_t volume;
863 	struct buf *bp = NULL;
864 	int freeme = 0;
865 
866 	crit_enter();
867 	if (buffer->io.lock.refs == 1) {
868 		++buffer->io.loading;	/* force interlock check */
869 		hammer_lock_ex(&buffer->io.lock);
870 		if (buffer->io.lock.refs == 1) {
871 			bp = hammer_io_release(&buffer->io, flush);
872 
873 			if (buffer->io.lock.refs == 1)
874 				--hammer_count_refedbufs;
875 
876 			if (buffer->io.bp == NULL &&
877 			    buffer->io.lock.refs == 1) {
878 				/*
879 				 * Final cleanup
880 				 *
881 				 * NOTE: It is impossible for any associated
882 				 * B-Tree nodes to have refs if the buffer
883 				 * has no additional refs.
884 				 */
885 				RB_REMOVE(hammer_buf_rb_tree,
886 					  &buffer->io.hmp->rb_bufs_root,
887 					  buffer);
888 				volume = buffer->io.volume;
889 				buffer->io.volume = NULL; /* sanity */
890 				hammer_rel_volume(volume, 0);
891 				hammer_io_clear_modlist(&buffer->io);
892 				hammer_flush_buffer_nodes(buffer);
893 				KKASSERT(TAILQ_EMPTY(&buffer->clist));
894 				freeme = 1;
895 			}
896 		}
897 		--buffer->io.loading;
898 		hammer_unlock(&buffer->io.lock);
899 	}
900 	hammer_unref(&buffer->io.lock);
901 	crit_exit();
902 	if (bp)
903 		brelse(bp);
904 	if (freeme) {
905 		--hammer_count_buffers;
906 		kfree(buffer, M_HAMMER);
907 	}
908 }
909 
910 /*
911  * Access the filesystem buffer containing the specified hammer offset.
912  * buf_offset is a conglomeration of the volume number and vol_buf_beg
913  * relative buffer offset.  It must also have bit 55 set to be valid.
914  * (see hammer_off_t in hammer_disk.h).
915  *
916  * Any prior buffer in *bufferp will be released and replaced by the
917  * requested buffer.
918  */
919 static __inline
920 void *
921 _hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
922 	     int *errorp, struct hammer_buffer **bufferp)
923 {
924 	hammer_buffer_t buffer;
925 	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;
926 
927 	buf_offset &= ~HAMMER_BUFMASK64;
928 	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);
929 
930 	buffer = *bufferp;
931 	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
932 			       buffer->zoneX_offset != buf_offset)) {
933 		if (buffer)
934 			hammer_rel_buffer(buffer, 0);
935 		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
936 		*bufferp = buffer;
937 	} else {
938 		*errorp = 0;
939 	}
940 
941 	/*
942 	 * Return a pointer to the buffer data.
943 	 */
944 	if (buffer == NULL)
945 		return(NULL);
946 	else
947 		return((char *)buffer->ondisk + xoff);
948 }
949 
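/*
 * Convenience wrappers around _hammer_bread().  hammer_bread() accesses a
 * single HAMMER_BUFSIZE buffer; hammer_bread_ext() rounds the request up
 * to a multiple of HAMMER_BUFSIZE.
 *
 * Typical caller pattern (sketch only):
 *
 *	hammer_buffer_t buffer = NULL;
 *	ondisk = hammer_bread(hmp, buf_offset, &error, &buffer);
 *	... use the returned data ...
 *	if (buffer)
 *		hammer_rel_buffer(buffer, 0);
 */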
950 void *
951 hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
952 	     int *errorp, struct hammer_buffer **bufferp)
953 {
954 	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
955 }
956 
957 void *
958 hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
959 	         int *errorp, struct hammer_buffer **bufferp)
960 {
961 	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
962 	return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
963 }
964 
965 /*
966  * Access the filesystem buffer containing the specified hammer offset.
967  * No disk read operation occurs.  The result buffer may contain garbage.
968  *
969  * Any prior buffer in *bufferp will be released and replaced by the
970  * requested buffer.
971  *
972  * This function marks the buffer dirty but does not increment its
973  * modify_refs count.
974  */
975 static __inline
976 void *
977 _hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
978 	     int *errorp, struct hammer_buffer **bufferp)
979 {
980 	hammer_buffer_t buffer;
981 	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;
982 
983 	buf_offset &= ~HAMMER_BUFMASK64;
984 
985 	buffer = *bufferp;
986 	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
987 			       buffer->zoneX_offset != buf_offset)) {
988 		if (buffer)
989 			hammer_rel_buffer(buffer, 0);
990 		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
991 		*bufferp = buffer;
992 	} else {
993 		*errorp = 0;
994 	}
995 
996 	/*
997 	 * Return a pointer to the buffer data.
998 	 */
999 	if (buffer == NULL)
1000 		return(NULL);
1001 	else
1002 		return((char *)buffer->ondisk + xoff);
1003 }
1004 
1005 void *
1006 hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
1007 	     int *errorp, struct hammer_buffer **bufferp)
1008 {
1009 	return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
1010 }
1011 
1012 void *
1013 hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
1014 		int *errorp, struct hammer_buffer **bufferp)
1015 {
1016 	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
1017 	return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
1018 }
1019 
1020 /************************************************************************
1021  *				NODES					*
1022  ************************************************************************
1023  *
1024  * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
1025  * method used by the HAMMER filesystem.
1026  *
1027  * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
1028  * associated with its buffer, and will only reference the buffer while
1029  * the node itself is referenced.
1030  *
1031  * A hammer_node can also be passively associated with other HAMMER
1032  * structures, such as inodes, while retaining 0 references.  These
1033  * associations can be cleared backwards using a pointer-to-pointer in
1034  * the hammer_node.
1035  *
1036  * This allows the HAMMER implementation to cache hammer_nodes long-term
1037  * and short-cut a great deal of the infrastructure's complexity.  In
1038  * most cases a cached node can be reacquired without having to dip into
1039  * either the buffer or cluster management code.
1040  *
1041  * The caller must pass a referenced cluster on call and will retain
1042  * ownership of the reference on return.  The node will acquire its own
1043  * additional references, if necessary.
1044  */
1045 hammer_node_t
1046 hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset,
1047 		int isnew, int *errorp)
1048 {
1049 	hammer_node_t node;
1050 
1051 	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);
1052 
1053 	/*
1054 	 * Locate the structure, allocating one if necessary.
1055 	 */
1056 again:
1057 	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
1058 	if (node == NULL) {
1059 		++hammer_count_nodes;
1060 		node = kmalloc(sizeof(*node), M_HAMMER, M_WAITOK|M_ZERO|M_USE_RESERVE);
1061 		node->node_offset = node_offset;
1062 		node->hmp = hmp;
1063 		TAILQ_INIT(&node->cursor_list);
1064 		TAILQ_INIT(&node->cache_list);
1065 		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
1066 			--hammer_count_nodes;
1067 			kfree(node, M_HAMMER);
1068 			goto again;
1069 		}
1070 	}
1071 	hammer_ref(&node->lock);
1072 	if (node->ondisk)
1073 		*errorp = 0;
1074 	else
1075 		*errorp = hammer_load_node(node, isnew);
1076 	if (*errorp) {
1077 		hammer_rel_node(node);
1078 		node = NULL;
1079 	}
1080 	return(node);
1081 }
1082 
1083 /*
1084  * Reference an already-referenced node.
1085  */
1086 void
1087 hammer_ref_node(hammer_node_t node)
1088 {
1089 	KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
1090 	hammer_ref(&node->lock);
1091 }
1092 
1093 /*
1094  * Load a node's on-disk data reference.
1095  */
1096 static int
1097 hammer_load_node(hammer_node_t node, int isnew)
1098 {
1099 	hammer_buffer_t buffer;
1100 	hammer_off_t buf_offset;
1101 	int error;
1102 
1103 	error = 0;
1104 	++node->loading;
1105 	hammer_lock_ex(&node->lock);
1106 	if (node->ondisk == NULL) {
1107 		/*
1108 		 * This is a little confusing but the gist is that
1109 		 * node->buffer determines whether the node is on
1110 		 * the buffer's clist and node->ondisk determines
1111 		 * whether the buffer is referenced.
1112 		 *
1113 		 * We could be racing a buffer release, in which case
1114 		 * node->buffer may become NULL while we are blocked
1115 		 * referencing the buffer.
1116 		 */
1117 		if ((buffer = node->buffer) != NULL) {
1118 			error = hammer_ref_buffer(buffer);
1119 			if (error == 0 && node->buffer == NULL) {
1120 				TAILQ_INSERT_TAIL(&buffer->clist,
1121 						  node, entry);
1122 				node->buffer = buffer;
1123 			}
1124 		} else {
1125 			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
1126 			buffer = hammer_get_buffer(node->hmp, buf_offset,
1127 						   HAMMER_BUFSIZE, 0, &error);
1128 			if (buffer) {
1129 				KKASSERT(error == 0);
1130 				TAILQ_INSERT_TAIL(&buffer->clist,
1131 						  node, entry);
1132 				node->buffer = buffer;
1133 			}
1134 		}
1135 		if (error)
1136 			goto failed;
1137 		node->ondisk = (void *)((char *)buffer->ondisk +
1138 				        (node->node_offset & HAMMER_BUFMASK));
1139 		if (isnew == 0 &&
1140 		    (node->flags & HAMMER_NODE_CRCGOOD) == 0) {
1141 			if (hammer_crc_test_btree(node->ondisk) == 0)
1142 				Debugger("CRC FAILED: B-TREE NODE");
1143 			node->flags |= HAMMER_NODE_CRCGOOD;
1144 		}
1145 	}
1146 failed:
1147 	--node->loading;
1148 	hammer_unlock(&node->lock);
1149 	return (error);
1150 }
1151 
1152 /*
1153  * Safely reference a node, interlock against flushes via the IO subsystem.
1154  */
1155 hammer_node_t
1156 hammer_ref_node_safe(struct hammer_mount *hmp, hammer_node_cache_t cache,
1157 		     int *errorp)
1158 {
1159 	hammer_node_t node;
1160 
1161 	node = cache->node;
1162 	if (node != NULL) {
1163 		hammer_ref(&node->lock);
1164 		if (node->ondisk)
1165 			*errorp = 0;
1166 		else
1167 			*errorp = hammer_load_node(node, 0);
1168 		if (*errorp) {
1169 			hammer_rel_node(node);
1170 			node = NULL;
1171 		}
1172 	} else {
1173 		*errorp = ENOENT;
1174 	}
1175 	return(node);
1176 }
1177 
1178 /*
1179  * Release a hammer_node.  On the last release the node dereferences
1180  * its underlying buffer and may or may not be destroyed.
1181  */
1182 void
1183 hammer_rel_node(hammer_node_t node)
1184 {
1185 	hammer_buffer_t buffer;
1186 
1187 	/*
1188 	 * If this isn't the last ref just decrement the ref count and
1189 	 * return.
1190 	 */
1191 	if (node->lock.refs > 1) {
1192 		hammer_unref(&node->lock);
1193 		return;
1194 	}
1195 
1196 	/*
1197 	 * If there is no ondisk info or no buffer then the node failed to
1198 	 * load; remove the last reference and destroy the node.
1199 	 */
1200 	if (node->ondisk == NULL) {
1201 		hammer_unref(&node->lock);
1202 		hammer_flush_node(node);
1203 		/* node is stale now */
1204 		return;
1205 	}
1206 
1207 	/*
1208 	 * Do not disassociate the node from the buffer if it represents
1209 	 * a modified B-Tree node that still needs its crc to be generated.
1210 	 */
1211 	if (node->flags & HAMMER_NODE_NEEDSCRC)
1212 		return;
1213 
1214 	/*
1215 	 * Do final cleanups and then either destroy the node or leave it
1216 	 * passively cached.  The buffer reference is removed regardless.
1217 	 */
1218 	buffer = node->buffer;
1219 	node->ondisk = NULL;
1220 
1221 	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
1222 		hammer_unref(&node->lock);
1223 		hammer_rel_buffer(buffer, 0);
1224 		return;
1225 	}
1226 
1227 	/*
1228 	 * Destroy the node.
1229 	 */
1230 	hammer_unref(&node->lock);
1231 	hammer_flush_node(node);
1232 	/* node is stale */
1233 	hammer_rel_buffer(buffer, 0);
1234 }
1235 
1236 /*
1237  * Free space on-media associated with a B-Tree node.
1238  */
1239 void
1240 hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
1241 {
1242 	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
1243 	node->flags |= HAMMER_NODE_DELETED;
1244 	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
1245 }
1246 
1247 /*
1248  * Passively cache a referenced hammer_node.  The caller may release
1249  * the node on return.
1250  */
1251 void
1252 hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
1253 {
1254 	/*
1255 	 * If the node doesn't exist, or is being deleted, don't cache it!
1256 	 *
1257 	 * The node can only ever be NULL in the I/O failure path.
1258 	 */
1259 	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
1260 		return;
1261 	if (cache->node == node)
1262 		return;
1263 	while (cache->node)
1264 		hammer_uncache_node(cache);
1265 	if (node->flags & HAMMER_NODE_DELETED)
1266 		return;
1267 	cache->node = node;
1268 	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
1269 }
1270 
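/*
 * Remove a passive cache association.  If this was the node's last
 * cache entry the node may be destroyed via hammer_flush_node().
 */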
1271 void
1272 hammer_uncache_node(hammer_node_cache_t cache)
1273 {
1274 	hammer_node_t node;
1275 
1276 	if ((node = cache->node) != NULL) {
1277 		TAILQ_REMOVE(&node->cache_list, cache, entry);
1278 		cache->node = NULL;
1279 		if (TAILQ_EMPTY(&node->cache_list))
1280 			hammer_flush_node(node);
1281 	}
1282 }
1283 
1284 /*
1285  * Remove a node's cache references and destroy the node if it has no
1286  * other references or backing store.
1287  */
1288 void
1289 hammer_flush_node(hammer_node_t node)
1290 {
1291 	hammer_node_cache_t cache;
1292 	hammer_buffer_t buffer;
1293 
1294 	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
1295 		TAILQ_REMOVE(&node->cache_list, cache, entry);
1296 		cache->node = NULL;
1297 	}
1298 	if (node->lock.refs == 0 && node->ondisk == NULL) {
1299 		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
1300 		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
1301 		if ((buffer = node->buffer) != NULL) {
1302 			node->buffer = NULL;
1303 			TAILQ_REMOVE(&buffer->clist, node, entry);
1304 			/* buffer is unreferenced because ondisk is NULL */
1305 		}
1306 		--hammer_count_nodes;
1307 		kfree(node, M_HAMMER);
1308 	}
1309 }
1310 
1311 /*
1312  * Flush passively cached B-Tree nodes associated with this buffer.
1313  * This is only called when the buffer is about to be destroyed, so
1314  * none of the nodes should have any references.  The buffer is locked.
1315  *
1316  * We may be interlocked with the buffer.
1317  */
1318 void
1319 hammer_flush_buffer_nodes(hammer_buffer_t buffer)
1320 {
1321 	hammer_node_t node;
1322 
1323 	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
1324 		KKASSERT(node->ondisk == NULL);
1325 		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
1326 
1327 		if (node->lock.refs == 0) {
1328 			hammer_ref(&node->lock);
1329 			node->flags |= HAMMER_NODE_FLUSH;
1330 			hammer_rel_node(node);
1331 		} else {
1332 			KKASSERT(node->loading != 0);
1333 			KKASSERT(node->buffer != NULL);
1334 			buffer = node->buffer;
1335 			node->buffer = NULL;
1336 			TAILQ_REMOVE(&buffer->clist, node, entry);
1337 			/* buffer is unreferenced because ondisk is NULL */
1338 		}
1339 	}
1340 }
1341 
1342 
1343 /************************************************************************
1344  *				ALLOCATORS				*
1345  ************************************************************************/
1346 
1347 /*
1348  * Allocate a B-Tree node.
1349  */
1350 hammer_node_t
1351 hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
1352 {
1353 	hammer_buffer_t buffer = NULL;
1354 	hammer_node_t node = NULL;
1355 	hammer_off_t node_offset;
1356 
1357 	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
1358 					    sizeof(struct hammer_node_ondisk),
1359 					    errorp);
1360 	if (*errorp == 0) {
1361 		node = hammer_get_node(trans->hmp, node_offset, 1, errorp);
1362 		hammer_modify_node_noundo(trans, node);
1363 		bzero(node->ondisk, sizeof(*node->ondisk));
1364 		hammer_modify_node_done(node);
1365 	}
1366 	if (buffer)
1367 		hammer_rel_buffer(buffer, 0);
1368 	return(node);
1369 }
1370 
1371 /*
1372  * Allocate data.  If the address of a data buffer is supplied then
1373  * any prior non-NULL *data_bufferp will be released and *data_bufferp
1374  * will be set to the related buffer.  The caller must release it when
1375  * finally done.  The initial *data_bufferp should be set to NULL by
1376  * the caller.
1377  *
1378  * The caller is responsible for making hammer_modify*() calls on the
1379  * *data_bufferp.
1380  */
1381 void *
1382 hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
1383 		  u_int16_t rec_type, hammer_off_t *data_offsetp,
1384 		  struct hammer_buffer **data_bufferp, int *errorp)
1385 {
1386 	void *data;
1387 	int zone;
1388 
1389 	/*
1390 	 * Allocate data
1391 	 */
1392 	if (data_len) {
1393 		switch(rec_type) {
1394 		case HAMMER_RECTYPE_INODE:
1395 		case HAMMER_RECTYPE_DIRENTRY:
1396 		case HAMMER_RECTYPE_EXT:
1397 		case HAMMER_RECTYPE_FIX:
1398 		case HAMMER_RECTYPE_PFS:
1399 			zone = HAMMER_ZONE_META_INDEX;
1400 			break;
1401 		case HAMMER_RECTYPE_DATA:
1402 		case HAMMER_RECTYPE_DB:
1403 			if (data_len <= HAMMER_BUFSIZE / 2) {
1404 				zone = HAMMER_ZONE_SMALL_DATA_INDEX;
1405 			} else {
1406 				data_len = (data_len + HAMMER_BUFMASK) &
1407 					   ~HAMMER_BUFMASK;
1408 				zone = HAMMER_ZONE_LARGE_DATA_INDEX;
1409 			}
1410 			break;
1411 		default:
1412 			panic("hammer_alloc_data: rec_type %04x unknown",
1413 			      rec_type);
1414 			zone = 0;	/* NOT REACHED */
1415 			break;
1416 		}
1417 		*data_offsetp = hammer_blockmap_alloc(trans, zone,
1418 						      data_len, errorp);
1419 	} else {
1420 		*data_offsetp = 0;
1421 	}
1422 	if (*errorp == 0 && data_bufferp) {
1423 		if (data_len) {
1424 			data = hammer_bread_ext(trans->hmp, *data_offsetp,
1425 						data_len, errorp, data_bufferp);
1426 		} else {
1427 			data = NULL;
1428 		}
1429 	} else {
1430 		data = NULL;
1431 	}
1432 	return(data);
1433 }
1434 
1435 /*
1436  * Sync dirty buffers to the media and clean up any loose ends.
1437  *
1438  * These functions do not start the flusher going; they simply
1439  * queue everything up to the flusher.
1440  */
1441 static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
1442 static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);
1443 
1444 int
1445 hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
1446 {
1447 	struct hammer_sync_info info;
1448 
1449 	info.error = 0;
1450 	info.waitfor = waitfor;
1451 	if (waitfor == MNT_WAIT) {
1452 		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
1453 			      hammer_sync_scan1, hammer_sync_scan2, &info);
1454 	} else {
1455 		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
1456 			      hammer_sync_scan1, hammer_sync_scan2, &info);
1457 	}
1458 	return(info.error);
1459 }
1460 
1461 /*
1462  * Filesystem sync.  If doing a synchronous sync make a second pass on
1463  * the vnodes in case any were already flushing during the first pass,
1464  * and activate the flusher twice (the second time brings the UNDO FIFO's
1465  * start position up to the end position after the first call).
1466  */
1467 int
1468 hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
1469 {
1470 	struct hammer_sync_info info;
1471 
1472 	info.error = 0;
1473 	info.waitfor = MNT_NOWAIT;
1474 	vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
1475 		      hammer_sync_scan1, hammer_sync_scan2, &info);
1476 	if (info.error == 0 && waitfor == MNT_WAIT) {
1477 		info.waitfor = waitfor;
1478 		vmntvnodescan(hmp->mp, VMSC_GETVP,
1479 			      hammer_sync_scan1, hammer_sync_scan2, &info);
1480 	}
1481 	if (waitfor == MNT_WAIT) {
1482 		hammer_flusher_sync(hmp);
1483 		hammer_flusher_sync(hmp);
1484 	} else {
1485 		hammer_flusher_async(hmp, NULL);
1486 		hammer_flusher_async(hmp, NULL);
1487 	}
1488 	return(info.error);
1489 }
1490 
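/*
 * Fast-pass filter for vmntvnodescan().  Vnodes with no HAMMER inode,
 * or with nothing dirty to flush, are skipped (-1); everything else
 * falls through to the slow-pass function.
 */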
1491 static int
1492 hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
1493 {
1494 	struct hammer_inode *ip;
1495 
1496 	ip = VTOI(vp);
1497 	if (vp->v_type == VNON || ip == NULL ||
1498 	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1499 	     RB_EMPTY(&vp->v_rbdirty_tree))) {
1500 		return(-1);
1501 	}
1502 	return(0);
1503 }
1504 
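/*
 * Slow-pass worker for vmntvnodescan().  fsync any vnode that made it
 * past the filter and record any error for the caller.
 */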
1505 static int
1506 hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
1507 {
1508 	struct hammer_sync_info *info = data;
1509 	struct hammer_inode *ip;
1510 	int error;
1511 
1512 	ip = VTOI(vp);
1513 	if (vp->v_type == VNON || vp->v_type == VBAD ||
1514 	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1515 	     RB_EMPTY(&vp->v_rbdirty_tree))) {
1516 		return(0);
1517 	}
1518 	error = VOP_FSYNC(vp, MNT_NOWAIT);
1519 	if (error)
1520 		info->error = error;
1521 	return(0);
1522 }
1523 
1524