xref: /dragonfly/sys/vfs/hammer/hammer_ondisk.c (revision 82730a9c)
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
35  */
36 /*
37  * Manage HAMMER's on-disk structures.  These routines are primarily
38  * responsible for interfacing with the kernel's I/O subsystem and for
39  * managing in-memory structures.
40  */
41 
42 #include "hammer.h"
43 #include <sys/fcntl.h>
44 #include <sys/nlookup.h>
45 #include <sys/buf.h>
46 
47 #include <sys/buf2.h>
48 
49 static void hammer_free_volume(hammer_volume_t volume);
50 static int hammer_load_volume(hammer_volume_t volume);
51 static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
52 static int hammer_load_node(hammer_transaction_t trans,
53 				hammer_node_t node, int isnew);
54 static void _hammer_rel_node(hammer_node_t node, int locked);
55 
56 static int
57 hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
58 {
59 	if (vol1->vol_no < vol2->vol_no)
60 		return(-1);
61 	if (vol1->vol_no > vol2->vol_no)
62 		return(1);
63 	return(0);
64 }
65 
66 /*
67  * hammer_buffer structures are indexed via their zoneX_offset, not
68  * their zone2_offset.
69  */
70 static int
71 hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
72 {
73 	if (buf1->zoneX_offset < buf2->zoneX_offset)
74 		return(-1);
75 	if (buf1->zoneX_offset > buf2->zoneX_offset)
76 		return(1);
77 	return(0);
78 }
79 
80 static int
81 hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
82 {
83 	if (node1->node_offset < node2->node_offset)
84 		return(-1);
85 	if (node1->node_offset > node2->node_offset)
86 		return(1);
87 	return(0);
88 }
89 
90 RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
91 	     hammer_vol_rb_compare, int32_t, vol_no);
92 RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
93 	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
94 RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
95 	     hammer_nod_rb_compare, hammer_off_t, node_offset);
96 
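/*
 * Illustrative sketch only (example_find_volume is a hypothetical helper,
 * not part of HAMMER): RB_GENERATE2 above emits typed lookup routines, so
 * a cached volume, buffer, or node can be located directly by its key.
 */
#if 0
static hammer_volume_t
example_find_volume(hammer_mount_t hmp, int32_t vol_no)
{
	/* the generated lookup is keyed on vol_no */
	return (RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no));
}
#endif
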
97 /************************************************************************
98  *				VOLUMES					*
99  ************************************************************************
100  *
101  * Load a HAMMER volume by name.  Returns 0 on success or a positive error
102  * code on failure.  Volumes must be loaded at mount time; get_volume() will
103  * not load a new volume.
104  *
105  * The passed devvp is vref()'d but not locked.  This function consumes the
106  * ref (typically by associating it with the volume structure).
107  *
108  * Calls made to hammer_load_volume() are single-threaded.
109  */
110 int
111 hammer_install_volume(struct hammer_mount *hmp, const char *volname,
112 		      struct vnode *devvp)
113 {
114 	struct mount *mp;
115 	hammer_volume_t volume;
116 	struct hammer_volume_ondisk *ondisk;
117 	struct nlookupdata nd;
118 	struct buf *bp = NULL;
119 	int error;
120 	int ronly;
121 	int setmp = 0;
122 
123 	mp = hmp->mp;
124 	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
125 
126 	/*
127 	 * Allocate a volume structure
128 	 */
129 	++hammer_count_volumes;
130 	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
131 	volume->vol_name = kstrdup(volname, hmp->m_misc);
132 	volume->io.hmp = hmp;	/* bootstrap */
133 	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
134 	volume->io.offset = 0LL;
135 	volume->io.bytes = HAMMER_BUFSIZE;
136 
137 	/*
138 	 * Get the device vnode
139 	 */
140 	if (devvp == NULL) {
141 		error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
142 		if (error == 0)
143 			error = nlookup(&nd);
144 		if (error == 0)
145 			error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
146 		nlookup_done(&nd);
147 	} else {
148 		error = 0;
149 		volume->devvp = devvp;
150 	}
151 
152 	if (error == 0) {
153 		if (vn_isdisk(volume->devvp, &error)) {
154 			error = vfs_mountedon(volume->devvp);
155 		}
156 	}
157 	if (error == 0 && vcount(volume->devvp) > 0)
158 		error = EBUSY;
159 	if (error == 0) {
160 		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
161 		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
162 		if (error == 0) {
163 			error = VOP_OPEN(volume->devvp,
164 					 (ronly ? FREAD : FREAD|FWRITE),
165 					 FSCRED, NULL);
166 		}
167 		vn_unlock(volume->devvp);
168 	}
169 	if (error) {
170 		hammer_free_volume(volume);
171 		return(error);
172 	}
173 	volume->devvp->v_rdev->si_mountpoint = mp;
174 	setmp = 1;
175 
176 	/*
177 	 * Extract the volume number from the volume header and do various
178 	 * sanity checks.
179 	 */
180 	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
181 	if (error)
182 		goto late_failure;
183 	ondisk = (void *)bp->b_data;
184 	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
185 		kprintf("hammer_mount: volume %s has an invalid header\n",
186 			volume->vol_name);
187 		error = EFTYPE;
188 		goto late_failure;
189 	}
190 	volume->vol_no = ondisk->vol_no;
191 	volume->buffer_base = ondisk->vol_buf_beg;
192 	volume->vol_flags = ondisk->vol_flags;
193 	volume->nblocks = ondisk->vol_nblocks;
194 	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
195 				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
196 	volume->maxraw_off = ondisk->vol_buf_end;
197 
198 	if (RB_EMPTY(&hmp->rb_vols_root)) {
199 		hmp->fsid = ondisk->vol_fsid;
200 	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
201 		kprintf("hammer_mount: volume %s's fsid does not match "
202 			"other volumes\n", volume->vol_name);
203 		error = EFTYPE;
204 		goto late_failure;
205 	}
206 
207 	/*
208 	 * Insert the volume structure into the red-black tree.
209 	 */
210 	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
211 		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
212 			volume->vol_name, volume->vol_no);
213 		error = EEXIST;
214 	}
215 
216 	/*
217 	 * Set the root volume.  HAMMER special-cases the rootvol structure.
218 	 * We do not hold a ref because this would prevent related I/O
219 	 * from being flushed.
220 	 */
221 	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
222 		hmp->rootvol = volume;
223 		hmp->nvolumes = ondisk->vol_count;
224 		if (bp) {
225 			brelse(bp);
226 			bp = NULL;
227 		}
228 		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
229 			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
230 		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
231 			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
232 	}
233 late_failure:
234 	if (bp)
235 		brelse(bp);
236 	if (error) {
237 		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
238 		if (setmp)
239 			volume->devvp->v_rdev->si_mountpoint = NULL;
240 		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
241 		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
242 		vn_unlock(volume->devvp);
243 		hammer_free_volume(volume);
244 	}
245 	return (error);
246 }
247 
248 /*
249  * This is called for each volume when updating the mount point from
250  * read-write to read-only or vice-versa.
251  */
252 int
253 hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
254 {
255 	if (volume->devvp) {
256 		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
257 		if (volume->io.hmp->ronly) {
258 			/* do not call vinvalbuf */
259 			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
260 			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
261 		} else {
262 			/* do not call vinvalbuf */
263 			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
264 			VOP_CLOSE(volume->devvp, FREAD);
265 		}
266 		vn_unlock(volume->devvp);
267 	}
268 	return(0);
269 }
270 
271 /*
272  * Unload and free a HAMMER volume.  Must return >= 0 for the RB scan to
273  * continue; a failure would return -1 to abort the scan.
274  */
275 int
276 hammer_unload_volume(hammer_volume_t volume, void *data __unused)
277 {
278 	hammer_mount_t hmp = volume->io.hmp;
279 	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
280 
281 	/*
282 	 * Clean up the root volume pointer, which is held unlocked in hmp.
283 	 */
284 	if (hmp->rootvol == volume)
285 		hmp->rootvol = NULL;
286 
287 	/*
288 	 * We must not flush a dirty buffer to disk on umount.  It should
289 	 * have already been dealt with by the flusher, or we may be in
290 	 * catastrophic failure.
291 	 */
292 	hammer_io_clear_modify(&volume->io, 1);
293 	volume->io.waitdep = 1;
294 
295 	/*
296 	 * Clean up the persistent ref ioerror might have on the volume
297 	 */
298 	if (volume->io.ioerror)
299 		hammer_io_clear_error_noassert(&volume->io);
300 
301 	/*
302 	 * This should release the bp.  Releasing the volume with flush set
303 	 * implies the interlock is set.
304 	 */
305 	hammer_ref_interlock_true(&volume->io.lock);
306 	hammer_rel_volume(volume, 1);
307 	KKASSERT(volume->io.bp == NULL);
308 
309 	/*
310 	 * There should be no references on the volume, no clusters, and
311 	 * no super-clusters.
312 	 */
313 	KKASSERT(hammer_norefs(&volume->io.lock));
314 
315 	volume->ondisk = NULL;
316 	if (volume->devvp) {
317 		if (volume->devvp->v_rdev &&
318 		    volume->devvp->v_rdev->si_mountpoint == hmp->mp
319 		) {
320 			volume->devvp->v_rdev->si_mountpoint = NULL;
321 		}
322 		if (ronly) {
323 			/*
324 			 * Make sure we don't sync anything to disk if we
325 			 * are in read-only mode (1) or critically-errored
326 			 * (2).  Note that there may be dirty buffers in
327 			 * normal read-only mode from crash recovery.
328 			 */
329 			vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
330 			vinvalbuf(volume->devvp, 0, 0, 0);
331 			VOP_CLOSE(volume->devvp, FREAD);
332 			vn_unlock(volume->devvp);
333 		} else {
334 			/*
335 			 * Normal termination, save any dirty buffers
336 			 * (XXX there really shouldn't be any).
337 			 */
338 			vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
339 			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
340 			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
341 			vn_unlock(volume->devvp);
342 		}
343 	}
344 
345 	/*
346 	 * Destroy the structure
347 	 */
348 	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
349 	hammer_free_volume(volume);
350 	return(0);
351 }
352 
353 static
354 void
355 hammer_free_volume(hammer_volume_t volume)
356 {
357 	hammer_mount_t hmp = volume->io.hmp;
358 
359 	if (volume->vol_name) {
360 		kfree(volume->vol_name, hmp->m_misc);
361 		volume->vol_name = NULL;
362 	}
363 	if (volume->devvp) {
364 		vrele(volume->devvp);
365 		volume->devvp = NULL;
366 	}
367 	--hammer_count_volumes;
368 	kfree(volume, hmp->m_misc);
369 }
370 
371 /*
372  * Get a HAMMER volume.  The volume must already exist.
373  */
374 hammer_volume_t
375 hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
376 {
377 	struct hammer_volume *volume;
378 
379 	/*
380 	 * Locate the volume structure
381 	 */
382 	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
383 	if (volume == NULL) {
384 		*errorp = ENOENT;
385 		return(NULL);
386 	}
387 
388 	/*
389 	 * Reference the volume, load/check the data on the 0->1 transition.
390 	 * hammer_load_volume() will dispose of the interlock on return,
391 	 * and also clean up the ref count on error.
392 	 */
393 	if (hammer_ref_interlock(&volume->io.lock)) {
394 		*errorp = hammer_load_volume(volume);
395 		if (*errorp)
396 			volume = NULL;
397 	} else {
398 		KKASSERT(volume->ondisk);
399 		*errorp = 0;
400 	}
401 	return(volume);
402 }
403 
404 int
405 hammer_ref_volume(hammer_volume_t volume)
406 {
407 	int error;
408 
409 	/*
410 	 * Reference the volume and deal with the check condition used to
411 	 * load its ondisk info.
412 	 */
413 	if (hammer_ref_interlock(&volume->io.lock)) {
414 		error = hammer_load_volume(volume);
415 	} else {
416 		KKASSERT(volume->ondisk);
417 		error = 0;
418 	}
419 	return (error);
420 }
421 
422 /*
423  * May be called without fs_token
424  */
425 hammer_volume_t
426 hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
427 {
428 	hammer_volume_t volume;
429 
430 	volume = hmp->rootvol;
431 	KKASSERT(volume != NULL);
432 
433 	/*
434 	 * Reference the volume and deal with the check condition used to
435 	 * load its ondisk info.
436 	 */
437 	if (hammer_ref_interlock(&volume->io.lock)) {
438 		lwkt_gettoken(&volume->io.hmp->fs_token);
439 		*errorp = hammer_load_volume(volume);
440 		lwkt_reltoken(&volume->io.hmp->fs_token);
441 		if (*errorp)
442 			volume = NULL;
443 	} else {
444 		KKASSERT(volume->ondisk);
445 		*errorp = 0;
446 	}
447 	return (volume);
448 }
449 
450 /*
451  * Load a volume's on-disk information.  The volume must be referenced and
452  * the interlock is held on call.  The interlock will be released on return.
453  * The reference will also be released on return if an error occurs.
454  */
455 static int
456 hammer_load_volume(hammer_volume_t volume)
457 {
458 	int error;
459 
460 	if (volume->ondisk == NULL) {
461 		error = hammer_io_read(volume->devvp, &volume->io,
462 				       HAMMER_BUFSIZE);
463 		if (error == 0) {
464 			volume->ondisk = (void *)volume->io.bp->b_data;
465 			hammer_ref_interlock_done(&volume->io.lock);
466 		} else {
467 			hammer_rel_volume(volume, 1);
468 		}
469 	} else {
470 		error = 0;
471 	}
472 	return(error);
473 }
474 
475 /*
476  * Release a previously acquired reference on the volume.
477  *
478  * Volumes are not unloaded from memory during normal operation.
479  *
480  * May be called without fs_token
481  */
482 void
483 hammer_rel_volume(hammer_volume_t volume, int locked)
484 {
485 	struct buf *bp;
486 
487 	if (hammer_rel_interlock(&volume->io.lock, locked)) {
488 		lwkt_gettoken(&volume->io.hmp->fs_token);
489 		volume->ondisk = NULL;
490 		bp = hammer_io_release(&volume->io, locked);
491 		lwkt_reltoken(&volume->io.hmp->fs_token);
492 		hammer_rel_interlock_done(&volume->io.lock, locked);
493 		if (bp)
494 			brelse(bp);
495 	}
496 }
497 
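/*
 * Illustrative sketch only (example_peek_volume is a hypothetical helper,
 * not part of HAMMER): the usual access pattern for the routines above.
 * The volume's ondisk header is only valid while the reference obtained
 * from hammer_get_volume() is held.
 */
#if 0
static void
example_peek_volume(hammer_mount_t hmp, int32_t vol_no)
{
	hammer_volume_t volume;
	int error;

	volume = hammer_get_volume(hmp, vol_no, &error);
	if (volume) {
		/* volume->ondisk may be inspected while referenced */
		kprintf("example: volume %d present\n", volume->vol_no);
		hammer_rel_volume(volume, 0);
	}
}
#endif
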
498 int
499 hammer_mountcheck_volumes(struct hammer_mount *hmp)
500 {
501 	hammer_volume_t vol;
502 	int i;
503 
504 	for (i = 0; i < hmp->nvolumes; ++i) {
505 		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
506 		if (vol == NULL)
507 			return(EINVAL);
508 	}
509 	return(0);
510 }
511 
512 /************************************************************************
513  *				BUFFERS					*
514  ************************************************************************
515  *
516  * Manage buffers.  Currently most blockmap-backed zones are direct-mapped
517  * to zone-2 buffer offsets, without a translation stage.  However, the
518  * hammer_buffer structure is indexed by its zoneX_offset, not its
519  * zone2_offset.
520  *
521  * The proper zone must be maintained throughout the code-base all the way
522  * through to the big-block allocator, or routines like hammer_del_buffers()
523  * will not be able to locate all potentially conflicting buffers.
524  */
525 
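/*
 * Illustrative sketch only (example_decode_offset is a hypothetical helper,
 * not part of HAMMER): a hammer_off_t carries its zone in the top bits and
 * hammer_buffer structures are cached under the buffer-aligned zone-X key.
 * The zone-2 (raw buffer) translation happens later, in hammer_get_buffer().
 */
#if 0
static void
example_decode_offset(hammer_off_t buf_offset)
{
	int zone = HAMMER_ZONE_DECODE(buf_offset);
	hammer_off_t key = buf_offset & ~HAMMER_BUFMASK64;

	kprintf("example: zone=%d lookup key=%016jx\n",
		zone, (intmax_t)key);
}
#endif
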
526 /*
527  * Helper function returns whether a zone offset can be directly translated
528  * to a raw buffer index or not.  Really only the volume and undo zones
529  * can't be directly translated.  Volumes are special-cased and undo zones
530  * shouldn't be accessed via aliases in read-only mode.
531  *
532  * This function is ONLY used to detect aliased zones during a read-only
533  * mount.
534  */
535 static __inline int
536 hammer_direct_zone(hammer_off_t buf_offset)
537 {
538 	switch(HAMMER_ZONE_DECODE(buf_offset)) {
539 	case HAMMER_ZONE_RAW_BUFFER_INDEX:
540 	case HAMMER_ZONE_FREEMAP_INDEX:
541 	case HAMMER_ZONE_BTREE_INDEX:
542 	case HAMMER_ZONE_META_INDEX:
543 	case HAMMER_ZONE_LARGE_DATA_INDEX:
544 	case HAMMER_ZONE_SMALL_DATA_INDEX:
545 		return(1);
546 	default:
547 		return(0);
548 	}
549 	/* NOT REACHED */
550 }
551 
552 hammer_buffer_t
553 hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
554 		  int bytes, int isnew, int *errorp)
555 {
556 	hammer_buffer_t buffer;
557 	hammer_volume_t volume;
558 	hammer_off_t	zone2_offset;
559 	hammer_io_type_t iotype;
560 	int vol_no;
561 	int zone;
562 
563 	buf_offset &= ~HAMMER_BUFMASK64;
564 again:
565 	/*
566 	 * Shortcut if the buffer is already cached
567 	 */
568 	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
569 	if (buffer) {
570 		/*
571 		 * Once refed the ondisk field will not be cleared by
572 		 * any other action.  Shortcut the operation if the
573 		 * ondisk structure is valid.
574 		 */
575 found_aliased:
576 		if (hammer_ref_interlock(&buffer->io.lock) == 0) {
577 			hammer_io_advance(&buffer->io);
578 			KKASSERT(buffer->ondisk);
579 			*errorp = 0;
580 			return(buffer);
581 		}
582 
583 		/*
584 		 * 0->1 transition or deferred 0->1 transition (CHECK),
585 		 * interlock now held.  Shortcut if ondisk is already
586 		 * assigned.
587 		 */
588 		atomic_add_int(&hammer_count_refedbufs, 1);
589 		if (buffer->ondisk) {
590 			hammer_io_advance(&buffer->io);
591 			hammer_ref_interlock_done(&buffer->io.lock);
592 			*errorp = 0;
593 			return(buffer);
594 		}
595 
596 		/*
597 		 * The buffer is no longer loose if it has a ref, and
598 		 * cannot become loose once it gains a ref.  Loose
599 		 * buffers will never be in a modified state.  This should
600 		 * only occur on the 0->1 transition of refs.
601 		 *
602 		 * lose_list can be modified via a biodone() interrupt
603 		 * so the io_token must be held.
604 		 */
605 		if (buffer->io.mod_root == &hmp->lose_root) {
606 			lwkt_gettoken(&hmp->io_token);
607 			if (buffer->io.mod_root == &hmp->lose_root) {
608 				RB_REMOVE(hammer_mod_rb_tree,
609 					  buffer->io.mod_root, &buffer->io);
610 				buffer->io.mod_root = NULL;
611 				KKASSERT(buffer->io.modified == 0);
612 			}
613 			lwkt_reltoken(&hmp->io_token);
614 		}
615 		goto found;
616 	} else if (hmp->ronly && hammer_direct_zone(buf_offset)) {
617 		/*
618 		 * If this is a read-only mount there could be an alias
619 		 * in the raw-zone.  If there is we use that buffer instead.
620 		 *
621 		 * rw mounts will not have aliases.  Also note when going
622 		 * from ro -> rw the recovered raw buffers are flushed and
623 		 * reclaimed, so again there will not be any aliases once
624 		 * the mount is rw.
625 		 */
626 		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
627 				   (buf_offset & ~HAMMER_OFF_ZONE_MASK) |
628 				   HAMMER_ZONE_RAW_BUFFER);
629 		if (buffer) {
630 			kprintf("HAMMER: recovered aliased %016jx\n",
631 				(intmax_t)buf_offset);
632 			goto found_aliased;
633 		}
634 	}
635 
636 	/*
637 	 * What is the buffer class?
638 	 */
639 	zone = HAMMER_ZONE_DECODE(buf_offset);
640 
641 	switch(zone) {
642 	case HAMMER_ZONE_LARGE_DATA_INDEX:
643 	case HAMMER_ZONE_SMALL_DATA_INDEX:
644 		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
645 		break;
646 	case HAMMER_ZONE_UNDO_INDEX:
647 		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
648 		break;
649 	case HAMMER_ZONE_META_INDEX:
650 	default:
651 		/*
652 		 * NOTE: inode data and directory entries are placed in this
653 		 * zone.  inode atime/mtime is updated in-place and thus
654 		 * buffers containing inodes must be synchronized as
655 		 * meta-buffers, same as buffers containing B-Tree info.
656 		 */
657 		iotype = HAMMER_STRUCTURE_META_BUFFER;
658 		break;
659 	}
660 
661 	/*
662 	 * Handle blockmap offset translations
663 	 */
664 	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
665 		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
666 	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
667 		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
668 	} else {
669 		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
670 		zone2_offset = buf_offset;
671 		*errorp = 0;
672 	}
673 	if (*errorp)
674 		return(NULL);
675 
676 	/*
677 	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
678 	 * specifications.
679 	 */
680 	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
681 		 HAMMER_ZONE_RAW_BUFFER);
682 	vol_no = HAMMER_VOL_DECODE(zone2_offset);
683 	volume = hammer_get_volume(hmp, vol_no, errorp);
684 	if (volume == NULL)
685 		return(NULL);
686 
687 	KKASSERT(zone2_offset < volume->maxbuf_off);
688 
689 	/*
690 	 * Allocate a new buffer structure.  We will check for races later.
691 	 */
692 	++hammer_count_buffers;
693 	buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
694 			 M_WAITOK|M_ZERO|M_USE_RESERVE);
695 	buffer->zone2_offset = zone2_offset;
696 	buffer->zoneX_offset = buf_offset;
697 
698 	hammer_io_init(&buffer->io, volume, iotype);
699 	buffer->io.offset = volume->ondisk->vol_buf_beg +
700 			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
701 	buffer->io.bytes = bytes;
702 	TAILQ_INIT(&buffer->clist);
703 	hammer_ref_interlock_true(&buffer->io.lock);
704 
705 	/*
706 	 * Insert the buffer into the RB tree and handle late collisions.
707 	 */
708 	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
709 		hammer_rel_volume(volume, 0);
710 		buffer->io.volume = NULL;			/* safety */
711 		if (hammer_rel_interlock(&buffer->io.lock, 1))	/* safety */
712 			hammer_rel_interlock_done(&buffer->io.lock, 1);
713 		--hammer_count_buffers;
714 		kfree(buffer, hmp->m_misc);
715 		goto again;
716 	}
717 	atomic_add_int(&hammer_count_refedbufs, 1);
718 found:
719 
720 	/*
721 	 * The buffer is referenced and interlocked.  Load the buffer
722 	 * if necessary.  hammer_load_buffer() deals with the interlock
723 	 * and, if an error is returned, also deals with the ref.
724 	 */
725 	if (buffer->ondisk == NULL) {
726 		*errorp = hammer_load_buffer(buffer, isnew);
727 		if (*errorp)
728 			buffer = NULL;
729 	} else {
730 		hammer_io_advance(&buffer->io);
731 		hammer_ref_interlock_done(&buffer->io.lock);
732 		*errorp = 0;
733 	}
734 	return(buffer);
735 }
736 
737 /*
738  * This is used by the direct-read code to deal with large-data buffers
739  * created by the reblocker and mirror-write code.  The direct-read code
740  * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
741  * running hammer buffers must be fully synced to disk before we can issue
742  * the direct-read.
743  *
744  * This code path is not considered critical as only the reblocker and
745  * mirror-write code will create large-data buffers via the HAMMER buffer
746  * subsystem.  They do that because they operate at the B-Tree level and
747  * do not access the vnode/inode structures.
748  */
749 void
750 hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
751 {
752 	hammer_buffer_t buffer;
753 	int error;
754 
755 	KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
756 		 HAMMER_ZONE_LARGE_DATA);
757 
758 	while (bytes > 0) {
759 		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
760 				   base_offset);
761 		if (buffer && (buffer->io.modified || buffer->io.running)) {
762 			error = hammer_ref_buffer(buffer);
763 			if (error == 0) {
764 				hammer_io_wait(&buffer->io);
765 				if (buffer->io.modified) {
766 					hammer_io_write_interlock(&buffer->io);
767 					hammer_io_flush(&buffer->io, 0);
768 					hammer_io_done_interlock(&buffer->io);
769 					hammer_io_wait(&buffer->io);
770 				}
771 				hammer_rel_buffer(buffer, 0);
772 			}
773 		}
774 		base_offset += HAMMER_BUFSIZE;
775 		bytes -= HAMMER_BUFSIZE;
776 	}
777 }
778 
779 /*
780  * Destroy all buffers covering the specified zoneX offset range.  This
781  * is called when the related blockmap layer2 entry is freed or when
782  * a direct write bypasses our buffer/buffer-cache subsystem.
783  *
784  * The buffers may be referenced by the caller itself.  Setting reclaim
785  * will cause the buffer to be destroyed when its ref count reaches zero.
786  *
787  * Return 0 on success, EAGAIN if some buffers could not be destroyed due
788  * to additional references held by other threads, or some other (typically
789  * fatal) error.
790  */
791 int
792 hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
793 		   hammer_off_t zone2_offset, int bytes,
794 		   int report_conflicts)
795 {
796 	hammer_buffer_t buffer;
797 	hammer_volume_t volume;
798 	int vol_no;
799 	int error;
800 	int ret_error;
801 
802 	vol_no = HAMMER_VOL_DECODE(zone2_offset);
803 	volume = hammer_get_volume(hmp, vol_no, &ret_error);
804 	KKASSERT(ret_error == 0);
805 
806 	while (bytes > 0) {
807 		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
808 				   base_offset);
809 		if (buffer) {
810 			error = hammer_ref_buffer(buffer);
811 			if (hammer_debug_general & 0x20000) {
812 				kprintf("hammer: delbufr %016jx "
813 					"rerr=%d 1ref=%d\n",
814 					(intmax_t)buffer->zoneX_offset,
815 					error,
816 					hammer_oneref(&buffer->io.lock));
817 			}
818 			if (error == 0 && !hammer_oneref(&buffer->io.lock)) {
819 				error = EAGAIN;
820 				hammer_rel_buffer(buffer, 0);
821 			}
822 			if (error == 0) {
823 				KKASSERT(buffer->zone2_offset == zone2_offset);
824 				hammer_io_clear_modify(&buffer->io, 1);
825 				buffer->io.reclaim = 1;
826 				buffer->io.waitdep = 1;
827 				KKASSERT(buffer->io.volume == volume);
828 				hammer_rel_buffer(buffer, 0);
829 			}
830 		} else {
831 			error = hammer_io_inval(volume, zone2_offset);
832 		}
833 		if (error) {
834 			ret_error = error;
835 			if (report_conflicts ||
836 			    (hammer_debug_general & 0x8000)) {
837 				kprintf("hammer_del_buffers: unable to "
838 					"invalidate %016llx buffer=%p rep=%d\n",
839 					(long long)base_offset,
840 					buffer, report_conflicts);
841 			}
842 		}
843 		base_offset += HAMMER_BUFSIZE;
844 		zone2_offset += HAMMER_BUFSIZE;
845 		bytes -= HAMMER_BUFSIZE;
846 	}
847 	hammer_rel_volume(volume, 0);
848 	return (ret_error);
849 }
850 
851 /*
852  * Given a referenced and interlocked buffer load/validate the data.
853  *
854  * The buffer interlock will be released on return.  If an error is
855  * returned the buffer reference will also be released (and the buffer
856  * pointer will thus be stale).
857  */
858 static int
859 hammer_load_buffer(hammer_buffer_t buffer, int isnew)
860 {
861 	hammer_volume_t volume;
862 	int error;
863 
864 	/*
865 	 * Load the buffer's on-disk info
866 	 */
867 	volume = buffer->io.volume;
868 
869 	if (hammer_debug_io & 0x0004) {
870 		kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
871 			(long long)buffer->zoneX_offset,
872 			(long long)buffer->zone2_offset,
873 			isnew, buffer->ondisk);
874 	}
875 
876 	if (buffer->ondisk == NULL) {
877 		/*
878 		 * Issue the read or generate a new buffer.  When reading
879 		 * the limit argument controls any read-ahead clustering
880 		 * hammer_io_read() is allowed to do.
881 		 *
882 		 * We cannot read-ahead in the large-data zone and we cannot
883 		 * cross a largeblock boundary as the next largeblock might
884 		 * use a different buffer size.
885 		 */
886 		if (isnew) {
887 			error = hammer_io_new(volume->devvp, &buffer->io);
888 		} else if ((buffer->zoneX_offset & HAMMER_OFF_ZONE_MASK) ==
889 			   HAMMER_ZONE_LARGE_DATA) {
890 			error = hammer_io_read(volume->devvp, &buffer->io,
891 					       buffer->io.bytes);
892 		} else {
893 			hammer_off_t limit;
894 
895 			limit = (buffer->zone2_offset +
896 				 HAMMER_LARGEBLOCK_MASK64) &
897 				~HAMMER_LARGEBLOCK_MASK64;
898 			limit -= buffer->zone2_offset;
899 			error = hammer_io_read(volume->devvp, &buffer->io,
900 					       limit);
901 		}
902 		if (error == 0)
903 			buffer->ondisk = (void *)buffer->io.bp->b_data;
904 	} else if (isnew) {
905 		error = hammer_io_new(volume->devvp, &buffer->io);
906 	} else {
907 		error = 0;
908 	}
909 	if (error == 0) {
910 		hammer_io_advance(&buffer->io);
911 		hammer_ref_interlock_done(&buffer->io.lock);
912 	} else {
913 		hammer_rel_buffer(buffer, 1);
914 	}
915 	return (error);
916 }
917 
918 /*
919  * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
920  * This routine is only called during unmount or when a volume is
921  * removed.
922  *
923  * If data != NULL, it specifies a volume whose buffers should
924  * be unloaded.
925  */
926 int
927 hammer_unload_buffer(hammer_buffer_t buffer, void *data)
928 {
929 	struct hammer_volume *volume = (struct hammer_volume *) data;
930 
931 	/*
932 	 * If volume != NULL we are only interested in unloading buffers
933 	 * associated with a particular volume.
934 	 */
935 	if (volume != NULL && volume != buffer->io.volume)
936 		return 0;
937 
938 	/*
939 	 * Clean up the persistent ref ioerror might have on the buffer
940 	 * and acquire a ref.  Expect a 0->1 transition.
941 	 */
942 	if (buffer->io.ioerror) {
943 		hammer_io_clear_error_noassert(&buffer->io);
944 		atomic_add_int(&hammer_count_refedbufs, -1);
945 	}
946 	hammer_ref_interlock_true(&buffer->io.lock);
947 	atomic_add_int(&hammer_count_refedbufs, 1);
948 
949 	/*
950 	 * We must not flush a dirty buffer to disk on umount.  It should
951 	 * have already been dealt with by the flusher, or we may be in
952 	 * catastrophic failure.
953 	 *
954 	 * We must set waitdep to ensure that a running buffer is waited
955 	 * on and released prior to us trying to unload the volume.
956 	 */
957 	hammer_io_clear_modify(&buffer->io, 1);
958 	hammer_flush_buffer_nodes(buffer);
959 	buffer->io.waitdep = 1;
960 	hammer_rel_buffer(buffer, 1);
961 	return(0);
962 }
963 
964 /*
965  * Reference a buffer that is either already referenced or via a specially
966  * handled pointer (aka cursor->buffer).
967  */
968 int
969 hammer_ref_buffer(hammer_buffer_t buffer)
970 {
971 	hammer_mount_t hmp;
972 	int error;
973 	int locked;
974 
975 	/*
976 	 * Acquire a ref, plus the buffer will be interlocked on the
977 	 * 0->1 transition.
978 	 */
979 	locked = hammer_ref_interlock(&buffer->io.lock);
980 	hmp = buffer->io.hmp;
981 
982 	/*
983 	 * At this point a biodone() will not touch the buffer other than
984 	 * incidental bits.  However, lose_list can be modified via
985 	 * a biodone() interrupt.
986 	 *
987 	 * No longer loose.  lose_list requires the io_token.
988 	 */
989 	if (buffer->io.mod_root == &hmp->lose_root) {
990 		lwkt_gettoken(&hmp->io_token);
991 		if (buffer->io.mod_root == &hmp->lose_root) {
992 			RB_REMOVE(hammer_mod_rb_tree,
993 				  buffer->io.mod_root, &buffer->io);
994 			buffer->io.mod_root = NULL;
995 		}
996 		lwkt_reltoken(&hmp->io_token);
997 	}
998 
999 	if (locked) {
1000 		atomic_add_int(&hammer_count_refedbufs, 1);
1001 		error = hammer_load_buffer(buffer, 0);
1002 		/* NOTE: on error the buffer pointer is stale */
1003 	} else {
1004 		error = 0;
1005 	}
1006 	return(error);
1007 }
1008 
1009 /*
1010  * Release a reference on the buffer.  On the 1->0 transition the
1011  * underlying IO will be released but the data reference is left
1012  * cached.
1013  *
1014  * Only destroy the structure itself if the related buffer cache buffer
1015  * was disassociated from it.  This ties the management of the structure
1016  * to the buffer cache subsystem.  buffer->ondisk determines whether the
1017  * embedded io is referenced or not.
1018  */
1019 void
1020 hammer_rel_buffer(hammer_buffer_t buffer, int locked)
1021 {
1022 	hammer_volume_t volume;
1023 	hammer_mount_t hmp;
1024 	struct buf *bp = NULL;
1025 	int freeme = 0;
1026 
1027 	hmp = buffer->io.hmp;
1028 
1029 	if (hammer_rel_interlock(&buffer->io.lock, locked) == 0)
1030 		return;
1031 
1032 	/*
1033 	 * hammer_count_refedbufs accounting.  Decrement if we are in
1034 	 * the error path or if CHECK is clear.
1035 	 *
1036 	 * If we are not in the error path and CHECK is set the caller
1037 	 * probably just did a hammer_ref() and didn't account for it,
1038 	 * so we don't account for the loss here.
1039 	 */
1040 	if (locked || (buffer->io.lock.refs & HAMMER_REFS_CHECK) == 0)
1041 		atomic_add_int(&hammer_count_refedbufs, -1);
1042 
1043 	/*
1044 	 * If the caller locked us or the normal released transitions
1045 	 * from 1->0 (and acquired the lock) attempt to release the
1046 	 * io.  If the called locked us we tell hammer_io_release()
1047 	 * to flush (which would be the unload or failure path).
1048 	 */
1049 	bp = hammer_io_release(&buffer->io, locked);
1050 
1051 	/*
1052 	 * If the buffer has no bp association and no refs we can destroy
1053 	 * it.
1054 	 *
1055 	 * NOTE: It is impossible for any associated B-Tree nodes to have
1056 	 * refs if the buffer has no additional refs.
1057 	 */
1058 	if (buffer->io.bp == NULL && hammer_norefs(&buffer->io.lock)) {
1059 		RB_REMOVE(hammer_buf_rb_tree,
1060 			  &buffer->io.hmp->rb_bufs_root,
1061 			  buffer);
1062 		volume = buffer->io.volume;
1063 		buffer->io.volume = NULL; /* sanity */
1064 		hammer_rel_volume(volume, 0);
1065 		hammer_io_clear_modlist(&buffer->io);
1066 		hammer_flush_buffer_nodes(buffer);
1067 		KKASSERT(TAILQ_EMPTY(&buffer->clist));
1068 		freeme = 1;
1069 	}
1070 
1071 	/*
1072 	 * Cleanup
1073 	 */
1074 	hammer_rel_interlock_done(&buffer->io.lock, locked);
1075 	if (bp)
1076 		brelse(bp);
1077 	if (freeme) {
1078 		--hammer_count_buffers;
1079 		kfree(buffer, hmp->m_misc);
1080 	}
1081 }
1082 
1083 /*
1084  * Access the filesystem buffer containing the specified hammer offset.
1085  * buf_offset is a conglomeration of the volume number and vol_buf_beg
1086  * relative buffer offset.  It must also have bit 55 set to be valid.
1087  * (see hammer_off_t in hammer_disk.h).
1088  *
1089  * Any prior buffer in *bufferp will be released and replaced by the
1090  * requested buffer.
1091  *
1092  * NOTE: The buffer is indexed via its zoneX_offset but we allow the
1093  * passed cached *bufferp to match against either zoneX or zone2.
1094  */
1095 static __inline
1096 void *
1097 _hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
1098 	     int *errorp, struct hammer_buffer **bufferp)
1099 {
1100 	hammer_buffer_t buffer;
1101 	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;
1102 
1103 	buf_offset &= ~HAMMER_BUFMASK64;
1104 	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);
1105 
1106 	buffer = *bufferp;
1107 	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
1108 			       buffer->zoneX_offset != buf_offset)) {
1109 		if (buffer)
1110 			hammer_rel_buffer(buffer, 0);
1111 		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
1112 		*bufferp = buffer;
1113 	} else {
1114 		*errorp = 0;
1115 	}
1116 
1117 	/*
1118 	 * Return a pointer to the buffer data.
1119 	 */
1120 	if (buffer == NULL)
1121 		return(NULL);
1122 	else
1123 		return((char *)buffer->ondisk + xoff);
1124 }
1125 
1126 void *
1127 hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
1128 	     int *errorp, struct hammer_buffer **bufferp)
1129 {
1130 	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
1131 }
1132 
1133 void *
1134 hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
1135 	         int *errorp, struct hammer_buffer **bufferp)
1136 {
1137 	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
1138 	return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
1139 }
1140 
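/*
 * Illustrative sketch only (example_read_ondisk is a hypothetical helper,
 * not part of HAMMER): typical hammer_bread() usage.  A cached buffer
 * pointer is threaded through successive calls and released once when the
 * caller is finished with the mapping.
 */
#if 0
static void
example_read_ondisk(hammer_mount_t hmp, hammer_off_t data_offset)
{
	hammer_buffer_t buffer = NULL;
	void *data;
	int error;

	data = hammer_bread(hmp, data_offset, &error, &buffer);
	if (data) {
		/* inspect the on-media data while the buffer ref is held */
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
}
#endif
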
1141 /*
1142  * Access the filesystem buffer containing the specified hammer offset.
1143  * No disk read operation occurs.  The result buffer may contain garbage.
1144  *
1145  * Any prior buffer in *bufferp will be released and replaced by the
1146  * requested buffer.
1147  *
1148  * This function marks the buffer dirty but does not increment its
1149  * modify_refs count.
1150  */
1151 static __inline
1152 void *
1153 _hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
1154 	     int *errorp, struct hammer_buffer **bufferp)
1155 {
1156 	hammer_buffer_t buffer;
1157 	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;
1158 
1159 	buf_offset &= ~HAMMER_BUFMASK64;
1160 
1161 	buffer = *bufferp;
1162 	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
1163 			       buffer->zoneX_offset != buf_offset)) {
1164 		if (buffer)
1165 			hammer_rel_buffer(buffer, 0);
1166 		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
1167 		*bufferp = buffer;
1168 	} else {
1169 		*errorp = 0;
1170 	}
1171 
1172 	/*
1173 	 * Return a pointer to the buffer data.
1174 	 */
1175 	if (buffer == NULL)
1176 		return(NULL);
1177 	else
1178 		return((char *)buffer->ondisk + xoff);
1179 }
1180 
1181 void *
1182 hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
1183 	     int *errorp, struct hammer_buffer **bufferp)
1184 {
1185 	return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
1186 }
1187 
1188 void *
1189 hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
1190 		int *errorp, struct hammer_buffer **bufferp)
1191 {
1192 	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
1193 	return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
1194 }
1195 
1196 /************************************************************************
1197  *				NODES					*
1198  ************************************************************************
1199  *
1200  * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
1201  * method used by the HAMMER filesystem.
1202  *
1203  * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
1204  * associated with its buffer, and will only reference the buffer while
1205  * the node itself is referenced.
1206  *
1207  * A hammer_node can also be passively associated with other HAMMER
1208  * structures, such as inodes, while retaining 0 references.  These
1209  * associations can be cleared backwards using a pointer-to-pointer in
1210  * the hammer_node.
1211  *
1212  * This allows the HAMMER implementation to cache hammer_nodes long-term
1213  * and short-cut a great deal of the infrastructure's complexity.  In
1214  * most cases a cached node can be reacquired without having to dip into
1215  * either the buffer or cluster management code.
1216  *
1217  * The caller must pass a referenced cluster on call and will retain
1218  * ownership of the reference on return.  The node will acquire its own
1219  * additional references, if necessary.
1220  */
1221 hammer_node_t
1222 hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
1223 		int isnew, int *errorp)
1224 {
1225 	hammer_mount_t hmp = trans->hmp;
1226 	hammer_node_t node;
1227 	int doload;
1228 
1229 	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);
1230 
1231 	/*
1232 	 * Locate the structure, allocating one if necessary.
1233 	 */
1234 again:
1235 	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
1236 	if (node == NULL) {
1237 		++hammer_count_nodes;
1238 		node = kmalloc(sizeof(*node), hmp->m_misc, M_WAITOK|M_ZERO|M_USE_RESERVE);
1239 		node->node_offset = node_offset;
1240 		node->hmp = hmp;
1241 		TAILQ_INIT(&node->cursor_list);
1242 		TAILQ_INIT(&node->cache_list);
1243 		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
1244 			--hammer_count_nodes;
1245 			kfree(node, hmp->m_misc);
1246 			goto again;
1247 		}
1248 		doload = hammer_ref_interlock_true(&node->lock);
1249 	} else {
1250 		doload = hammer_ref_interlock(&node->lock);
1251 	}
1252 	if (doload) {
1253 		*errorp = hammer_load_node(trans, node, isnew);
1254 		trans->flags |= HAMMER_TRANSF_DIDIO;
1255 		if (*errorp)
1256 			node = NULL;
1257 	} else {
1258 		KKASSERT(node->ondisk);
1259 		*errorp = 0;
1260 		hammer_io_advance(&node->buffer->io);
1261 	}
1262 	return(node);
1263 }
1264 
1265 /*
1266  * Reference an already-referenced node.  0->1 transitions should assert
1267  * so we do not have to deal with hammer_ref() setting CHECK.
1268  */
1269 void
1270 hammer_ref_node(hammer_node_t node)
1271 {
1272 	KKASSERT(hammer_isactive(&node->lock) && node->ondisk != NULL);
1273 	hammer_ref(&node->lock);
1274 }
1275 
1276 /*
1277  * Load a node's on-disk data reference.  Called with the node referenced
1278  * and interlocked.
1279  *
1280  * On return the node interlock will be unlocked.  If a non-zero error code
1281  * is returned the node will also be dereferenced (and the caller's pointer
1282  * will be stale).
1283  */
1284 static int
1285 hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
1286 {
1287 	hammer_buffer_t buffer;
1288 	hammer_off_t buf_offset;
1289 	int error;
1290 
1291 	error = 0;
1292 	if (node->ondisk == NULL) {
1293 		/*
1294 		 * This is a little confusing but the gist is that
1295 		 * node->buffer determines whether the node is on
1296 		 * the buffer's clist and node->ondisk determines
1297 		 * whether the buffer is referenced.
1298 		 *
1299 		 * We could be racing a buffer release, in which case
1300 		 * node->buffer may become NULL while we are blocked
1301 		 * referencing the buffer.
1302 		 */
1303 		if ((buffer = node->buffer) != NULL) {
1304 			error = hammer_ref_buffer(buffer);
1305 			if (error == 0 && node->buffer == NULL) {
1306 				TAILQ_INSERT_TAIL(&buffer->clist,
1307 						  node, entry);
1308 				node->buffer = buffer;
1309 			}
1310 		} else {
1311 			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
1312 			buffer = hammer_get_buffer(node->hmp, buf_offset,
1313 						   HAMMER_BUFSIZE, 0, &error);
1314 			if (buffer) {
1315 				KKASSERT(error == 0);
1316 				TAILQ_INSERT_TAIL(&buffer->clist,
1317 						  node, entry);
1318 				node->buffer = buffer;
1319 			}
1320 		}
1321 		if (error)
1322 			goto failed;
1323 		node->ondisk = (void *)((char *)buffer->ondisk +
1324 				        (node->node_offset & HAMMER_BUFMASK));
1325 
1326 		/*
1327 		 * Check CRC.  NOTE: Neither flag is set and the CRC is not
1328 		 * generated on new B-Tree nodes.
1329 		 */
1330 		if (isnew == 0 &&
1331 		    (node->flags & HAMMER_NODE_CRCANY) == 0) {
1332 			if (hammer_crc_test_btree(node->ondisk) == 0) {
1333 				if (hammer_debug_critical)
1334 					Debugger("CRC FAILED: B-TREE NODE");
1335 				node->flags |= HAMMER_NODE_CRCBAD;
1336 			} else {
1337 				node->flags |= HAMMER_NODE_CRCGOOD;
1338 			}
1339 		}
1340 	}
1341 	if (node->flags & HAMMER_NODE_CRCBAD) {
1342 		if (trans->flags & HAMMER_TRANSF_CRCDOM)
1343 			error = EDOM;
1344 		else
1345 			error = EIO;
1346 	}
1347 failed:
1348 	if (error) {
1349 		_hammer_rel_node(node, 1);
1350 	} else {
1351 		hammer_ref_interlock_done(&node->lock);
1352 	}
1353 	return (error);
1354 }
1355 
1356 /*
1357  * Safely reference a node, interlock against flushes via the IO subsystem.
1358  */
1359 hammer_node_t
1360 hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
1361 		     int *errorp)
1362 {
1363 	hammer_node_t node;
1364 	int doload;
1365 
1366 	node = cache->node;
1367 	if (node != NULL) {
1368 		doload = hammer_ref_interlock(&node->lock);
1369 		if (doload) {
1370 			*errorp = hammer_load_node(trans, node, 0);
1371 			if (*errorp)
1372 				node = NULL;
1373 		} else {
1374 			KKASSERT(node->ondisk);
1375 			if (node->flags & HAMMER_NODE_CRCBAD) {
1376 				if (trans->flags & HAMMER_TRANSF_CRCDOM)
1377 					*errorp = EDOM;
1378 				else
1379 					*errorp = EIO;
1380 				_hammer_rel_node(node, 0);
1381 				node = NULL;
1382 			} else {
1383 				*errorp = 0;
1384 			}
1385 		}
1386 	} else {
1387 		*errorp = ENOENT;
1388 	}
1389 	return(node);
1390 }
1391 
1392 /*
1393  * Release a hammer_node.  On the last release the node dereferences
1394  * its underlying buffer and may or may not be destroyed.
1395  *
1396  * If locked is non-zero the passed node has been interlocked by the
1397  * caller and we are in the failure/unload path, otherwise it has not and
1398  * we are doing a normal release.
1399  *
1400  * This function will dispose of the interlock and the reference.
1401  * On return the node pointer is stale.
1402  */
1403 void
1404 _hammer_rel_node(hammer_node_t node, int locked)
1405 {
1406 	hammer_buffer_t buffer;
1407 
1408 	/*
1409 	 * Deref the node.  If this isn't the 1->0 transition we're basically
1410 	 * done.  If locked is non-zero this function will just deref the
1411 	 * locked node and return TRUE, otherwise it will deref the locked
1412 	 * node and either lock and return TRUE on the 1->0 transition or
1413 	 * not lock and return FALSE.
1414 	 */
1415 	if (hammer_rel_interlock(&node->lock, locked) == 0)
1416 		return;
1417 
1418 	/*
1419 	 * Either locked was non-zero and we are interlocked, or the
1420 	 * hammer_rel_interlock() call returned non-zero and we are
1421 	 * interlocked.
1422 	 *
1423 	 * The ref-count must still be decremented if locked != 0 so
1424 	 * the cleanup required still varies a bit.
1425 	 *
1426 	 * hammer_flush_node() when called with 1 or 2 will dispose of
1427 	 * the lock and possible ref-count.
1428 	 */
1429 	if (node->ondisk == NULL) {
1430 		hammer_flush_node(node, locked + 1);
1431 		/* node is stale now */
1432 		return;
1433 	}
1434 
1435 	/*
1436 	 * Do not disassociate the node from the buffer if it represents
1437 	 * a modified B-Tree node that still needs its crc to be generated.
1438 	 */
1439 	if (node->flags & HAMMER_NODE_NEEDSCRC) {
1440 		hammer_rel_interlock_done(&node->lock, locked);
1441 		return;
1442 	}
1443 
1444 	/*
1445 	 * Do final cleanups and then either destroy the node or leave it
1446 	 * passively cached.  The buffer reference is removed regardless.
1447 	 */
1448 	buffer = node->buffer;
1449 	node->ondisk = NULL;
1450 
1451 	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
1452 		/*
1453 		 * Normal release.
1454 		 */
1455 		hammer_rel_interlock_done(&node->lock, locked);
1456 	} else {
1457 		/*
1458 		 * Destroy the node.
1459 		 */
1460 		hammer_flush_node(node, locked + 1);
1461 		/* node is stale */
1462 
1463 	}
1464 	hammer_rel_buffer(buffer, 0);
1465 }
1466 
1467 void
1468 hammer_rel_node(hammer_node_t node)
1469 {
1470 	_hammer_rel_node(node, 0);
1471 }
1472 
1473 /*
1474  * Free space on-media associated with a B-Tree node.
1475  */
1476 void
1477 hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
1478 {
1479 	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
1480 	node->flags |= HAMMER_NODE_DELETED;
1481 	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
1482 }
1483 
1484 /*
1485  * Passively cache a referenced hammer_node.  The caller may release
1486  * the node on return.
1487  */
1488 void
1489 hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
1490 {
1491 	/*
1492 	 * If the node doesn't exist, or is being deleted, don't cache it!
1493 	 *
1494 	 * The node can only ever be NULL in the I/O failure path.
1495 	 */
1496 	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
1497 		return;
1498 	if (cache->node == node)
1499 		return;
1500 	while (cache->node)
1501 		hammer_uncache_node(cache);
1502 	if (node->flags & HAMMER_NODE_DELETED)
1503 		return;
1504 	cache->node = node;
1505 	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
1506 }
1507 
1508 void
1509 hammer_uncache_node(hammer_node_cache_t cache)
1510 {
1511 	hammer_node_t node;
1512 
1513 	if ((node = cache->node) != NULL) {
1514 		TAILQ_REMOVE(&node->cache_list, cache, entry);
1515 		cache->node = NULL;
1516 		if (TAILQ_EMPTY(&node->cache_list))
1517 			hammer_flush_node(node, 0);
1518 	}
1519 }
1520 
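/*
 * Illustrative sketch only (example_cache_cycle is a hypothetical helper,
 * not part of HAMMER): a referenced node is remembered passively, the
 * reference is dropped, and the node can later be reacquired cheaply via
 * hammer_ref_node_safe() as long as it has not been flushed.
 */
#if 0
static void
example_cache_cycle(hammer_transaction_t trans, hammer_node_cache_t cache,
		    hammer_node_t node)
{
	hammer_node_t cached;
	int error;

	hammer_cache_node(cache, node);	/* passive, no ref retained */
	hammer_rel_node(node);

	cached = hammer_ref_node_safe(trans, cache, &error);
	if (cached)
		hammer_rel_node(cached);
}
#endif
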
1521 /*
1522  * Remove a node's cache references and destroy the node if it has no
1523  * other references or backing store.
1524  *
1525  * locked == 0	Normal unlocked operation
1526  * locked == 1	Call hammer_rel_interlock_done(..., 0);
1527  * locked == 2	Call hammer_rel_interlock_done(..., 1);
1528  *
1529  * XXX for now this isn't even close to being MPSAFE so the refs check
1530  *     is sufficient.
1531  */
1532 void
1533 hammer_flush_node(hammer_node_t node, int locked)
1534 {
1535 	hammer_node_cache_t cache;
1536 	hammer_buffer_t buffer;
1537 	hammer_mount_t hmp = node->hmp;
1538 	int dofree;
1539 
1540 	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
1541 		TAILQ_REMOVE(&node->cache_list, cache, entry);
1542 		cache->node = NULL;
1543 	}
1544 
1545 	/*
1546 	 * NOTE: refs is predisposed if another thread is blocking and
1547 	 *	 will be larger than 0 in that case.  We aren't MPSAFE
1548 	 *	 here.
1549 	 */
1550 	if (node->ondisk == NULL && hammer_norefs(&node->lock)) {
1551 		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
1552 		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
1553 		if ((buffer = node->buffer) != NULL) {
1554 			node->buffer = NULL;
1555 			TAILQ_REMOVE(&buffer->clist, node, entry);
1556 			/* buffer is unreferenced because ondisk is NULL */
1557 		}
1558 		dofree = 1;
1559 	} else {
1560 		dofree = 0;
1561 	}
1562 
1563 	/*
1564 	 * Deal with the interlock if locked == 1 or locked == 2.
1565 	 */
1566 	if (locked)
1567 		hammer_rel_interlock_done(&node->lock, locked - 1);
1568 
1569 	/*
1570 	 * Destroy if requested
1571 	 */
1572 	if (dofree) {
1573 		--hammer_count_nodes;
1574 		kfree(node, hmp->m_misc);
1575 	}
1576 }
1577 
1578 /*
1579  * Flush passively cached B-Tree nodes associated with this buffer.
1580  * This is only called when the buffer is about to be destroyed, so
1581  * none of the nodes should have any references.  The buffer is locked.
1582  *
1583  * We may be interlocked with the buffer.
1584  */
1585 void
1586 hammer_flush_buffer_nodes(hammer_buffer_t buffer)
1587 {
1588 	hammer_node_t node;
1589 
1590 	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
1591 		KKASSERT(node->ondisk == NULL);
1592 		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
1593 
1594 		if (hammer_try_interlock_norefs(&node->lock)) {
1595 			hammer_ref(&node->lock);
1596 			node->flags |= HAMMER_NODE_FLUSH;
1597 			_hammer_rel_node(node, 1);
1598 		} else {
1599 			KKASSERT(node->buffer != NULL);
1600 			buffer = node->buffer;
1601 			node->buffer = NULL;
1602 			TAILQ_REMOVE(&buffer->clist, node, entry);
1603 			/* buffer is unreferenced because ondisk is NULL */
1604 		}
1605 	}
1606 }
1607 
1608 
1609 /************************************************************************
1610  *				ALLOCATORS				*
1611  ************************************************************************/
1612 
1613 /*
1614  * Allocate a B-Tree node.
1615  */
1616 hammer_node_t
1617 hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
1618 {
1619 	hammer_buffer_t buffer = NULL;
1620 	hammer_node_t node = NULL;
1621 	hammer_off_t node_offset;
1622 
1623 	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
1624 					    sizeof(struct hammer_node_ondisk),
1625 					    hint, errorp);
1626 	if (*errorp == 0) {
1627 		node = hammer_get_node(trans, node_offset, 1, errorp);
1628 		hammer_modify_node_noundo(trans, node);
1629 		bzero(node->ondisk, sizeof(*node->ondisk));
1630 		hammer_modify_node_done(node);
1631 	}
1632 	if (buffer)
1633 		hammer_rel_buffer(buffer, 0);
1634 	return(node);
1635 }
1636 
1637 /*
1638  * Allocate data.  If the address of a data buffer is supplied then
1639  * any prior non-NULL *data_bufferp will be released and *data_bufferp
1640  * will be set to the related buffer.  The caller must release it when
1641  * finally done.  The initial *data_bufferp should be set to NULL by
1642  * the caller.
1643  *
1644  * The caller is responsible for making hammer_modify*() calls on the
1645  * *data_bufferp.
1646  */
1647 void *
1648 hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
1649 		  u_int16_t rec_type, hammer_off_t *data_offsetp,
1650 		  struct hammer_buffer **data_bufferp,
1651 		  hammer_off_t hint, int *errorp)
1652 {
1653 	void *data;
1654 	int zone;
1655 
1656 	/*
1657 	 * Allocate data
1658 	 */
1659 	if (data_len) {
1660 		switch(rec_type) {
1661 		case HAMMER_RECTYPE_INODE:
1662 		case HAMMER_RECTYPE_DIRENTRY:
1663 		case HAMMER_RECTYPE_EXT:
1664 		case HAMMER_RECTYPE_FIX:
1665 		case HAMMER_RECTYPE_PFS:
1666 		case HAMMER_RECTYPE_SNAPSHOT:
1667 		case HAMMER_RECTYPE_CONFIG:
1668 			zone = HAMMER_ZONE_META_INDEX;
1669 			break;
1670 		case HAMMER_RECTYPE_DATA:
1671 		case HAMMER_RECTYPE_DB:
1672 			if (data_len <= HAMMER_BUFSIZE / 2) {
1673 				zone = HAMMER_ZONE_SMALL_DATA_INDEX;
1674 			} else {
1675 				data_len = (data_len + HAMMER_BUFMASK) &
1676 					   ~HAMMER_BUFMASK;
1677 				zone = HAMMER_ZONE_LARGE_DATA_INDEX;
1678 			}
1679 			break;
1680 		default:
1681 			panic("hammer_alloc_data: rec_type %04x unknown",
1682 			      rec_type);
1683 			zone = 0;	/* NOT REACHED */
1684 			break;
1685 		}
1686 		*data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
1687 						      hint, errorp);
1688 	} else {
1689 		*data_offsetp = 0;
1690 	}
1691 	if (*errorp == 0 && data_bufferp) {
1692 		if (data_len) {
1693 			data = hammer_bread_ext(trans->hmp, *data_offsetp,
1694 						data_len, errorp, data_bufferp);
1695 		} else {
1696 			data = NULL;
1697 		}
1698 	} else {
1699 		data = NULL;
1700 	}
1701 	return(data);
1702 }
1703 
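/*
 * Illustrative sketch only (example_alloc_small_data is a hypothetical
 * helper, not part of HAMMER): allocating a small user-data record via
 * hammer_alloc_data() above.  A data_len at or below HAMMER_BUFSIZE/2
 * lands in the small-data zone; larger allocations are rounded up to a
 * buffer multiple in the large-data zone.  Any writes into the returned
 * data would still have to be bracketed by hammer_modify*() calls.
 */
#if 0
static void
example_alloc_small_data(hammer_transaction_t trans, hammer_off_t hint)
{
	struct hammer_buffer *data_buffer = NULL;
	hammer_off_t data_offset;
	void *data;
	int error;

	data = hammer_alloc_data(trans, 128, HAMMER_RECTYPE_DATA,
				 &data_offset, &data_buffer, hint, &error);
	/* on success, data points at the new record's storage */
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
}
#endif
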
1704 /*
1705  * Sync dirty buffers to the media and clean-up any loose ends.
1706  *
1707  * These functions do not start the flusher going, they simply
1708  * queue everything up to the flusher.
1709  */
1710 static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);
1711 
1712 int
1713 hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
1714 {
1715 	struct hammer_sync_info info;
1716 
1717 	info.error = 0;
1718 	info.waitfor = waitfor;
1719 	if (waitfor == MNT_WAIT) {
1720 		vsyncscan(hmp->mp, VMSC_GETVP | VMSC_ONEPASS,
1721 			  hammer_sync_scan2, &info);
1722 	} else {
1723 		vsyncscan(hmp->mp, VMSC_GETVP | VMSC_ONEPASS | VMSC_NOWAIT,
1724 			  hammer_sync_scan2, &info);
1725 	}
1726 	return(info.error);
1727 }
1728 
1729 /*
1730  * Filesystem sync.  If doing a synchronous sync make a second pass on
1731  * the vnodes in case any were already flushing during the first pass,
1732  * and activate the flusher twice (the second time brings the UNDO FIFO's
1733  * start position up to the end position after the first call).
1734  *
1735  * If doing a lazy sync make just one pass on the vnode list, ignoring
1736  * any new vnodes added to the list while the sync is in progress.
1737  */
1738 int
1739 hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
1740 {
1741 	struct hammer_sync_info info;
1742 	int flags;
1743 
1744 	flags = VMSC_GETVP;
1745 	if (waitfor & MNT_LAZY)
1746 		flags |= VMSC_ONEPASS;
1747 
1748 	info.error = 0;
1749 	info.waitfor = MNT_NOWAIT;
1750 	vsyncscan(hmp->mp, flags | VMSC_NOWAIT, hammer_sync_scan2, &info);
1751 
1752 	if (info.error == 0 && (waitfor & MNT_WAIT)) {
1753 		info.waitfor = waitfor;
1754 		vsyncscan(hmp->mp, flags, hammer_sync_scan2, &info);
1755 	}
1756 	if (waitfor == MNT_WAIT) {
1757 		hammer_flusher_sync(hmp);
1758 		hammer_flusher_sync(hmp);
1759 	} else {
1760 		hammer_flusher_async(hmp, NULL);
1761 		hammer_flusher_async(hmp, NULL);
1762 	}
1763 	return(info.error);
1764 }
1765 
1766 static int
1767 hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
1768 {
1769 	struct hammer_sync_info *info = data;
1770 	struct hammer_inode *ip;
1771 	int error;
1772 
1773 	ip = VTOI(vp);
1774 	if (ip == NULL)
1775 		return(0);
1776 	if (vp->v_type == VNON || vp->v_type == VBAD) {
1777 		vclrisdirty(vp);
1778 		return(0);
1779 	}
1780 	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1781 	    RB_EMPTY(&vp->v_rbdirty_tree)) {
1782 		vclrisdirty(vp);
1783 		return(0);
1784 	}
1785 	error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
1786 	if (error)
1787 		info->error = error;
1788 	return(0);
1789 }
1790