xref: /dragonfly/sys/vfs/hammer/hammer_ondisk.c (revision 78478697)
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
35  */
36 /*
37  * Manage HAMMER's on-disk structures.  These routines are primarily
38  * responsible for interfacing with the kernel's I/O subsystem and for
39  * managing in-memory structures.
40  */
41 
42 #include <sys/nlookup.h>
43 #include <sys/buf2.h>
44 
45 #include "hammer.h"
46 
47 static void hammer_free_volume(hammer_volume_t volume);
48 static int hammer_load_volume(hammer_volume_t volume);
49 static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
50 static int hammer_load_node(hammer_transaction_t trans,
51 				hammer_node_t node, int isnew);
52 static void _hammer_rel_node(hammer_node_t node, int locked);
53 
54 static int
55 hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
56 {
57 	if (vol1->vol_no < vol2->vol_no)
58 		return(-1);
59 	if (vol1->vol_no > vol2->vol_no)
60 		return(1);
61 	return(0);
62 }
63 
64 /*
65  * hammer_buffer structures are indexed via their zoneX_offset, not
66  * their zone2_offset.
67  */
68 static int
69 hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
70 {
71 	if (buf1->zoneX_offset < buf2->zoneX_offset)
72 		return(-1);
73 	if (buf1->zoneX_offset > buf2->zoneX_offset)
74 		return(1);
75 	return(0);
76 }
77 
78 static int
79 hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
80 {
81 	if (node1->node_offset < node2->node_offset)
82 		return(-1);
83 	if (node1->node_offset > node2->node_offset)
84 		return(1);
85 	return(0);
86 }
87 
88 RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
89 	     hammer_vol_rb_compare, int32_t, vol_no);
90 RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
91 	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
92 RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
93 	     hammer_nod_rb_compare, hammer_off_t, node_offset);
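/*
 * Illustrative sketch: RB_GENERATE2() emits typed red-black tree
 * operations keyed on the scalar field named in its final two
 * arguments, which is what permits the direct keyed lookups used
 * throughout this file, e.g.:
 */
#if 0
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, zoneX_offset);
	node   = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
#endif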
94 
95 /************************************************************************
96  *				VOLUMES					*
97  ************************************************************************
98  *
99  * Load a HAMMER volume by name.  Returns 0 on success or a positive error
100  * code on failure.  Volumes must be loaded at mount time or via the hammer
101  * volume-add command; hammer_get_volume() will not load a new volume.
102  *
103  * The passed devvp is vref()'d but not locked.  This function consumes the
104  * ref (typically by associating it with the volume structure).
105  *
106  * Calls made to hammer_load_volume() or single-threaded
107  */
108 int
109 hammer_install_volume(hammer_mount_t hmp, const char *volname,
110 		      struct vnode *devvp, void *data)
111 {
112 	struct mount *mp;
113 	hammer_volume_t volume;
114 	struct hammer_volume_ondisk *ondisk;
115 	struct hammer_volume_ondisk *img;
116 	struct nlookupdata nd;
117 	struct buf *bp = NULL;
118 	int error;
119 	int ronly;
120 	int setmp = 0;
121 	int i;
122 
123 	mp = hmp->mp;
124 	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
125 
126 	/*
127 	 * Allocate a volume structure
128 	 */
129 	++hammer_count_volumes;
130 	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
131 	volume->vol_name = kstrdup(volname, hmp->m_misc);
132 	volume->io.hmp = hmp;	/* bootstrap */
133 	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
134 	volume->io.offset = 0LL;
135 	volume->io.bytes = HAMMER_BUFSIZE;
136 
137 	/*
138 	 * Get the device vnode
139 	 */
140 	if (devvp == NULL) {
141 		error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
142 		if (error == 0)
143 			error = nlookup(&nd);
144 		if (error == 0)
145 			error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
146 		nlookup_done(&nd);
147 	} else {
148 		error = 0;
149 		volume->devvp = devvp;
150 	}
151 
152 	if (error == 0) {
153 		if (vn_isdisk(volume->devvp, &error)) {
154 			error = vfs_mountedon(volume->devvp);
155 		}
156 	}
157 	if (error == 0 && vcount(volume->devvp) > 0)
158 		error = EBUSY;
159 	if (error == 0) {
160 		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
161 		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
162 		if (error == 0) {
163 			error = VOP_OPEN(volume->devvp,
164 					 (ronly ? FREAD : FREAD|FWRITE),
165 					 FSCRED, NULL);
166 		}
167 		vn_unlock(volume->devvp);
168 	}
169 	if (error) {
170 		hammer_free_volume(volume);
171 		return(error);
172 	}
173 	volume->devvp->v_rdev->si_mountpoint = mp;
174 	setmp = 1;
175 
176 	/*
177 	 * Extract the volume number from the volume header and do various
178 	 * sanity checks.
179 	 */
180 	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
181 	if (error)
182 		goto late_failure;
183 	ondisk = (void *)bp->b_data;
184 
185 	/*
186 	 * Initialize the volume header with data if the data is specified.
187 	 */
188 	if (ronly == 0 && data) {
189 		img = (struct hammer_volume_ondisk *)data;
190 		if (ondisk->vol_signature == HAMMER_FSBUF_VOLUME) {
191 			hkprintf("Formatting of valid HAMMER volume "
192 				"%s denied. Erase with dd!\n", volname);
193 			error = EFTYPE;
194 			goto late_failure;
195 		}
196 		bcopy(img, ondisk, sizeof(*img));
197 	}
198 
199 	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
200 		hkprintf("volume %s has an invalid header\n",
201 			volume->vol_name);
202 		for (i = 0; i < (int)sizeof(ondisk->vol_signature); i++) {
203 			kprintf("%02x", ((char*)&ondisk->vol_signature)[i] & 0xFF);
204 			if (i != (int)sizeof(ondisk->vol_signature) - 1)
205 				kprintf(" ");
206 		}
207 		kprintf("\n");
208 		error = EFTYPE;
209 		goto late_failure;
210 	}
211 	volume->vol_no = ondisk->vol_no;
212 	volume->vol_flags = ondisk->vol_flags;
213 	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
214 				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
215 
216 	if (RB_EMPTY(&hmp->rb_vols_root)) {
217 		hmp->fsid = ondisk->vol_fsid;
218 	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
219 		hkprintf("volume %s's fsid does not match other volumes\n",
220 			volume->vol_name);
221 		error = EFTYPE;
222 		goto late_failure;
223 	}
224 
225 	/*
226 	 * Insert the volume structure into the red-black tree.
227 	 */
228 	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
229 		hkprintf("volume %s has a duplicate vol_no %d\n",
230 			volume->vol_name, volume->vol_no);
231 		error = EEXIST;
232 	}
233 
234 	if (error == 0)
235 		HAMMER_VOLUME_NUMBER_ADD(hmp, volume);
236 
237 	/*
238 	 * Set the root volume.  HAMMER special-cases the rootvol structure.
239 	 * We do not hold a ref because this would prevent related I/O
240 	 * from being flushed.
241 	 */
242 	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
243 		hmp->rootvol = volume;
244 		hmp->nvolumes = ondisk->vol_count;
245 		if (bp) {
246 			brelse(bp);
247 			bp = NULL;
248 		}
249 		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
250 						HAMMER_BUFFERS_PER_BIGBLOCK;
251 		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
252 						HAMMER_BUFFERS_PER_BIGBLOCK;
253 	}
254 late_failure:
255 	if (bp)
256 		brelse(bp);
257 	if (error) {
258 		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
259 		if (setmp)
260 			volume->devvp->v_rdev->si_mountpoint = NULL;
261 		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
262 		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE, NULL);
263 		vn_unlock(volume->devvp);
264 		hammer_free_volume(volume);
265 	}
266 	return (error);
267 }
268 
269 /*
270  * This is called for each volume when updating the mount point from
271  * read-write to read-only or vice-versa.
272  */
273 int
274 hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
275 {
276 	if (volume->devvp) {
277 		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
278 		if (volume->io.hmp->ronly) {
279 			/* do not call vinvalbuf */
280 			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
281 			VOP_CLOSE(volume->devvp, FREAD|FWRITE, NULL);
282 		} else {
283 			/* do not call vinvalbuf */
284 			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
285 			VOP_CLOSE(volume->devvp, FREAD, NULL);
286 		}
287 		vn_unlock(volume->devvp);
288 	}
289 	return(0);
290 }
291 
292 /*
293  * Unload and free a HAMMER volume.  Must return >= 0 to continue the
294  * scan, so errors are reported but not propagated.
295  */
296 int
297 hammer_unload_volume(hammer_volume_t volume, void *data)
298 {
299 	hammer_mount_t hmp = volume->io.hmp;
300 	struct buf *bp = NULL;
301 	struct hammer_volume_ondisk *img;
302 	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
303 	int error;
304 
305 	/*
306 	 * Clear the volume header with data if the data is specified.
307 	 */
308 	if (ronly == 0 && data && volume->devvp) {
309 		img = (struct hammer_volume_ondisk *)data;
310 		error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
311 		if (error || bp->b_bcount < sizeof(*img)) {
312 			hmkprintf(hmp, "Failed to read volume header: %d\n", error);
313 			brelse(bp);
314 		} else {
315 			bcopy(img, bp->b_data, sizeof(*img));
316 			error = bwrite(bp);
317 			if (error)
318 				hmkprintf(hmp, "Failed to clear volume header: %d\n",
319 					error);
320 		}
321 	}
322 
323 	/*
324 	 * Clean up the root volume pointer, which is held unlocked in hmp.
325 	 */
326 	if (hmp->rootvol == volume)
327 		hmp->rootvol = NULL;
328 
329 	/*
330 	 * We must not flush a dirty buffer to disk on umount.  It should
331 	 * have already been dealt with by the flusher, or we may be in
332 	 * catastrophic failure.
333 	 */
334 	hammer_io_clear_modify(&volume->io, 1);
335 	volume->io.waitdep = 1;
336 
337 	/*
338 	 * Clean up the persistent ref ioerror might have on the volume
339 	 */
340 	if (volume->io.ioerror)
341 		hammer_io_clear_error_noassert(&volume->io);
342 
343 	/*
344 	 * This should release the bp.  Releasing the volume with flush set
345 	 * implies the interlock is set.
346 	 */
347 	hammer_ref_interlock_true(&volume->io.lock);
348 	hammer_rel_volume(volume, 1);
349 	KKASSERT(volume->io.bp == NULL);
350 
351 	/*
352 	 * There should be no references on the volume.
353 	 */
354 	KKASSERT(hammer_norefs(&volume->io.lock));
355 
356 	volume->ondisk = NULL;
357 	if (volume->devvp) {
358 		if (volume->devvp->v_rdev &&
359 		    volume->devvp->v_rdev->si_mountpoint == hmp->mp) {
360 			volume->devvp->v_rdev->si_mountpoint = NULL;
361 		}
362 		if (ronly) {
363 			/*
364 			 * Make sure we don't sync anything to disk if we
365 			 * are in read-only mode (1) or critically-errored
366 			 * (2).  Note that there may be dirty buffers in
367 			 * normal read-only mode from crash recovery.
368 			 */
369 			vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
370 			vinvalbuf(volume->devvp, 0, 0, 0);
371 			VOP_CLOSE(volume->devvp, FREAD, NULL);
372 			vn_unlock(volume->devvp);
373 		} else {
374 			/*
375 			 * Normal termination, save any dirty buffers
376 			 * (XXX there really shouldn't be any).
377 			 */
378 			vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
379 			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
380 			VOP_CLOSE(volume->devvp, FREAD|FWRITE, NULL);
381 			vn_unlock(volume->devvp);
382 		}
383 	}
384 
385 	/*
386 	 * Destroy the structure
387 	 */
388 	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
389 	HAMMER_VOLUME_NUMBER_DEL(hmp, volume);
390 	hammer_free_volume(volume);
391 	return(0);
392 }
393 
394 static
395 void
396 hammer_free_volume(hammer_volume_t volume)
397 {
398 	hammer_mount_t hmp = volume->io.hmp;
399 
400 	if (volume->vol_name) {
401 		kfree(volume->vol_name, hmp->m_misc);
402 		volume->vol_name = NULL;
403 	}
404 	if (volume->devvp) {
405 		vrele(volume->devvp);
406 		volume->devvp = NULL;
407 	}
408 	--hammer_count_volumes;
409 	kfree(volume, hmp->m_misc);
410 }
411 
412 /*
413  * Get a HAMMER volume.  The volume must already exist.
414  */
415 hammer_volume_t
416 hammer_get_volume(hammer_mount_t hmp, int32_t vol_no, int *errorp)
417 {
418 	struct hammer_volume *volume;
419 
420 	/*
421 	 * Locate the volume structure
422 	 */
423 	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
424 	if (volume == NULL) {
425 		*errorp = ENOENT;
426 		return(NULL);
427 	}
428 
429 	/*
430 	 * Reference the volume, load/check the data on the 0->1 transition.
431 	 * hammer_load_volume() will dispose of the interlock on return,
432 	 * and also clean up the ref count on error.
433 	 */
434 	if (hammer_ref_interlock(&volume->io.lock)) {
435 		*errorp = hammer_load_volume(volume);
436 		if (*errorp)
437 			volume = NULL;
438 	} else {
439 		KKASSERT(volume->ondisk);
440 		*errorp = 0;
441 	}
442 	return(volume);
443 }
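
/*
 * Illustrative sketch (assumes a valid mounted hmp): the typical
 * acquire/release pairing for a volume.  On success the returned
 * volume is referenced and volume->ondisk is valid;
 * hammer_rel_volume() drops the reference.
 */
#if 0
static void
example_volume_access(hammer_mount_t hmp, int32_t vol_no)
{
	hammer_volume_t volume;
	int error;

	volume = hammer_get_volume(hmp, vol_no, &error);
	if (volume) {
		/* ... read fields from volume->ondisk ... */
		hammer_rel_volume(volume, 0);
	}
}
#endif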
444 
445 int
446 hammer_ref_volume(hammer_volume_t volume)
447 {
448 	int error;
449 
450 	/*
451 	 * Reference the volume and deal with the check condition used to
452 	 * load its ondisk info.
453 	 */
454 	if (hammer_ref_interlock(&volume->io.lock)) {
455 		error = hammer_load_volume(volume);
456 	} else {
457 		KKASSERT(volume->ondisk);
458 		error = 0;
459 	}
460 	return (error);
461 }
462 
463 /*
464  * May be called without fs_token
465  */
466 hammer_volume_t
467 hammer_get_root_volume(hammer_mount_t hmp, int *errorp)
468 {
469 	hammer_volume_t volume;
470 
471 	volume = hmp->rootvol;
472 	KKASSERT(volume != NULL);
473 
474 	/*
475 	 * Reference the volume and deal with the check condition used to
476 	 * load its ondisk info.
477 	 */
478 	if (hammer_ref_interlock(&volume->io.lock)) {
479 		lwkt_gettoken(&volume->io.hmp->fs_token);
480 		*errorp = hammer_load_volume(volume);
481 		lwkt_reltoken(&volume->io.hmp->fs_token);
482 		if (*errorp)
483 			volume = NULL;
484 	} else {
485 		KKASSERT(volume->ondisk);
486 		*errorp = 0;
487 	}
488 	return (volume);
489 }
490 
491 /*
492  * Load a volume's on-disk information.  The volume must be referenced and
493  * the interlock is held on call.  The interlock will be released on return.
494  * The reference will also be released on return if an error occurs.
495  */
496 static int
497 hammer_load_volume(hammer_volume_t volume)
498 {
499 	int error;
500 
501 	if (volume->ondisk == NULL) {
502 		error = hammer_io_read(volume->devvp, &volume->io,
503 				       HAMMER_BUFSIZE);
504 		if (error == 0) {
505 			volume->ondisk = (void *)volume->io.bp->b_data;
506 			hammer_ref_interlock_done(&volume->io.lock);
507 		} else {
508 			hammer_rel_volume(volume, 1);
509 		}
510 	} else {
511 		error = 0;
512 	}
513 	return(error);
514 }
515 
516 /*
517  * Release a previously acquired reference on the volume.
518  *
519  * Volumes are not unloaded from memory during normal operation.
520  *
521  * May be called without fs_token
522  */
523 void
524 hammer_rel_volume(hammer_volume_t volume, int locked)
525 {
526 	struct buf *bp;
527 
528 	if (hammer_rel_interlock(&volume->io.lock, locked)) {
529 		lwkt_gettoken(&volume->io.hmp->fs_token);
530 		volume->ondisk = NULL;
531 		bp = hammer_io_release(&volume->io, locked);
532 		lwkt_reltoken(&volume->io.hmp->fs_token);
533 		hammer_rel_interlock_done(&volume->io.lock, locked);
534 		if (bp)
535 			brelse(bp);
536 	}
537 }
538 
539 int
540 hammer_mountcheck_volumes(hammer_mount_t hmp)
541 {
542 	hammer_volume_t vol;
543 	int i;
544 
545 	HAMMER_VOLUME_NUMBER_FOREACH(hmp, i) {
546 		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
547 		if (vol == NULL)
548 			return(EINVAL);
549 	}
550 	return(0);
551 }
552 
553 int
554 hammer_get_installed_volumes(hammer_mount_t hmp)
555 {
556 	int i, ret = 0;
557 
558 	HAMMER_VOLUME_NUMBER_FOREACH(hmp, i)
559 		ret++;
560 	return(ret);
561 }
562 
563 /************************************************************************
564  *				BUFFERS					*
565  ************************************************************************
566  *
567  * Manage buffers.  Currently most blockmap-backed zones are direct-mapped
568  * to zone-2 buffer offsets, without a translation stage.  However, the
569  * hammer_buffer structure is indexed by its zoneX_offset, not its
570  * zone2_offset.
571  *
572  * The proper zone must be maintained throughout the code-base all the way
573  * through to the big-block allocator, or routines like hammer_del_buffers()
574  * will not be able to locate all potentially conflicting buffers.
575  */
576 
577 /*
578  * Helper function returns whether a zone offset can be directly translated
579  * to a raw buffer index or not.  Really only the volume and undo zones
580  * can't be directly translated.  Volumes are special-cased and undo zones
581  * shouldn't be aliased accessed in read-only mode.
582  *
583  * This function is ONLY used to detect aliased zones during a read-only
584  * mount.
585  */
586 static __inline int
587 hammer_direct_zone(hammer_off_t buf_offset)
588 {
589 	int zone = HAMMER_ZONE_DECODE(buf_offset);
590 
591 	return(hammer_is_direct_mapped_index(zone));
592 }
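
/*
 * Illustrative sketch of what "directly translated" means here: for a
 * direct-mapped zone the zone-X offset differs from the zone-2 (raw
 * buffer) offset only in its zone bits, so the read-only alias check
 * can recompute the raw key with a simple re-encode:
 */
#if 0
	hammer_off_t raw_offset;

	if (hmp->ronly && hammer_direct_zone(buf_offset)) {
		raw_offset = hammer_xlate_to_zone2(buf_offset);
		/* look the buffer up again under its zone-2 alias */
	}
#endif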
593 
594 hammer_buffer_t
595 hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
596 		  int bytes, int isnew, int *errorp)
597 {
598 	hammer_buffer_t buffer;
599 	hammer_volume_t volume;
600 	hammer_off_t	zone2_offset;
601 	hammer_io_type_t iotype;
602 	int vol_no;
603 	int zone;
604 
605 	buf_offset &= ~HAMMER_BUFMASK64;
606 again:
607 	/*
608 	 * Shortcut if the buffer is already cached
609 	 */
610 	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
611 	if (buffer) {
612 		/*
613 		 * Once refed the ondisk field will not be cleared by
614 		 * any other action.  Shortcut the operation if the
615 		 * ondisk structure is valid.
616 		 */
617 found_aliased:
618 		if (hammer_ref_interlock(&buffer->io.lock) == 0) {
619 			hammer_io_advance(&buffer->io);
620 			KKASSERT(buffer->ondisk);
621 			*errorp = 0;
622 			return(buffer);
623 		}
624 
625 		/*
626 		 * 0->1 transition or deferred 0->1 transition (CHECK),
627 		 * interlock now held.  Shortcut if ondisk is already
628 		 * assigned.
629 		 */
630 		atomic_add_int(&hammer_count_refedbufs, 1);
631 		if (buffer->ondisk) {
632 			hammer_io_advance(&buffer->io);
633 			hammer_ref_interlock_done(&buffer->io.lock);
634 			*errorp = 0;
635 			return(buffer);
636 		}
637 
638 		/*
639 		 * The buffer is no longer loose if it has a ref, and
640 		 * cannot become loose once it gains a ref.  Loose
641 		 * buffers will never be in a modified state.  This should
642 		 * only occur on the 0->1 transition of refs.
643 		 *
644 		 * lose_root can be modified via a biodone() interrupt
645 		 * so the io_token must be held.
646 		 */
647 		if (buffer->io.mod_root == &hmp->lose_root) {
648 			lwkt_gettoken(&hmp->io_token);
649 			if (buffer->io.mod_root == &hmp->lose_root) {
650 				RB_REMOVE(hammer_mod_rb_tree,
651 					  buffer->io.mod_root, &buffer->io);
652 				buffer->io.mod_root = NULL;
653 				KKASSERT(buffer->io.modified == 0);
654 			}
655 			lwkt_reltoken(&hmp->io_token);
656 		}
657 		goto found;
658 	} else if (hmp->ronly && hammer_direct_zone(buf_offset)) {
659 		/*
660 		 * If this is a read-only mount there could be an alias
661 		 * in the raw-zone.  If there is we use that buffer instead.
662 		 *
663 		 * rw mounts will not have aliases.  Also note when going
664 		 * from ro -> rw the recovered raw buffers are flushed and
665 		 * reclaimed, so again there will not be any aliases once
666 		 * the mount is rw.
667 		 */
668 		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
669 				   hammer_xlate_to_zone2(buf_offset));
670 		if (buffer) {
671 			if (hammer_debug_general & 0x0001) {
672 				hkrateprintf(&hmp->kdiag,
673 					    "recovered aliased %016jx\n",
674 					    (intmax_t)buf_offset);
675 			}
676 			goto found_aliased;
677 		}
678 	}
679 
680 	/*
681 	 * What is the buffer class?
682 	 */
683 	zone = HAMMER_ZONE_DECODE(buf_offset);
684 
685 	switch(zone) {
686 	case HAMMER_ZONE_LARGE_DATA_INDEX:
687 	case HAMMER_ZONE_SMALL_DATA_INDEX:
688 		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
689 		break;
690 	case HAMMER_ZONE_UNDO_INDEX:
691 		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
692 		break;
693 	case HAMMER_ZONE_META_INDEX:
694 	default:
695 		/*
696 		 * NOTE: inode data and directory entries are placed in this
697 		 * zone.  inode atime/mtime is updated in-place and thus
698 		 * buffers containing inodes must be synchronized as
699 		 * meta-buffers, same as buffers containing B-Tree info.
700 		 */
701 		iotype = HAMMER_STRUCTURE_META_BUFFER;
702 		break;
703 	}
704 
705 	/*
706 	 * Handle blockmap offset translations
707 	 */
708 	if (hammer_is_zone2_mapped_index(zone)) {
709 		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
710 	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
711 		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
712 	} else {
713 		/* Must be zone-2 (not 1 or 4 or 15) */
714 		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
715 		zone2_offset = buf_offset;
716 		*errorp = 0;
717 	}
718 	if (*errorp)
719 		return(NULL);
720 
721 	/*
722 	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
723 	 * specifications.
724 	 */
725 	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
726 		 HAMMER_ZONE_RAW_BUFFER);
727 	vol_no = HAMMER_VOL_DECODE(zone2_offset);
728 	volume = hammer_get_volume(hmp, vol_no, errorp);
729 	if (volume == NULL)
730 		return(NULL);
731 
732 	KKASSERT(zone2_offset < volume->maxbuf_off);
733 
734 	/*
735 	 * Allocate a new buffer structure.  We will check for races later.
736 	 */
737 	++hammer_count_buffers;
738 	buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
739 			 M_WAITOK|M_ZERO|M_USE_RESERVE);
740 	buffer->zone2_offset = zone2_offset;
741 	buffer->zoneX_offset = buf_offset;
742 
743 	hammer_io_init(&buffer->io, volume, iotype);
744 	buffer->io.offset = hammer_xlate_to_phys(volume->ondisk, zone2_offset);
745 	buffer->io.bytes = bytes;
746 	TAILQ_INIT(&buffer->clist);
747 	hammer_ref_interlock_true(&buffer->io.lock);
748 
749 	/*
750 	 * Insert the buffer into the RB tree and handle late collisions.
751 	 */
752 	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
753 		hammer_rel_volume(volume, 0);
754 		buffer->io.volume = NULL;			/* safety */
755 		if (hammer_rel_interlock(&buffer->io.lock, 1))	/* safety */
756 			hammer_rel_interlock_done(&buffer->io.lock, 1);
757 		--hammer_count_buffers;
758 		kfree(buffer, hmp->m_misc);
759 		goto again;
760 	}
761 	atomic_add_int(&hammer_count_refedbufs, 1);
762 found:
763 
764 	/*
765 	 * The buffer is referenced and interlocked.  Load the buffer
766 	 * if necessary.  hammer_load_buffer() deals with the interlock
767 	 * and, if an error is returned, also deals with the ref.
768 	 */
769 	if (buffer->ondisk == NULL) {
770 		*errorp = hammer_load_buffer(buffer, isnew);
771 		if (*errorp)
772 			buffer = NULL;
773 	} else {
774 		hammer_io_advance(&buffer->io);
775 		hammer_ref_interlock_done(&buffer->io.lock);
776 		*errorp = 0;
777 	}
778 	return(buffer);
779 }
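
/*
 * Illustrative sketch: acquiring and releasing a buffer directly.  On
 * success the buffer is referenced, buffer->ondisk points at the
 * backing data and *errorp is 0; on failure NULL is returned.
 */
#if 0
static void
example_buffer_access(hammer_mount_t hmp, hammer_off_t buf_offset)
{
	hammer_buffer_t buffer;
	int error;

	buffer = hammer_get_buffer(hmp, buf_offset, HAMMER_BUFSIZE, 0, &error);
	if (buffer) {
		/* ... inspect buffer->ondisk ... */
		hammer_rel_buffer(buffer, 0);
	}
}
#endif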
780 
781 /*
782  * This is used by the direct-read code to deal with large-data buffers
783  * created by the reblocker and mirror-write code.  The direct-read code
784  * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
785  * running hammer buffers must be fully synced to disk before we can issue
786  * the direct-read.
787  *
788  * This code path is not considered critical as only the reblocker and
789  * mirror-write code will create large-data buffers via the HAMMER buffer
790  * subsystem.  They do that because they operate at the B-Tree level and
791  * do not access the vnode/inode structures.
792  */
793 void
794 hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
795 {
796 	hammer_buffer_t buffer;
797 	int error;
798 
799 	KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
800 		 HAMMER_ZONE_LARGE_DATA);
801 
802 	while (bytes > 0) {
803 		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
804 				   base_offset);
805 		if (buffer && (buffer->io.modified || buffer->io.running)) {
806 			error = hammer_ref_buffer(buffer);
807 			if (error == 0) {
808 				hammer_io_wait(&buffer->io);
809 				if (buffer->io.modified) {
810 					hammer_io_write_interlock(&buffer->io);
811 					hammer_io_flush(&buffer->io, 0);
812 					hammer_io_done_interlock(&buffer->io);
813 					hammer_io_wait(&buffer->io);
814 				}
815 				hammer_rel_buffer(buffer, 0);
816 			}
817 		}
818 		base_offset += HAMMER_BUFSIZE;
819 		bytes -= HAMMER_BUFSIZE;
820 	}
821 }
822 
823 /*
824  * Destroy all buffers covering the specified zoneX offset range.  This
825  * is called when the related blockmap layer2 entry is freed or when
826  * a direct write bypasses our buffer/buffer-cache subsystem.
827  *
828  * The buffers may be referenced by the caller itself.  Setting reclaim
829  * will cause the buffer to be destroyed when its ref count reaches zero.
830  *
831  * Return 0 on success, EAGAIN if some buffers could not be destroyed due
832  * to additional references held by other threads, or some other (typically
833  * fatal) error.
834  */
835 int
836 hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
837 		   hammer_off_t zone2_offset, int bytes,
838 		   int report_conflicts)
839 {
840 	hammer_buffer_t buffer;
841 	hammer_volume_t volume;
842 	int vol_no;
843 	int error;
844 	int ret_error;
845 
846 	vol_no = HAMMER_VOL_DECODE(zone2_offset);
847 	volume = hammer_get_volume(hmp, vol_no, &ret_error);
848 	KKASSERT(ret_error == 0);
849 
850 	while (bytes > 0) {
851 		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
852 				   base_offset);
853 		if (buffer) {
854 			error = hammer_ref_buffer(buffer);
855 			if (hammer_debug_general & 0x20000) {
856 				hkprintf("delbufr %016jx rerr=%d 1ref=%d\n",
857 					(intmax_t)buffer->zoneX_offset,
858 					error,
859 					hammer_oneref(&buffer->io.lock));
860 			}
861 			if (error == 0 && !hammer_oneref(&buffer->io.lock)) {
862 				error = EAGAIN;
863 				hammer_rel_buffer(buffer, 0);
864 			}
865 			if (error == 0) {
866 				KKASSERT(buffer->zone2_offset == zone2_offset);
867 				hammer_io_clear_modify(&buffer->io, 1);
868 				buffer->io.reclaim = 1;
869 				buffer->io.waitdep = 1;
870 				KKASSERT(buffer->io.volume == volume);
871 				hammer_rel_buffer(buffer, 0);
872 			}
873 		} else {
874 			error = hammer_io_inval(volume, zone2_offset);
875 		}
876 		if (error) {
877 			ret_error = error;
878 			if (report_conflicts ||
879 			    (hammer_debug_general & 0x8000)) {
880 				krateprintf(&hmp->kdiag,
881 					"hammer_del_buffers: unable to "
882 					"invalidate %016llx buffer=%p "
883 					"rep=%d lkrefs=%08x\n",
884 					(long long)base_offset,
885 					buffer, report_conflicts,
886 					(buffer ? buffer->io.lock.refs : -1));
887 			}
888 		}
889 		base_offset += HAMMER_BUFSIZE;
890 		zone2_offset += HAMMER_BUFSIZE;
891 		bytes -= HAMMER_BUFSIZE;
892 	}
893 	hammer_rel_volume(volume, 0);
894 	return (ret_error);
895 }
896 
897 /*
898  * Given a referenced and interlocked buffer load/validate the data.
899  *
900  * The buffer interlock will be released on return.  If an error is
901  * returned the buffer reference will also be released (and the buffer
902  * pointer will thus be stale).
903  */
904 static int
905 hammer_load_buffer(hammer_buffer_t buffer, int isnew)
906 {
907 	hammer_volume_t volume;
908 	int error;
909 
910 	/*
911 	 * Load the buffer's on-disk info
912 	 */
913 	volume = buffer->io.volume;
914 
915 	if (hammer_debug_io & 0x0004) {
916 		hdkprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
917 			(long long)buffer->zoneX_offset,
918 			(long long)buffer->zone2_offset,
919 			isnew, buffer->ondisk);
920 	}
921 
922 	if (buffer->ondisk == NULL) {
923 		/*
924 		 * Issue the read or generate a new buffer.  When reading
925 		 * the limit argument controls any read-ahead clustering
926 		 * hammer_io_read() is allowed to do.
927 		 *
928 		 * We cannot read-ahead in the large-data zone and we cannot
929 		 * cross a big-block boundary as the next big-block might
930 		 * use a different buffer size.
931 		 */
932 		if (isnew) {
933 			error = hammer_io_new(volume->devvp, &buffer->io);
934 		} else if ((buffer->zoneX_offset & HAMMER_OFF_ZONE_MASK) ==
935 			   HAMMER_ZONE_LARGE_DATA) {
936 			error = hammer_io_read(volume->devvp, &buffer->io,
937 					       buffer->io.bytes);
938 		} else {
939 			hammer_off_t limit;
940 
941 			limit = (buffer->zone2_offset +
942 				 HAMMER_BIGBLOCK_MASK64) &
943 				~HAMMER_BIGBLOCK_MASK64;
944 			limit -= buffer->zone2_offset;
945 			error = hammer_io_read(volume->devvp, &buffer->io,
946 					       limit);
947 		}
948 		if (error == 0)
949 			buffer->ondisk = (void *)buffer->io.bp->b_data;
950 	} else if (isnew) {
951 		error = hammer_io_new(volume->devvp, &buffer->io);
952 	} else {
953 		error = 0;
954 	}
955 	if (error == 0) {
956 		hammer_io_advance(&buffer->io);
957 		hammer_ref_interlock_done(&buffer->io.lock);
958 	} else {
959 		hammer_rel_buffer(buffer, 1);
960 	}
961 	return (error);
962 }
963 
964 /*
965  * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
966  * This routine is only called during unmount or when a volume is
967  * removed.
968  *
969  * If data != NULL, it specifies a volume whose buffers should
970  * be unloaded.
971  */
972 int
973 hammer_unload_buffer(hammer_buffer_t buffer, void *data)
974 {
975 	struct hammer_volume *volume = (struct hammer_volume *) data;
976 
977 	/*
978 	 * If volume != NULL we are only interested in unloading buffers
979 	 * associated with a particular volume.
980 	 */
981 	if (volume != NULL && volume != buffer->io.volume)
982 		return 0;
983 
984 	/*
985 	 * Clean up the persistent ref that ioerror might have on the buffer
986 	 * and acquire a ref.  Expect a 0->1 transition.
987 	 */
988 	if (buffer->io.ioerror) {
989 		hammer_io_clear_error_noassert(&buffer->io);
990 		atomic_add_int(&hammer_count_refedbufs, -1);
991 	}
992 	hammer_ref_interlock_true(&buffer->io.lock);
993 	atomic_add_int(&hammer_count_refedbufs, 1);
994 
995 	/*
996 	 * We must not flush a dirty buffer to disk on umount.  It should
997 	 * have already been dealt with by the flusher, or we may be in
998 	 * catastrophic failure.
999 	 *
1000 	 * We must set waitdep to ensure that a running buffer is waited
1001 	 * on and released prior to us trying to unload the volume.
1002 	 */
1003 	hammer_io_clear_modify(&buffer->io, 1);
1004 	hammer_flush_buffer_nodes(buffer);
1005 	buffer->io.waitdep = 1;
1006 	hammer_rel_buffer(buffer, 1);
1007 	return(0);
1008 }
1009 
1010 /*
1011  * Reference a buffer that is either already referenced or reachable via
1012  * a specially handled pointer (aka cursor->buffer).
1013  */
1014 int
1015 hammer_ref_buffer(hammer_buffer_t buffer)
1016 {
1017 	hammer_mount_t hmp;
1018 	int error;
1019 	int locked;
1020 
1021 	/*
1022 	 * Acquire a ref, plus the buffer will be interlocked on the
1023 	 * 0->1 transition.
1024 	 */
1025 	locked = hammer_ref_interlock(&buffer->io.lock);
1026 	hmp = buffer->io.hmp;
1027 
1028 	/*
1029 	 * At this point a biodone() will not touch the buffer other than
1030 	 * incidental bits.  However, lose_root can be modified via
1031 	 * a biodone() interrupt.
1032 	 *
1033 	 * No longer loose.  lose_root requires the io_token.
1034 	 */
1035 	if (buffer->io.mod_root == &hmp->lose_root) {
1036 		lwkt_gettoken(&hmp->io_token);
1037 		if (buffer->io.mod_root == &hmp->lose_root) {
1038 			RB_REMOVE(hammer_mod_rb_tree,
1039 				  buffer->io.mod_root, &buffer->io);
1040 			buffer->io.mod_root = NULL;
1041 		}
1042 		lwkt_reltoken(&hmp->io_token);
1043 	}
1044 
1045 	if (locked) {
1046 		atomic_add_int(&hammer_count_refedbufs, 1);
1047 		error = hammer_load_buffer(buffer, 0);
1048 		/* NOTE: on error the buffer pointer is stale */
1049 	} else {
1050 		error = 0;
1051 	}
1052 	return(error);
1053 }
1054 
1055 /*
1056  * Release a reference on the buffer.  On the 1->0 transition the
1057  * underlying IO will be released but the data reference is left
1058  * cached.
1059  *
1060  * Only destroy the structure itself if the related buffer cache buffer
1061  * was disassociated from it.  This ties the management of the structure
1062  * to the buffer cache subsystem.  buffer->ondisk determines whether the
1063  * embedded io is referenced or not.
1064  */
1065 void
1066 hammer_rel_buffer(hammer_buffer_t buffer, int locked)
1067 {
1068 	hammer_volume_t volume;
1069 	hammer_mount_t hmp;
1070 	struct buf *bp = NULL;
1071 	int freeme = 0;
1072 
1073 	hmp = buffer->io.hmp;
1074 
1075 	if (hammer_rel_interlock(&buffer->io.lock, locked) == 0)
1076 		return;
1077 
1078 	/*
1079 	 * hammer_count_refedbufs accounting.  Decrement if we are in
1080 	 * the error path or if CHECK is clear.
1081 	 *
1082 	 * If we are not in the error path and CHECK is set the caller
1083 	 * probably just did a hammer_ref() and didn't account for it,
1084 	 * so we don't account for the loss here.
1085 	 */
1086 	if (locked || (buffer->io.lock.refs & HAMMER_REFS_CHECK) == 0)
1087 		atomic_add_int(&hammer_count_refedbufs, -1);
1088 
1089 	/*
1090 	 * If the caller locked us, or the normal release transitioned
1091 	 * from 1->0 (and acquired the lock), attempt to release the
1092 	 * io.  If the caller locked us we tell hammer_io_release()
1093 	 * to flush (which would be the unload or failure path).
1094 	 */
1095 	bp = hammer_io_release(&buffer->io, locked);
1096 
1097 	/*
1098 	 * If the buffer has no bp association and no refs we can destroy
1099 	 * it.
1100 	 *
1101 	 * NOTE: It is impossible for any associated B-Tree nodes to have
1102 	 * refs if the buffer has no additional refs.
1103 	 */
1104 	if (buffer->io.bp == NULL && hammer_norefs(&buffer->io.lock)) {
1105 		RB_REMOVE(hammer_buf_rb_tree,
1106 			  &buffer->io.hmp->rb_bufs_root,
1107 			  buffer);
1108 		volume = buffer->io.volume;
1109 		buffer->io.volume = NULL; /* sanity */
1110 		hammer_rel_volume(volume, 0);
1111 		hammer_io_clear_modlist(&buffer->io);
1112 		hammer_flush_buffer_nodes(buffer);
1113 		KKASSERT(TAILQ_EMPTY(&buffer->clist));
1114 		freeme = 1;
1115 	}
1116 
1117 	/*
1118 	 * Cleanup
1119 	 */
1120 	hammer_rel_interlock_done(&buffer->io.lock, locked);
1121 	if (bp)
1122 		brelse(bp);
1123 	if (freeme) {
1124 		--hammer_count_buffers;
1125 		kfree(buffer, hmp->m_misc);
1126 	}
1127 }
1128 
1129 /*
1130  * Access the filesystem buffer containing the specified hammer offset.
1131  * buf_offset is a conglomeration of the volume number and vol_buf_beg
1132  * relative buffer offset.  It must also have bit 55 set to be valid.
1133  * (see hammer_off_t in hammer_disk.h).
1134  *
1135  * Any prior buffer in *bufferp will be released and replaced by the
1136  * requested buffer.
1137  *
1138  * NOTE: The buffer is indexed via its zoneX_offset but we allow the
1139  * passed cached *bufferp to match against either zoneX or zone2.
1140  */
1141 static __inline
1142 void *
1143 _hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
1144 	     int isnew, int *errorp, struct hammer_buffer **bufferp)
1145 {
1146 	hammer_buffer_t buffer;
1147 	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;
1148 
1149 	buf_offset &= ~HAMMER_BUFMASK64;
1150 	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);
1151 
1152 	buffer = *bufferp;
1153 	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
1154 			       buffer->zoneX_offset != buf_offset)) {
1155 		if (buffer)
1156 			hammer_rel_buffer(buffer, 0);
1157 		buffer = hammer_get_buffer(hmp, buf_offset, bytes, isnew, errorp);
1158 		*bufferp = buffer;
1159 	} else {
1160 		*errorp = 0;
1161 	}
1162 
1163 	/*
1164 	 * Return a pointer to the buffer data.
1165 	 */
1166 	if (buffer == NULL)
1167 		return(NULL);
1168 	else
1169 		return((char *)buffer->ondisk + xoff);
1170 }
1171 
1172 void *
1173 hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
1174 	     int *errorp, struct hammer_buffer **bufferp)
1175 {
1176 	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, 0, errorp, bufferp));
1177 }
1178 
1179 void *
1180 hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
1181 	         int *errorp, struct hammer_buffer **bufferp)
1182 {
1183 	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
1184 	return(_hammer_bread(hmp, buf_offset, bytes, 0, errorp, bufferp));
1185 }
1186 
1187 /*
1188  * Access the filesystem buffer containing the specified hammer offset.
1189  * No disk read operation occurs.  The result buffer may contain garbage.
1190  *
1191  * Any prior buffer in *bufferp will be released and replaced by the
1192  * requested buffer.
1193  *
1194  * This function marks the buffer dirty but does not increment its
1195  * modify_refs count.
1196  */
1197 void *
1198 hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
1199 	     int *errorp, struct hammer_buffer **bufferp)
1200 {
1201 	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, 1, errorp, bufferp));
1202 }
1203 
1204 void *
1205 hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
1206 		int *errorp, struct hammer_buffer **bufferp)
1207 {
1208 	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
1209 	return(_hammer_bread(hmp, buf_offset, bytes, 1, errorp, bufferp));
1210 }
1211 
1212 /************************************************************************
1213  *				NODES					*
1214  ************************************************************************
1215  *
1216  * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
1217  * method used by the HAMMER filesystem.
1218  *
1219  * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
1220  * associated with its buffer, and will only reference the buffer while
1221  * the node itself is referenced.
1222  *
1223  * A hammer_node can also be passively associated with other HAMMER
1224  * structures, such as inodes, while retaining 0 references.  These
1225  * associations can be cleared backwards using a pointer-to-pointer in
1226  * the hammer_node.
1227  *
1228  * This allows the HAMMER implementation to cache hammer_nodes long-term
1229  * and short-cut a great deal of the infrastructure's complexity.  In
1230  * most cases a cached node can be reacquired without having to dip into
1231  * the B-Tree.
1232  */
1233 hammer_node_t
1234 hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
1235 		int isnew, int *errorp)
1236 {
1237 	hammer_mount_t hmp = trans->hmp;
1238 	hammer_node_t node;
1239 	int doload;
1240 
1241 	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);
1242 
1243 	/*
1244 	 * Locate the structure, allocating one if necessary.
1245 	 */
1246 again:
1247 	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
1248 	if (node == NULL) {
1249 		++hammer_count_nodes;
1250 		node = kmalloc(sizeof(*node), hmp->m_misc, M_WAITOK|M_ZERO|M_USE_RESERVE);
1251 		node->node_offset = node_offset;
1252 		node->hmp = hmp;
1253 		TAILQ_INIT(&node->cursor_list);
1254 		TAILQ_INIT(&node->cache_list);
1255 		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
1256 			--hammer_count_nodes;
1257 			kfree(node, hmp->m_misc);
1258 			goto again;
1259 		}
1260 		doload = hammer_ref_interlock_true(&node->lock);
1261 	} else {
1262 		doload = hammer_ref_interlock(&node->lock);
1263 	}
1264 	if (doload) {
1265 		*errorp = hammer_load_node(trans, node, isnew);
1266 		trans->flags |= HAMMER_TRANSF_DIDIO;
1267 		if (*errorp)
1268 			node = NULL;
1269 	} else {
1270 		KKASSERT(node->ondisk);
1271 		*errorp = 0;
1272 		hammer_io_advance(&node->buffer->io);
1273 	}
1274 	return(node);
1275 }
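
/*
 * Illustrative sketch: acquiring a B-Tree node by its HAMMER_ZONE_BTREE
 * offset and releasing it.  On success the node is referenced and
 * node->ondisk is valid; hammer_rel_node() drops the reference and may
 * leave the node passively cached.
 */
#if 0
static void
example_node_access(hammer_transaction_t trans, hammer_off_t node_offset)
{
	hammer_node_t node;
	int error;

	node = hammer_get_node(trans, node_offset, 0, &error);
	if (node) {
		/* ... examine node->ondisk (elms, count, etc) ... */
		hammer_rel_node(node);
	}
}
#endif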
1276 
1277 /*
1278  * Reference an already-referenced node.  0->1 transitions should assert
1279  * so we do not have to deal with hammer_ref() setting CHECK.
1280  */
1281 void
1282 hammer_ref_node(hammer_node_t node)
1283 {
1284 	KKASSERT(hammer_isactive(&node->lock) && node->ondisk != NULL);
1285 	hammer_ref(&node->lock);
1286 }
1287 
1288 /*
1289  * Load a node's on-disk data reference.  Called with the node referenced
1290  * and interlocked.
1291  *
1292  * On return the node interlock will be unlocked.  If a non-zero error code
1293  * is returned the node will also be dereferenced (and the caller's pointer
1294  * will be stale).
1295  */
1296 static int
1297 hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
1298 {
1299 	hammer_buffer_t buffer;
1300 	hammer_off_t buf_offset;
1301 	int error;
1302 
1303 	error = 0;
1304 	if (node->ondisk == NULL) {
1305 		/*
1306 		 * This is a little confusing but the gist is that
1307 		 * node->buffer determines whether the node is on
1308 		 * the buffer's clist and node->ondisk determines
1309 		 * whether the buffer is referenced.
1310 		 *
1311 		 * We could be racing a buffer release, in which case
1312 		 * node->buffer may become NULL while we are blocked
1313 		 * referencing the buffer.
1314 		 */
1315 		if ((buffer = node->buffer) != NULL) {
1316 			error = hammer_ref_buffer(buffer);
1317 			if (error == 0 && node->buffer == NULL) {
1318 				TAILQ_INSERT_TAIL(&buffer->clist, node, entry);
1319 				node->buffer = buffer;
1320 			}
1321 		} else {
1322 			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
1323 			buffer = hammer_get_buffer(node->hmp, buf_offset,
1324 						   HAMMER_BUFSIZE, 0, &error);
1325 			if (buffer) {
1326 				KKASSERT(error == 0);
1327 				TAILQ_INSERT_TAIL(&buffer->clist, node, entry);
1328 				node->buffer = buffer;
1329 			}
1330 		}
1331 		if (error)
1332 			goto failed;
1333 		node->ondisk = (void *)((char *)buffer->ondisk +
1334 				        (node->node_offset & HAMMER_BUFMASK));
1335 
1336 		/*
1337 		 * Check CRC.  NOTE: Neither flag is set and the CRC is not
1338 		 * generated on new B-Tree nodes.
1339 		 */
1340 		if (isnew == 0 &&
1341 		    (node->flags & HAMMER_NODE_CRCANY) == 0) {
1342 			if (hammer_crc_test_btree(node->ondisk) == 0) {
1343 				hdkprintf("CRC B-TREE NODE @ %016llx/%lu FAILED\n",
1344 					(long long)node->node_offset,
1345 					sizeof(*node->ondisk));
1346 				if (hammer_debug_critical)
1347 					Debugger("CRC FAILED: B-TREE NODE");
1348 				node->flags |= HAMMER_NODE_CRCBAD;
1349 			} else {
1350 				node->flags |= HAMMER_NODE_CRCGOOD;
1351 			}
1352 		}
1353 	}
1354 	if (node->flags & HAMMER_NODE_CRCBAD) {
1355 		if (trans->flags & HAMMER_TRANSF_CRCDOM)
1356 			error = EDOM;
1357 		else
1358 			error = EIO;
1359 	}
1360 failed:
1361 	if (error) {
1362 		_hammer_rel_node(node, 1);
1363 	} else {
1364 		hammer_ref_interlock_done(&node->lock);
1365 	}
1366 	return (error);
1367 }
1368 
1369 /*
1370  * Safely reference a node, interlock against flushes via the IO subsystem.
1371  */
1372 hammer_node_t
1373 hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
1374 		     int *errorp)
1375 {
1376 	hammer_node_t node;
1377 	int doload;
1378 
1379 	node = cache->node;
1380 	if (node != NULL) {
1381 		doload = hammer_ref_interlock(&node->lock);
1382 		if (doload) {
1383 			*errorp = hammer_load_node(trans, node, 0);
1384 			if (*errorp)
1385 				node = NULL;
1386 		} else {
1387 			KKASSERT(node->ondisk);
1388 			if (node->flags & HAMMER_NODE_CRCBAD) {
1389 				if (trans->flags & HAMMER_TRANSF_CRCDOM)
1390 					*errorp = EDOM;
1391 				else
1392 					*errorp = EIO;
1393 				_hammer_rel_node(node, 0);
1394 				node = NULL;
1395 			} else {
1396 				*errorp = 0;
1397 			}
1398 		}
1399 	} else {
1400 		*errorp = ENOENT;
1401 	}
1402 	return(node);
1403 }
1404 
1405 /*
1406  * Release a hammer_node.  On the last release the node dereferences
1407  * its underlying buffer and may or may not be destroyed.
1408  *
1409  * If locked is non-zero the passed node has been interlocked by the
1410  * caller and we are in the failure/unload path, otherwise it has not and
1411  * we are doing a normal release.
1412  *
1413  * This function will dispose of the interlock and the reference.
1414  * On return the node pointer is stale.
1415  */
1416 void
1417 _hammer_rel_node(hammer_node_t node, int locked)
1418 {
1419 	hammer_buffer_t buffer;
1420 
1421 	/*
1422 	 * Deref the node.  If this isn't the 1->0 transition we're basically
1423 	 * done.  If locked is non-zero this function will just deref the
1424 	 * locked node and return 1, otherwise it will deref the locked
1425 	 * node and either lock and return 1 on the 1->0 transition or
1426 	 * not lock and return 0.
1427 	 */
1428 	if (hammer_rel_interlock(&node->lock, locked) == 0)
1429 		return;
1430 
1431 	/*
1432 	 * Either locked was non-zero and we are interlocked, or the
1433 	 * hammer_rel_interlock() call returned non-zero and we are
1434 	 * interlocked.
1435 	 *
1436 	 * The ref-count must still be decremented if locked != 0 so
1437 	 * the cleanup required still varies a bit.
1438 	 *
1439 	 * hammer_flush_node() when called with 1 or 2 will dispose of
1440 	 * the lock and possible ref-count.
1441 	 */
1442 	if (node->ondisk == NULL) {
1443 		hammer_flush_node(node, locked + 1);
1444 		/* node is stale now */
1445 		return;
1446 	}
1447 
1448 	/*
1449 	 * Do not disassociate the node from the buffer if it represents
1450 	 * a modified B-Tree node that still needs its crc to be generated.
1451 	 */
1452 	if (node->flags & HAMMER_NODE_NEEDSCRC) {
1453 		hammer_rel_interlock_done(&node->lock, locked);
1454 		return;
1455 	}
1456 
1457 	/*
1458 	 * Do final cleanups and then either destroy the node or leave it
1459 	 * passively cached.  The buffer reference is removed regardless.
1460 	 */
1461 	buffer = node->buffer;
1462 	node->ondisk = NULL;
1463 
1464 	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
1465 		/*
1466 		 * Normal release.
1467 		 */
1468 		hammer_rel_interlock_done(&node->lock, locked);
1469 	} else {
1470 		/*
1471 		 * Destroy the node.
1472 		 */
1473 		hammer_flush_node(node, locked + 1);
1474 		/* node is stale */
1475 
1476 	}
1477 	hammer_rel_buffer(buffer, 0);
1478 }
1479 
1480 void
1481 hammer_rel_node(hammer_node_t node)
1482 {
1483 	_hammer_rel_node(node, 0);
1484 }
1485 
1486 /*
1487  * Free space on-media associated with a B-Tree node.
1488  */
1489 void
1490 hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
1491 {
1492 	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
1493 	node->flags |= HAMMER_NODE_DELETED;
1494 	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
1495 }
1496 
1497 /*
1498  * Passively cache a referenced hammer_node.  The caller may release
1499  * the node on return.
1500  */
1501 void
1502 hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
1503 {
1504 	/*
1505 	 * If the node doesn't exist, or is being deleted, don't cache it!
1506 	 *
1507 	 * The node can only ever be NULL in the I/O failure path.
1508 	 */
1509 	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
1510 		return;
1511 	if (cache->node == node)
1512 		return;
1513 	while (cache->node)
1514 		hammer_uncache_node(cache);
1515 	if (node->flags & HAMMER_NODE_DELETED)
1516 		return;
1517 	cache->node = node;
1518 	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
1519 }
1520 
1521 void
1522 hammer_uncache_node(hammer_node_cache_t cache)
1523 {
1524 	hammer_node_t node;
1525 
1526 	if ((node = cache->node) != NULL) {
1527 		TAILQ_REMOVE(&node->cache_list, cache, entry);
1528 		cache->node = NULL;
1529 		if (TAILQ_EMPTY(&node->cache_list))
1530 			hammer_flush_node(node, 0);
1531 	}
1532 }
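
/*
 * Illustrative sketch of the passive-cache round trip.  A
 * hammer_node_cache embedded in some longer-lived structure remembers
 * the node with zero references; hammer_ref_node_safe() later
 * revalidates and re-references it.  The ip->cache[0] location is an
 * assumption for illustration.
 */
#if 0
	/* after acquiring a node */
	hammer_cache_node(&ip->cache[0], node);	/* assumed inode cache slot */
	hammer_rel_node(node);

	/* later, attempt cheap reacquisition before a full B-Tree descent */
	node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
	if (node == NULL)
		node = hammer_get_node(trans, node_offset, 0, &error);
#endif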
1533 
1534 /*
1535  * Remove a node's cache references and destroy the node if it has no
1536  * other references or backing store.
1537  *
1538  * locked == 0	Normal unlocked operation
1539  * locked == 1	Call hammer_rel_interlock_done(..., 0);
1540  * locked == 2	Call hammer_rel_interlock_done(..., 1);
1541  *
1542  * XXX for now this isn't even close to being MPSAFE so the refs check
1543  *     is sufficient.
1544  */
1545 void
1546 hammer_flush_node(hammer_node_t node, int locked)
1547 {
1548 	hammer_node_cache_t cache;
1549 	hammer_buffer_t buffer;
1550 	hammer_mount_t hmp = node->hmp;
1551 	int dofree;
1552 
1553 	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
1554 		TAILQ_REMOVE(&node->cache_list, cache, entry);
1555 		cache->node = NULL;
1556 	}
1557 
1558 	/*
1559 	 * NOTE: refs is predisposed if another thread is blocking and
1560 	 *	 will be larger than 0 in that case.  We aren't MPSAFE
1561 	 *	 here.
1562 	 */
1563 	if (node->ondisk == NULL && hammer_norefs(&node->lock)) {
1564 		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
1565 		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
1566 		if ((buffer = node->buffer) != NULL) {
1567 			node->buffer = NULL;
1568 			TAILQ_REMOVE(&buffer->clist, node, entry);
1569 			/* buffer is unreferenced because ondisk is NULL */
1570 		}
1571 		dofree = 1;
1572 	} else {
1573 		dofree = 0;
1574 	}
1575 
1576 	/*
1577 	 * Deal with the interlock if locked == 1 or locked == 2.
1578 	 */
1579 	if (locked)
1580 		hammer_rel_interlock_done(&node->lock, locked - 1);
1581 
1582 	/*
1583 	 * Destroy if requested
1584 	 */
1585 	if (dofree) {
1586 		--hammer_count_nodes;
1587 		kfree(node, hmp->m_misc);
1588 	}
1589 }
1590 
1591 /*
1592  * Flush passively cached B-Tree nodes associated with this buffer.
1593  * This is only called when the buffer is about to be destroyed, so
1594  * none of the nodes should have any references.  The buffer is locked.
1595  *
1596  * We may be interlocked with the buffer.
1597  */
1598 void
1599 hammer_flush_buffer_nodes(hammer_buffer_t buffer)
1600 {
1601 	hammer_node_t node;
1602 
1603 	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
1604 		KKASSERT(node->ondisk == NULL);
1605 		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
1606 
1607 		if (hammer_try_interlock_norefs(&node->lock)) {
1608 			hammer_ref(&node->lock);
1609 			node->flags |= HAMMER_NODE_FLUSH;
1610 			_hammer_rel_node(node, 1);
1611 		} else {
1612 			KKASSERT(node->buffer != NULL);
1613 			buffer = node->buffer;
1614 			node->buffer = NULL;
1615 			TAILQ_REMOVE(&buffer->clist, node, entry);
1616 			/* buffer is unreferenced because ondisk is NULL */
1617 		}
1618 	}
1619 }
1620 
1621 
1622 /************************************************************************
1623  *				ALLOCATORS				*
1624  ************************************************************************/
1625 
1626 /*
1627  * Allocate a B-Tree node.
1628  */
1629 hammer_node_t
1630 hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
1631 {
1632 	hammer_buffer_t buffer = NULL;
1633 	hammer_node_t node = NULL;
1634 	hammer_off_t node_offset;
1635 
1636 	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
1637 					    sizeof(struct hammer_node_ondisk),
1638 					    hint, errorp);
1639 	if (*errorp == 0) {
1640 		node = hammer_get_node(trans, node_offset, 1, errorp);
1641 		hammer_modify_node_noundo(trans, node);
1642 		bzero(node->ondisk, sizeof(*node->ondisk));
1643 		hammer_modify_node_done(node);
1644 	}
1645 	if (buffer)
1646 		hammer_rel_buffer(buffer, 0);
1647 	return(node);
1648 }
1649 
1650 /*
1651  * Allocate data.  If the address of a data buffer is supplied then
1652  * any prior non-NULL *data_bufferp will be released and *data_bufferp
1653  * will be set to the related buffer.  The caller must release it when
1654  * finally done.  The initial *data_bufferp should be set to NULL by
1655  * the caller.
1656  *
1657  * The caller is responsible for making hammer_modify*() calls on the
1658  * *data_bufferp.
1659  */
1660 void *
1661 hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
1662 		  uint16_t rec_type, hammer_off_t *data_offsetp,
1663 		  struct hammer_buffer **data_bufferp,
1664 		  hammer_off_t hint, int *errorp)
1665 {
1666 	void *data;
1667 	int zone;
1668 
1669 	/*
1670 	 * Allocate data directly from blockmap.
1671 	 */
1672 	if (data_len) {
1673 		switch(rec_type) {
1674 		case HAMMER_RECTYPE_INODE:
1675 		case HAMMER_RECTYPE_DIRENTRY:
1676 		case HAMMER_RECTYPE_EXT:
1677 		case HAMMER_RECTYPE_FIX:
1678 		case HAMMER_RECTYPE_PFS:
1679 		case HAMMER_RECTYPE_SNAPSHOT:
1680 		case HAMMER_RECTYPE_CONFIG:
1681 			zone = HAMMER_ZONE_META_INDEX;
1682 			break;
1683 		case HAMMER_RECTYPE_DATA:
1684 		case HAMMER_RECTYPE_DB:
1685 			/*
1686 			 * Only mirror-write comes here.
1687 			 * Regular allocation path uses blockmap reservation.
1688 			 */
1689 			zone = hammer_data_zone_index(data_len);
1690 			if (zone == HAMMER_ZONE_LARGE_DATA_INDEX) {
1691 				/* round up */
1692 				data_len = (data_len + HAMMER_BUFMASK) &
1693 					   ~HAMMER_BUFMASK;
1694 			}
1695 			break;
1696 		default:
1697 			hpanic("rec_type %04x unknown", rec_type);
1698 			zone = HAMMER_ZONE_UNAVAIL_INDEX; /* NOT REACHED */
1699 			break;
1700 		}
1701 		*data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
1702 						      hint, errorp);
1703 	} else {
1704 		*data_offsetp = 0;
1705 	}
1706 
1707 	data = NULL;
1708 	if (*errorp == 0 && data_bufferp && data_len)
1709 		data = hammer_bread_ext(trans->hmp, *data_offsetp, data_len,
1710 					errorp, data_bufferp);
1711 	return(data);
1712 }
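
/*
 * Illustrative sketch: allocating record data and initializing it.  Per
 * the comment above, stores into the returned data must be bracketed by
 * the caller's own hammer_modify*() calls; the specific
 * hammer_modify_buffer()/hammer_modify_buffer_done() pairing shown here
 * is an assumption based on that contract, not defined in this file.
 */
#if 0
	data_buffer = NULL;
	data = hammer_alloc_data(trans, data_len, HAMMER_RECTYPE_DATA,
				 &data_offset, &data_buffer, hint, &error);
	if (data) {
		hammer_modify_buffer(trans, data_buffer, data, data_len);
		bcopy(payload, data, data_len);
		hammer_modify_buffer_done(data_buffer);
	}
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
#endif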
1713 
1714 /*
1715  * Sync dirty buffers to the media and clean-up any loose ends.
1716  *
1717  * These functions do not start the flusher going, they simply
1718  * queue everything up to the flusher.
1719  */
1720 static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);
1721 
1722 struct hammer_sync_info {
1723 	int error;
1724 };
1725 
1726 int
1727 hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
1728 {
1729 	struct hammer_sync_info info;
1730 
1731 	info.error = 0;
1732 	if (waitfor == MNT_WAIT) {
1733 		vsyncscan(hmp->mp, VMSC_GETVP | VMSC_ONEPASS,
1734 			  hammer_sync_scan2, &info);
1735 	} else {
1736 		vsyncscan(hmp->mp, VMSC_GETVP | VMSC_ONEPASS | VMSC_NOWAIT,
1737 			  hammer_sync_scan2, &info);
1738 	}
1739 	return(info.error);
1740 }
1741 
1742 /*
1743  * Filesystem sync.  If doing a synchronous sync make a second pass on
1744  * the vnodes in case any were already flushing during the first pass,
1745  * and activate the flusher twice (the second time brings the UNDO FIFO's
1746  * start position up to the end position after the first call).
1747  *
1748  * If doing a lazy sync make just one pass on the vnode list, ignoring
1749  * any new vnodes added to the list while the sync is in progress.
1750  */
1751 int
1752 hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
1753 {
1754 	struct hammer_sync_info info;
1755 	int flags;
1756 
1757 	flags = VMSC_GETVP;
1758 	if (waitfor & MNT_LAZY)
1759 		flags |= VMSC_ONEPASS;
1760 
1761 	info.error = 0;
1762 	vsyncscan(hmp->mp, flags | VMSC_NOWAIT, hammer_sync_scan2, &info);
1763 
1764 	if (info.error == 0 && (waitfor & MNT_WAIT)) {
1765 		vsyncscan(hmp->mp, flags, hammer_sync_scan2, &info);
1766 	}
1767 	if (waitfor == MNT_WAIT) {
1768 		hammer_flusher_sync(hmp);
1769 		hammer_flusher_sync(hmp);
1770 	} else {
1771 		hammer_flusher_async(hmp, NULL);
1772 		hammer_flusher_async(hmp, NULL);
1773 	}
1774 	return(info.error);
1775 }
1776 
1777 static int
1778 hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
1779 {
1780 	struct hammer_sync_info *info = data;
1781 	struct hammer_inode *ip;
1782 	int error;
1783 
1784 	ip = VTOI(vp);
1785 	if (ip == NULL)
1786 		return(0);
1787 	if (vp->v_type == VNON || vp->v_type == VBAD) {
1788 		vclrisdirty(vp);
1789 		return(0);
1790 	}
1791 	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1792 	    RB_EMPTY(&vp->v_rbdirty_tree)) {
1793 		vclrisdirty(vp);
1794 		return(0);
1795 	}
1796 	error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
1797 	if (error)
1798 		info->error = error;
1799 	return(0);
1800 }
1801