/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/cmd_blockmap.c,v 1.4 2008/07/19 18:48:14 dillon Exp $
 */

#include "hammer.h"

/*
 * Each collect covers a 1<<(19+23) byte range of layer 1 address space,
 * i.e. one layer1 entry, plus an in-memory copy of the 1<<23 byte
 * big-block holding that entry's layer2 array.
 */
typedef struct collect {
	RB_ENTRY(collect) entry;
	hammer_off_t	phys_offset;  /* layer2 address pointed to by layer1 */
	hammer_off_t	*offsets;  /* big-block offset for layer2[i] */
	struct hammer_blockmap_layer2 *track2;  /* track of layer2 entries */
	struct hammer_blockmap_layer2 *layer2;  /* 1<<19 x 16 bytes entries */
	int error;  /* # of inconsistencies */
} *collect_t;
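
/*
 * Size check: 1<<19 layer2 entries x 16 bytes each = 1<<23 bytes, i.e.
 * the layer2 array for one layer1 entry is itself exactly one 8MB
 * big-block.
 */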

static int
collect_compare(struct collect *c1, struct collect *c2)
{
	if (c1->phys_offset < c2->phys_offset)
		return(-1);
	if (c1->phys_offset > c2->phys_offset)
		return(1);
	return(0);
}

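/*
 * Collect structures live in a red-black tree keyed by phys_offset;
 * the RB_GENERATE2 form, given the phys_offset field, allows direct
 * RB_LOOKUP by key in collect_get().
 */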
RB_HEAD(collect_rb_tree, collect) CollectTree = RB_INITIALIZER(&CollectTree);
RB_PROTOTYPE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t);
RB_GENERATE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t,
	phys_offset);

static void dump_blockmap(const char *label, int zone);
static void check_freemap(hammer_blockmap_t freemap);
static void check_btree_node(hammer_off_t node_offset, int depth);
static void check_undo(hammer_blockmap_t undomap);
static __inline void collect_btree_root(hammer_off_t node_offset);
static __inline void collect_btree_internal(hammer_btree_elm_t elm);
static __inline void collect_btree_leaf(hammer_btree_elm_t elm);
static __inline void collect_freemap_layer1(hammer_blockmap_t freemap);
static __inline void collect_freemap_layer2(struct hammer_blockmap_layer1 *layer1);
static __inline void collect_undo(hammer_off_t scan_offset,
	hammer_fifo_head_t head);
static void collect_blockmap(hammer_off_t offset, int32_t length, int zone);
static struct hammer_blockmap_layer2 *collect_get_track(
	collect_t collect, hammer_off_t offset, int zone,
	struct hammer_blockmap_layer2 *layer2);
static collect_t collect_get(hammer_off_t phys_offset);
static void dump_collect_table(void);
static void dump_collect(collect_t collect, struct zone_stat *stats);

void
hammer_cmd_blockmap(void)
{
	dump_blockmap("freemap", HAMMER_ZONE_FREEMAP_INDEX);
}

static
void
dump_blockmap(const char *label, int zone)
{
	struct volume_info *root_volume;
	hammer_blockmap_t rootmap;
	hammer_blockmap_t blockmap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t scan1;
	hammer_off_t scan2;
	struct zone_stat *stats = NULL;
	int xerr;
	int i;

	assert(RootVolNo >= 0);
	root_volume = get_volume(RootVolNo);
	rootmap = &root_volume->ondisk->vol0_blockmap[zone];
	assert(rootmap->phys_offset != 0);

	printf("                   "
	       "phys             first            next             alloc\n");
	for (i = 0; i < HAMMER_MAX_ZONES; i++) {
		blockmap = &root_volume->ondisk->vol0_blockmap[i];
		if (VerboseOpt || i == zone) {
			printf("zone %-2d %-10s %016jx %016jx %016jx %016jx\n",
				i, (i == zone ? label : ""),
				(uintmax_t)blockmap->phys_offset,
				(uintmax_t)blockmap->first_offset,
				(uintmax_t)blockmap->next_offset,
				(uintmax_t)blockmap->alloc_offset);
		}
	}

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

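	/*
	 * scan1 steps one layer1 entry (HAMMER_BLOCKMAP_LAYER2 bytes,
	 * 1<<42 with the standard geometry) at a time; scan2 below steps
	 * one big-block (1<<23 bytes) at a time within it.
	 */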
	for (scan1 = HAMMER_ZONE_ENCODE(zone, 0);
	     scan1 < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK);
	     scan1 += HAMMER_BLOCKMAP_LAYER2) {
		/*
		 * Dive layer 1.
		 */
		layer1_offset = rootmap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(scan1);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		xerr = ' ';
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			xerr = 'B';
		if (xerr == ' ' &&
		    layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			continue;
		}
		printf("%c layer1 %016jx @%016jx blocks-free %jd\n",
			xerr,
			(uintmax_t)scan1,
			(uintmax_t)layer1->phys_offset,
			(intmax_t)layer1->blocks_free);
		if (layer1->phys_offset == HAMMER_BLOCKMAP_FREE)
			continue;
		for (scan2 = scan1;
		     scan2 < scan1 + HAMMER_BLOCKMAP_LAYER2;
		     scan2 += HAMMER_BIGBLOCK_SIZE) {
			/*
			 * Dive layer 2, each entry represents a big-block.
			 */
			layer2_offset = layer1->phys_offset +
					HAMMER_BLOCKMAP_LAYER2_OFFSET(scan2);
			layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
			xerr = ' ';
			if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
				xerr = 'B';
			printf("%c       %016jx zone=%-2d ",
				xerr,
				(uintmax_t)scan2,
				layer2->zone);
			if (VerboseOpt > 1)
				printf("vol=%-3d L1=%-7lu L2=%-7lu ",
					HAMMER_VOL_DECODE(scan2),
					HAMMER_BLOCKMAP_LAYER1_OFFSET(scan2),
					HAMMER_BLOCKMAP_LAYER2_OFFSET(scan2));
			else if (VerboseOpt > 0)
				printf("vol=%-3d L1=%-6lu L2=%-6lu ",
					HAMMER_VOL_DECODE(scan2),
					HAMMER_BLOCKMAP_LAYER1_INDEX(scan2),
					HAMMER_BLOCKMAP_LAYER2_INDEX(scan2));
			printf("app=%-7d free=%-7d",
				layer2->append_off,
				layer2->bytes_free);
			if (VerboseOpt)
				printf(" crc=%04x-%04x\n",
					layer1->layer1_crc,
					layer2->entry_crc);
			else
				printf("\n");

			if (VerboseOpt)
				hammer_add_zone_stat_layer2(stats, layer2);
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(root_volume);

	if (VerboseOpt) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}
}

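/*
 * hammer checkmap: collect the expected allocation state of every
 * referenced big-block by walking the freemap, the B-Tree and the UNDO
 * FIFO, then compare the collected state against the on-disk layer2
 * entries (dump_collect_table()).
 */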
void
hammer_cmd_checkmap(void)
{
	struct volume_info *volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_t undomap;
	hammer_off_t node_offset;

	volume = get_volume(RootVolNo);
	node_offset = volume->ondisk->vol0_btree_root;
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	undomap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (QuietOpt < 3) {
		printf("Volume header\trecords=%jd next_tid=%016jx\n",
		       (intmax_t)volume->ondisk->vol0_stat_records,
		       (uintmax_t)volume->ondisk->vol0_next_tid);
		printf("\t\tbufoffset=%016jx\n",
		       (uintmax_t)volume->ondisk->vol_buf_beg);
		printf("\t\tundosize=%jdMB\n",
		       (intmax_t)((undomap->alloc_offset & HAMMER_OFF_LONG_MASK)
			/ (1024 * 1024)));
	}
	rel_volume(volume);

	assert(HAMMER_ZONE_UNDO_INDEX < HAMMER_ZONE2_MAPPED_INDEX);
	assert(HAMMER_ZONE2_MAPPED_INDEX < HAMMER_MAX_ZONES);
	AssertOnFailure = (DebugOpt != 0);

	printf("Collecting allocation info from freemap: ");
	fflush(stdout);
	check_freemap(freemap);
	printf("done\n");

	printf("Collecting allocation info from B-Tree: ");
	fflush(stdout);
	check_btree_node(node_offset, 0);
	printf("done\n");

	printf("Collecting allocation info from UNDO: ");
	fflush(stdout);
	check_undo(undomap);
	printf("done\n");

	dump_collect_table();
	AssertOnFailure = 1;
}

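/*
 * Walk all of the freemap's layer1 entries and collect the big-blocks
 * backing the freemap itself (its layer1 array and layer2 arrays).
 */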
static void
check_freemap(hammer_blockmap_t freemap)
{
	hammer_off_t offset;
	struct buffer_info *buffer1 = NULL;
	struct hammer_blockmap_layer1 *layer1;
	int i;

	collect_freemap_layer1(freemap);

	for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
		offset = freemap->phys_offset + i * sizeof(*layer1);
		layer1 = get_buffer_data(offset, &buffer1, 0);
		if (layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL)
			collect_freemap_layer2(layer1);
	}
	rel_buffer(buffer1);
}

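/*
 * Recursively walk the B-Tree depth-first, collecting each node and,
 * at the leaves, the data records they reference.
 */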
static void
check_btree_node(hammer_off_t node_offset, int depth)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;
	hammer_btree_elm_t elm;
	int i;
	char badc;

	if (depth == 0)
		collect_btree_root(node_offset);
	node = get_node(node_offset, &buffer);

	if (crc32(&node->crc + 1, HAMMER_BTREE_CRCSIZE) == node->crc)
		badc = ' ';
	else
		badc = 'B';

	if (badc != ' ') {
		printf("%c    NODE %016jx cnt=%02d p=%016jx "
		       "type=%c depth=%d",
		       badc,
		       (uintmax_t)node_offset, node->count,
		       (uintmax_t)node->parent,
		       (node->type ? node->type : '?'), depth);
		printf(" mirror %016jx\n", (uintmax_t)node->mirror_tid);
	}

	for (i = 0; i < node->count; ++i) {
		elm = &node->elms[i];

		switch(node->type) {
		case HAMMER_BTREE_TYPE_INTERNAL:
			if (elm->internal.subtree_offset) {
				collect_btree_internal(elm);
				check_btree_node(elm->internal.subtree_offset,
						 depth + 1);
			}
			break;
		case HAMMER_BTREE_TYPE_LEAF:
			if (elm->leaf.data_offset)
				collect_btree_leaf(elm);
			break;
		default:
			if (AssertOnFailure)
				assert(0);
			break;
		}
	}
	rel_buffer(buffer);
}

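/*
 * Scan the UNDO FIFO.  Records are variable-sized, each headed by a
 * hammer_fifo_head; on a malformed size the scan resynchronizes at the
 * next HAMMER_UNDO_ALIGN boundary.
 */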
static void
check_undo(hammer_blockmap_t undomap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t scan_offset;
	hammer_fifo_head_t head;

	scan_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
	while (scan_offset < undomap->alloc_offset) {
		head = get_buffer_data(scan_offset, &buffer, 0);
		switch (head->hdr_type) {
		case HAMMER_HEAD_TYPE_PAD:
		case HAMMER_HEAD_TYPE_DUMMY:
		case HAMMER_HEAD_TYPE_UNDO:
		case HAMMER_HEAD_TYPE_REDO:
			collect_undo(scan_offset, head);
			break;
		default:
			if (AssertOnFailure)
				assert(0);
			break;
		}
		if ((head->hdr_size & HAMMER_HEAD_ALIGN_MASK) ||
		     head->hdr_size == 0 ||
		     head->hdr_size > HAMMER_UNDO_ALIGN -
			((u_int)scan_offset & HAMMER_UNDO_MASK)) {
			printf("Illegal size, skipping to next boundary\n");
			scan_offset = (scan_offset + HAMMER_UNDO_MASK) &
					~HAMMER_UNDO_MASK64;
		} else {
			scan_offset += head->hdr_size;
		}
	}
	rel_buffer(buffer);
}

static __inline
void
collect_freemap_layer1(hammer_blockmap_t freemap)
{
	/*
	 * This translation is necessary to do checkmap properly
	 * as zone4 is really just zone2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, freemap->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_freemap_layer2(struct hammer_blockmap_layer1 *layer1)
{
	/*
	 * This translation is necessary to do checkmap properly
	 * as zone4 is really just zone2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, layer1->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_btree_root(hammer_off_t node_offset)
{
	collect_blockmap(node_offset,
		sizeof(struct hammer_node_ondisk),  /* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

static __inline
void
collect_btree_internal(hammer_btree_elm_t elm)
{
	collect_blockmap(elm->internal.subtree_offset,
		sizeof(struct hammer_node_ondisk),  /* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

static __inline
void
collect_btree_leaf(hammer_btree_elm_t elm)
{
	int zone;

	switch (elm->base.rec_type) {
	case HAMMER_RECTYPE_INODE:
	case HAMMER_RECTYPE_DIRENTRY:
	case HAMMER_RECTYPE_EXT:
	case HAMMER_RECTYPE_FIX:
	case HAMMER_RECTYPE_PFS:
	case HAMMER_RECTYPE_SNAPSHOT:
	case HAMMER_RECTYPE_CONFIG:
		zone = HAMMER_ZONE_META_INDEX;
		break;
	case HAMMER_RECTYPE_DATA:
	case HAMMER_RECTYPE_DB:
		/*
		 * Exceptional case: HAMMER can allocate from
		 * HAMMER_ZONE_LARGE_DATA when the data length is
		 * >HAMMER_BUFSIZE/2 (not >=HAMMER_BUFSIZE).  Only the
		 * mirror write code currently does this; the code below
		 * ignores that and selects the zone the normal way,
		 * using >=HAMMER_BUFSIZE.  See hammer_alloc_data()
		 * for details.
		 */
		zone = elm->leaf.data_len >= HAMMER_BUFSIZE ?
		       HAMMER_ZONE_LARGE_DATA_INDEX :
		       HAMMER_ZONE_SMALL_DATA_INDEX;
		break;
	default:
		zone = HAMMER_ZONE_UNAVAIL_INDEX;
		break;
	}
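	/*
	 * data_len is rounded up to 16 bytes here to match the
	 * allocator's granularity (see hammer_alloc_data()).
	 */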
	collect_blockmap(elm->leaf.data_offset,
		(elm->leaf.data_len + 15) & ~15, zone);
}

static __inline
void
collect_undo(hammer_off_t scan_offset, hammer_fifo_head_t head)
{
	collect_blockmap(scan_offset, head->hdr_size,
		HAMMER_ZONE_UNDO_INDEX);
}

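/*
 * Record one allocation: translate the zone-X offset to its backing
 * zone-2 (raw buffer) address via blockmap_lookup(), then debit the
 * length from the tracked big-block's bytes_free.
 */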
static
void
collect_blockmap(hammer_off_t offset, int32_t length, int zone)
{
	struct hammer_blockmap_layer1 layer1;
	struct hammer_blockmap_layer2 layer2;
	struct hammer_blockmap_layer2 *track2;
	hammer_off_t result_offset;
	collect_t collect;
	int error;

	result_offset = blockmap_lookup(offset, &layer1, &layer2, &error);
	if (AssertOnFailure) {
		assert(HAMMER_ZONE_DECODE(offset) == zone);
		assert(HAMMER_ZONE_DECODE(result_offset) ==
			HAMMER_ZONE_RAW_BUFFER_INDEX);
		assert(error == 0);
	}
	collect = collect_get(layer1.phys_offset); /* layer2 address */
	track2 = collect_get_track(collect, result_offset, zone, &layer2);
	track2->bytes_free -= length;
}

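/*
 * Look up the collect for a layer1 phys_offset, lazily creating it.
 * Each collect allocates two 8MB arrays (track2, layer2) plus the
 * offsets array, so memory use grows with the number of layer1
 * entries referenced.
 */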
static
collect_t
collect_get(hammer_off_t phys_offset)
{
	collect_t collect;

	collect = RB_LOOKUP(collect_rb_tree, &CollectTree, phys_offset);
	if (collect)
		return(collect);

	collect = calloc(1, sizeof(*collect));
	collect->track2 = malloc(HAMMER_BIGBLOCK_SIZE);  /* 1<<23 bytes */
	collect->layer2 = malloc(HAMMER_BIGBLOCK_SIZE);  /* 1<<23 bytes */
	collect->offsets = malloc(sizeof(hammer_off_t) * HAMMER_BLOCKMAP_RADIX2);
	collect->phys_offset = phys_offset;
	RB_INSERT(collect_rb_tree, &CollectTree, collect);
	bzero(collect->track2, HAMMER_BIGBLOCK_SIZE);
	bzero(collect->layer2, HAMMER_BIGBLOCK_SIZE);

	return (collect);
}

static
void
collect_rel(collect_t collect)
{
	free(collect->offsets);
	free(collect->layer2);
	free(collect->track2);
	free(collect);
}

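/*
 * track2 entries are zeroed when the collect is created, so
 * entry_crc == 0 marks a big-block not yet referenced.  The first
 * reference snapshots the on-disk layer2 entry and seeds the track
 * with a full big-block's worth of free bytes.
 */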
static
struct hammer_blockmap_layer2 *
collect_get_track(collect_t collect, hammer_off_t offset, int zone,
		  struct hammer_blockmap_layer2 *layer2)
{
	struct hammer_blockmap_layer2 *track2;
	size_t i;

	i = HAMMER_BLOCKMAP_LAYER2_INDEX(offset);
	track2 = &collect->track2[i];
	if (track2->entry_crc == 0) {
		collect->layer2[i] = *layer2;
		collect->offsets[i] = offset & ~HAMMER_BIGBLOCK_MASK64;
		track2->zone = zone;
		track2->bytes_free = HAMMER_BIGBLOCK_SIZE;
		track2->entry_crc = 1;	/* steal field to tag track load */
	}
	return (track2);
}

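/*
 * Compare tracked state against the on-disk layer2 entries and report
 * mismatches: BZ for a zone mismatch, BM for a bytes_free mismatch.
 * The collect tree is torn down afterwards.
 */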
static
void
dump_collect_table(void)
{
	collect_t collect;
	int error = 0;
	struct zone_stat *stats = NULL;

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	RB_FOREACH(collect, collect_rb_tree, &CollectTree) {
		dump_collect(collect, stats);
		error += collect->error;
	}

	while ((collect = RB_ROOT(&CollectTree)) != NULL) {
		RB_REMOVE(collect_rb_tree, &CollectTree, collect);
		collect_rel(collect);
	}
	assert(RB_EMPTY(&CollectTree));

	if (VerboseOpt) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (error || VerboseOpt)
		printf("%d errors\n", error);
}

static
void
dump_collect(collect_t collect, struct zone_stat *stats)
{
	struct hammer_blockmap_layer2 *track2;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t offset;
	size_t i;
	int zone;

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX2; ++i) {
		track2 = &collect->track2[i];
		layer2 = &collect->layer2[i];
		offset = collect->offsets[i];

		/*
		 * Check big-blocks referenced by freemap, data,
		 * B-Tree nodes and UNDO fifo.
		 */
		if (track2->entry_crc == 0)
			continue;

		zone = layer2->zone;
		if (AssertOnFailure) {
			assert((zone == HAMMER_ZONE_UNDO_INDEX) ||
				(zone == HAMMER_ZONE_FREEMAP_INDEX) ||
				(zone >= HAMMER_ZONE2_MAPPED_INDEX &&
				 zone < HAMMER_MAX_ZONES));
		}
		if (VerboseOpt)
			hammer_add_zone_stat_layer2(stats, layer2);

		if (track2->zone != layer2->zone) {
			printf("BZ\tblock=%016jx calc zone=%-2d, got zone=%-2d\n",
				(uintmax_t)offset,
				track2->zone,
				layer2->zone);
			collect->error++;
		} else if (track2->bytes_free != layer2->bytes_free) {
			printf("BM\tblock=%016jx zone=%-2d calc %d free, got %d\n",
				(uintmax_t)offset,
				layer2->zone,
				track2->bytes_free,
				layer2->bytes_free);
			collect->error++;
		} else if (VerboseOpt) {
			printf("\tblock=%016jx zone=%-2d %d free (correct)\n",
				(uintmax_t)offset,
				layer2->zone,
				track2->bytes_free);
		}
	}
}
614