/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/cmd_blockmap.c,v 1.4 2008/07/19 18:48:14 dillon Exp $
 */

#include "hammer.h"

/*
 * Each collect covers a 1<<(19+23) byte address space of layer 1
 * (and keeps a copy of the 1<<23 bytes of layer2 entries that the
 * layer1 entry points to).
 */
typedef struct collect {
	RB_ENTRY(collect) entry;
	hammer_off_t	phys_offset;  /* layer2 address pointed to by layer1 */
	hammer_off_t	*offsets;  /* big-block offset for layer2[i] */
	struct hammer_blockmap_layer2 *track2;  /* track of layer2 entries */
	struct hammer_blockmap_layer2 *layer2;  /* 1<<19 x 16 byte entries */
	int error;  /* # of inconsistencies */
} *collect_t;

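/*
 * RB-tree comparator: collect structures are ordered by the layer2
 * physical offset their layer1 entry points to.
 */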
static int
collect_compare(struct collect *c1, struct collect *c2)
{
	if (c1->phys_offset < c2->phys_offset)
		return(-1);
	if (c1->phys_offset > c2->phys_offset)
		return(1);
	return(0);
}

RB_HEAD(collect_rb_tree, collect) CollectTree = RB_INITIALIZER(&CollectTree);
RB_PROTOTYPE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t);
RB_GENERATE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t,
	phys_offset);

static void dump_blockmap(const char *label, int zone);
static void check_freemap(hammer_blockmap_t freemap);
static void check_btree_node(hammer_off_t node_offset, int depth);
static void check_undo(hammer_blockmap_t undomap);
static __inline void collect_btree_root(hammer_off_t node_offset);
static __inline void collect_btree_internal(hammer_btree_elm_t elm);
static __inline void collect_btree_leaf(hammer_btree_elm_t elm);
static __inline void collect_freemap_layer1(hammer_blockmap_t freemap);
static __inline void collect_freemap_layer2(struct hammer_blockmap_layer1 *layer1);
static __inline void collect_undo(hammer_off_t scan_offset,
	hammer_fifo_head_t head);
static void collect_blockmap(hammer_off_t offset, int32_t length, int zone);
static struct hammer_blockmap_layer2 *collect_get_track(
	collect_t collect, hammer_off_t offset, int zone,
	struct hammer_blockmap_layer2 *layer2);
static collect_t collect_get(hammer_off_t phys_offset);
static void dump_collect_table(void);
static void dump_collect(collect_t collect, struct zone_stat *stats);

static int num_bad_layer1 = 0;
static int num_bad_layer2 = 0;
static int num_bad_node = 0;

void
hammer_cmd_blockmap(void)
{
	dump_blockmap("freemap", HAMMER_ZONE_FREEMAP_INDEX);
}

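/*
 * Print the root blockmap entries recorded in the volume header, then
 * walk the requested zone and CRC-check every layer1/layer2 entry on
 * the way down.
 */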
static
void
dump_blockmap(const char *label, int zone)
{
	struct volume_info *root_volume;
	hammer_blockmap_t rootmap;
	hammer_blockmap_t blockmap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t scan1;
	hammer_off_t scan2;
	struct zone_stat *stats = NULL;
	int xerr;
	int i;

	assert(RootVolNo >= 0);
	root_volume = get_volume(RootVolNo);
	rootmap = &root_volume->ondisk->vol0_blockmap[zone];
	assert(rootmap->phys_offset != 0);

	printf("                   "
	       "phys             first            next             alloc\n");
	for (i = 0; i < HAMMER_MAX_ZONES; i++) {
		blockmap = &root_volume->ondisk->vol0_blockmap[i];
		if (VerboseOpt || i == zone) {
			printf("zone %-2d %-10s %016jx %016jx %016jx %016jx\n",
				i, (i == zone ? label : ""),
				(uintmax_t)blockmap->phys_offset,
				(uintmax_t)blockmap->first_offset,
				(uintmax_t)blockmap->next_offset,
				(uintmax_t)blockmap->alloc_offset);
		}
	}

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	for (scan1 = HAMMER_ZONE_ENCODE(zone, 0);
	     scan1 < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK);
	     scan1 += HAMMER_BLOCKMAP_LAYER2) {
		/*
		 * Dive layer 1.
		 */
		layer1_offset = rootmap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(scan1);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		xerr = ' ';  /* good */
		if (layer1->layer1_crc !=
		    crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
			xerr = 'B';
			++num_bad_layer1;
		}
		if (xerr == ' ' &&
		    layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			continue;
		}
		printf("%c layer1 %016jx @%016jx blocks-free %jd\n",
			xerr,
			(uintmax_t)scan1,
			(uintmax_t)layer1->phys_offset,
			(intmax_t)layer1->blocks_free);

		for (scan2 = scan1;
		     scan2 < scan1 + HAMMER_BLOCKMAP_LAYER2;
		     scan2 += HAMMER_BIGBLOCK_SIZE) {
			/*
			 * Dive layer 2, each entry represents a big-block.
			 */
			layer2_offset = layer1->phys_offset +
					HAMMER_BLOCKMAP_LAYER2_OFFSET(scan2);
			layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

			xerr = ' ';  /* good */
			if (layer2->entry_crc !=
			    crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
				xerr = 'B';
				++num_bad_layer2;
			}
			printf("%c       %016jx zone=%-2d ",
				xerr,
				(uintmax_t)scan2,
				layer2->zone);
			if (VerboseOpt > 1)
				printf("vol=%-3d L1=%-7lu L2=%-7lu ",
					HAMMER_VOL_DECODE(scan2),
					HAMMER_BLOCKMAP_LAYER1_OFFSET(scan2),
					HAMMER_BLOCKMAP_LAYER2_OFFSET(scan2));
			else if (VerboseOpt > 0)
				printf("vol=%-3d L1=%-6lu L2=%-6lu ",
					HAMMER_VOL_DECODE(scan2),
					HAMMER_BLOCKMAP_LAYER1_INDEX(scan2),
					HAMMER_BLOCKMAP_LAYER2_INDEX(scan2));
			printf("app=%-7d free=%-7d",
				layer2->append_off,
				layer2->bytes_free);
			if (VerboseOpt)
				printf(" crc=%04x-%04x\n",
					layer1->layer1_crc,
					layer2->entry_crc);
			else
				printf("\n");

			if (VerboseOpt)
				hammer_add_zone_stat_layer2(stats, layer2);
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(root_volume);

	if (VerboseOpt) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (num_bad_layer1 || VerboseOpt) {
		printf("%d bad layer1\n", num_bad_layer1);
	}
	if (num_bad_layer2 || VerboseOpt) {
		printf("%d bad layer2\n", num_bad_layer2);
	}
}

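/*
 * hammer checkmap: rebuild per-big-block allocation counts from the
 * freemap, the B-Tree and the UNDO/REDO FIFO, then cross-check them
 * against the on-disk layer2 entries.
 */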
void
hammer_cmd_checkmap(void)
{
	struct volume_info *volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_t undomap;
	hammer_off_t node_offset;

	volume = get_volume(RootVolNo);
	node_offset = volume->ondisk->vol0_btree_root;
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	undomap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (QuietOpt < 3) {
		printf("Volume header\trecords=%jd next_tid=%016jx\n",
		       (intmax_t)volume->ondisk->vol0_stat_records,
		       (uintmax_t)volume->ondisk->vol0_next_tid);
		printf("\t\tbufoffset=%016jx\n",
		       (uintmax_t)volume->ondisk->vol_buf_beg);
		printf("\t\tundosize=%jdMB\n",
		       (intmax_t)((undomap->alloc_offset & HAMMER_OFF_LONG_MASK)
			/ (1024 * 1024)));
	}
	rel_volume(volume);

	assert(HAMMER_ZONE_UNDO_INDEX < HAMMER_ZONE2_MAPPED_INDEX);
	assert(HAMMER_ZONE2_MAPPED_INDEX < HAMMER_MAX_ZONES);
	AssertOnFailure = (DebugOpt != 0);

	printf("Collecting allocation info from freemap: ");
	fflush(stdout);
	check_freemap(freemap);
	printf("done\n");

	printf("Collecting allocation info from B-Tree: ");
	fflush(stdout);
	check_btree_node(node_offset, 0);
	printf("done\n");

	printf("Collecting allocation info from UNDO: ");
	fflush(stdout);
	check_undo(undomap);
	printf("done\n");

	dump_collect_table();
	AssertOnFailure = 1;
}

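/*
 * Collect the big-blocks that back the freemap itself: the layer1
 * big-block plus one layer2 big-block per in-use layer1 entry.
 */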
static void
check_freemap(hammer_blockmap_t freemap)
{
	hammer_off_t offset;
	struct buffer_info *buffer1 = NULL;
	struct hammer_blockmap_layer1 *layer1;
	int i;

	collect_freemap_layer1(freemap);

	for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
		offset = freemap->phys_offset + i * sizeof(*layer1);
		layer1 = get_buffer_data(offset, &buffer1, 0);
		if (layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL)
			collect_freemap_layer2(layer1);
	}
	rel_buffer(buffer1);
}

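/*
 * Recursively walk the B-Tree from the given node, collecting node
 * and data allocations and reporting nodes whose CRC is bad or which
 * cannot be read at all.
 */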
static void
check_btree_node(hammer_off_t node_offset, int depth)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;
	hammer_btree_elm_t elm;
	int i;
	char badc = ' ';  /* good */
	char badm = ' ';  /* good */

	if (depth == 0)
		collect_btree_root(node_offset);
	node = get_node(node_offset, &buffer);

	if (node == NULL) {
		badc = 'B';
		badm = 'I';
	} else if (crc32(&node->crc + 1, HAMMER_BTREE_CRCSIZE) != node->crc) {
		badc = 'B';
	}

	if (badm != ' ' || badc != ' ') {  /* not good */
		++num_bad_node;
		printf("%c%c   NODE %016jx ",
			badc, badm, (uintmax_t)node_offset);
		if (node == NULL) {
			printf("(IO ERROR)\n");
			rel_buffer(buffer);
			return;
		} else {
			printf("cnt=%02d p=%016jx type=%c depth=%d mirror=%016jx\n",
			       node->count,
			       (uintmax_t)node->parent,
			       (node->type ? node->type : '?'),
			       depth,
			       (uintmax_t)node->mirror_tid);
		}
	}

	for (i = 0; i < node->count; ++i) {
		elm = &node->elms[i];

		switch(node->type) {
		case HAMMER_BTREE_TYPE_INTERNAL:
			if (elm->internal.subtree_offset) {
				collect_btree_internal(elm);
				check_btree_node(elm->internal.subtree_offset,
						 depth + 1);
			}
			break;
		case HAMMER_BTREE_TYPE_LEAF:
			if (elm->leaf.data_offset)
				collect_btree_leaf(elm);
			break;
		default:
			if (AssertOnFailure)
				assert(0);
			break;
		}
	}
	rel_buffer(buffer);
}

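/*
 * Scan the UNDO FIFO from offset 0 up to alloc_offset, collecting
 * each record; a malformed hdr_size causes the scan to skip ahead to
 * the next HAMMER_UNDO_ALIGN boundary.
 */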
static void
check_undo(hammer_blockmap_t undomap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t scan_offset;
	hammer_fifo_head_t head;

	scan_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
	while (scan_offset < undomap->alloc_offset) {
		head = get_buffer_data(scan_offset, &buffer, 0);
		switch (head->hdr_type) {
		case HAMMER_HEAD_TYPE_PAD:
		case HAMMER_HEAD_TYPE_DUMMY:
		case HAMMER_HEAD_TYPE_UNDO:
		case HAMMER_HEAD_TYPE_REDO:
			collect_undo(scan_offset, head);
			break;
		default:
			if (AssertOnFailure)
				assert(0);
			break;
		}
		if ((head->hdr_size & HAMMER_HEAD_ALIGN_MASK) ||
		     head->hdr_size == 0 ||
		     head->hdr_size > HAMMER_UNDO_ALIGN -
			((u_int)scan_offset & HAMMER_UNDO_MASK)) {
			printf("Illegal size, skipping to next boundary\n");
			scan_offset = (scan_offset + HAMMER_UNDO_MASK) &
					~HAMMER_UNDO_MASK64;
		} else {
			scan_offset += head->hdr_size;
		}
	}
	rel_buffer(buffer);
}

static __inline
void
collect_freemap_layer1(hammer_blockmap_t freemap)
{
	/*
	 * This translation is necessary for checkmap to work properly,
	 * as zone-4 is really just zone-2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, freemap->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_freemap_layer2(struct hammer_blockmap_layer1 *layer1)
{
	/*
	 * This translation is necessary for checkmap to work properly,
	 * as zone-4 is really just zone-2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, layer1->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_btree_root(hammer_off_t node_offset)
{
	collect_blockmap(node_offset,
		sizeof(struct hammer_node_ondisk),  /* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

static __inline
void
collect_btree_internal(hammer_btree_elm_t elm)
{
	collect_blockmap(elm->internal.subtree_offset,
		sizeof(struct hammer_node_ondisk),  /* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

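/*
 * Leaf data lands in a different zone depending on the record type;
 * recompute the zone this data should have been allocated from so it
 * can later be checked against the on-disk layer2->zone.
 */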
static __inline
void
collect_btree_leaf(hammer_btree_elm_t elm)
{
	int zone;

	switch (elm->base.rec_type) {
	case HAMMER_RECTYPE_INODE:
	case HAMMER_RECTYPE_DIRENTRY:
	case HAMMER_RECTYPE_EXT:
	case HAMMER_RECTYPE_FIX:
	case HAMMER_RECTYPE_PFS:
	case HAMMER_RECTYPE_SNAPSHOT:
	case HAMMER_RECTYPE_CONFIG:
		zone = HAMMER_ZONE_META_INDEX;
		break;
	case HAMMER_RECTYPE_DATA:
	case HAMMER_RECTYPE_DB:
		zone = hammer_data_zone_index(elm->leaf.data_len);
		break;
	default:
		zone = HAMMER_ZONE_UNAVAIL_INDEX;
		break;
	}
	collect_blockmap(elm->leaf.data_offset,
		(elm->leaf.data_len + 15) & ~15, zone);
}

static __inline
void
collect_undo(hammer_off_t scan_offset, hammer_fifo_head_t head)
{
	collect_blockmap(scan_offset, head->hdr_size,
		HAMMER_ZONE_UNDO_INDEX);
}

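/*
 * Translate a zone-X offset to its backing big-block and subtract the
 * allocated length from the tracked bytes_free for that big-block.
 */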
static
void
collect_blockmap(hammer_off_t offset, int32_t length, int zone)
{
	struct hammer_blockmap_layer1 layer1;
	struct hammer_blockmap_layer2 layer2;
	struct hammer_blockmap_layer2 *track2;
	hammer_off_t result_offset;
	collect_t collect;
	int error;

	result_offset = blockmap_lookup(offset, &layer1, &layer2, &error);
	if (AssertOnFailure) {
		assert(HAMMER_ZONE_DECODE(offset) == zone);
		assert(HAMMER_ZONE_DECODE(result_offset) ==
			HAMMER_ZONE_RAW_BUFFER_INDEX);
		assert(error == 0);
	}
	collect = collect_get(layer1.phys_offset); /* layer2 address */
	track2 = collect_get_track(collect, result_offset, zone, &layer2);
	track2->bytes_free -= length;
}

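/*
 * Look up the collect keyed by the given layer2 address, allocating
 * and inserting a zeroed one on first reference.
 */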
static
collect_t
collect_get(hammer_off_t phys_offset)
{
	collect_t collect;

	collect = RB_LOOKUP(collect_rb_tree, &CollectTree, phys_offset);
	if (collect)
		return(collect);

	collect = calloc(1, sizeof(*collect));
	collect->track2 = malloc(HAMMER_BIGBLOCK_SIZE);  /* 1<<23 bytes */
	collect->layer2 = malloc(HAMMER_BIGBLOCK_SIZE);  /* 1<<23 bytes */
	collect->offsets = malloc(sizeof(hammer_off_t) * HAMMER_BLOCKMAP_RADIX2);
	collect->phys_offset = phys_offset;
	RB_INSERT(collect_rb_tree, &CollectTree, collect);
	bzero(collect->track2, HAMMER_BIGBLOCK_SIZE);
	bzero(collect->layer2, HAMMER_BIGBLOCK_SIZE);

	return (collect);
}

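/*
 * Free a collect and the tracking arrays hanging off of it.
 */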
static
void
collect_rel(collect_t collect)
{
	free(collect->offsets);
	free(collect->layer2);
	free(collect->track2);
	free(collect);
}

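/*
 * Return the tracking layer2 entry for the big-block containing
 * offset, initializing it from the on-disk entry on first use.  The
 * entry_crc field doubles as the "track loaded" flag.
 */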
static
struct hammer_blockmap_layer2 *
collect_get_track(collect_t collect, hammer_off_t offset, int zone,
		  struct hammer_blockmap_layer2 *layer2)
{
	struct hammer_blockmap_layer2 *track2;
	size_t i;

	i = HAMMER_BLOCKMAP_LAYER2_INDEX(offset);
	track2 = &collect->track2[i];
	if (track2->entry_crc == 0) {
		collect->layer2[i] = *layer2;
		collect->offsets[i] = offset & ~HAMMER_BIGBLOCK_MASK64;
		track2->zone = zone;
		track2->bytes_free = HAMMER_BIGBLOCK_SIZE;
		track2->entry_crc = 1;	/* steal field to tag track load */
	}
	return (track2);
}

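/*
 * Dump every collect in the RB-tree, accumulate the error counts,
 * then tear the tree down and report the totals.
 */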
static
void
dump_collect_table(void)
{
	collect_t collect;
	int error = 0;
	struct zone_stat *stats = NULL;

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	RB_FOREACH(collect, collect_rb_tree, &CollectTree) {
		dump_collect(collect, stats);
		error += collect->error;
	}

	while ((collect = RB_ROOT(&CollectTree)) != NULL) {
		RB_REMOVE(collect_rb_tree, &CollectTree, collect);
		collect_rel(collect);
	}
	assert(RB_EMPTY(&CollectTree));

	if (VerboseOpt) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (num_bad_node || VerboseOpt) {
		printf("%d bad nodes\n", num_bad_node);
	}
	if (error || VerboseOpt) {
		printf("%d errors\n", error);
	}
}

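/*
 * Compare each tracked big-block against its on-disk layer2 entry,
 * reporting zone mismatches as BZ and bytes_free mismatches as BM.
 */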
static
void
dump_collect(collect_t collect, struct zone_stat *stats)
{
	struct hammer_blockmap_layer2 *track2;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t offset;
	size_t i;
	int zone;

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX2; ++i) {
		track2 = &collect->track2[i];
		layer2 = &collect->layer2[i];
		offset = collect->offsets[i];

		/*
		 * Check big-blocks referenced by freemap, data,
		 * B-Tree nodes and UNDO fifo.
		 */
		if (track2->entry_crc == 0)
			continue;

		zone = layer2->zone;
		if (AssertOnFailure) {
			assert((zone == HAMMER_ZONE_UNDO_INDEX) ||
				(zone == HAMMER_ZONE_FREEMAP_INDEX) ||
				(zone >= HAMMER_ZONE2_MAPPED_INDEX &&
				 zone < HAMMER_MAX_ZONES));
		}
		if (VerboseOpt)
			hammer_add_zone_stat_layer2(stats, layer2);

		if (track2->zone != layer2->zone) {
			printf("BZ\tblock=%016jx calc zone=%-2d, got zone=%-2d\n",
				(intmax_t)offset,
				track2->zone,
				layer2->zone);
			collect->error++;
		} else if (track2->bytes_free != layer2->bytes_free) {
			printf("BM\tblock=%016jx zone=%-2d calc %d free, got %d\n",
				(intmax_t)offset,
				layer2->zone,
				track2->bytes_free,
				layer2->bytes_free);
			collect->error++;
		} else if (VerboseOpt) {
			printf("\tblock=%016jx zone=%-2d %d free (correct)\n",
				(intmax_t)offset,
				layer2->zone,
				track2->bytes_free);
		}
	}
}