/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/cmd_blockmap.c,v 1.4 2008/07/19 18:48:14 dillon Exp $
 */

#include "hammer.h"

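/*
 * Rough layout notes, derived from the comments and constants used in
 * this file (illustrative only): a layer2 array holds 1<<19 entries of
 * 16 bytes (1<<23 bytes total) and each entry describes one 1<<23 byte
 * (8MB) big-block, so a single layer1 entry covers an address space of
 *
 *	(1 << 19) big-blocks x (1 << 23) bytes/big-block = 1 << (19+23) bytes (4TB)
 */
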
/*
 * Each collect covers a 1<<(19+23) byte address space of layer 1
 * (plus a copy of the 1<<23 bytes that hold the layer2 entries in layer 1).
 */
typedef struct collect {
	RB_ENTRY(collect) entry;
	hammer_off_t	phys_offset;	/* layer2 address pointed by layer1 */
	hammer_off_t	*offsets;	/* big-block offset for layer2[i] */
	struct hammer_blockmap_layer2 *track2;	/* track of layer2 entries */
	struct hammer_blockmap_layer2 *layer2;	/* 1<<19 x 16 bytes entries */
	int		error;		/* # of inconsistencies */
} *collect_t;

static int
collect_compare(struct collect *c1, struct collect *c2)
{
	if (c1->phys_offset < c2->phys_offset)
		return(-1);
	if (c1->phys_offset > c2->phys_offset)
		return(1);
	return(0);
}

RB_HEAD(collect_rb_tree, collect) CollectTree = RB_INITIALIZER(&CollectTree);
RB_PROTOTYPE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t);
RB_GENERATE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t,
	phys_offset);

static void dump_blockmap(const char *label, int zone);
static void check_freemap(hammer_blockmap_t freemap);
static void check_btree_node(hammer_off_t node_offset, int depth);
static void check_undo(hammer_blockmap_t undomap);
static __inline void collect_btree_root(hammer_off_t node_offset);
static __inline void collect_btree_internal(hammer_btree_elm_t elm);
static __inline void collect_btree_leaf(hammer_btree_elm_t elm);
static __inline void collect_freemap_layer1(hammer_blockmap_t freemap);
static __inline void collect_freemap_layer2(struct hammer_blockmap_layer1 *layer1);
static __inline void collect_undo(hammer_off_t scan_offset,
	hammer_fifo_head_t head);
static void collect_blockmap(hammer_off_t offset, int32_t length, int zone);
static struct hammer_blockmap_layer2 *collect_get_track(
	collect_t collect, hammer_off_t offset, int zone,
	struct hammer_blockmap_layer2 *layer2);
static collect_t collect_get(hammer_off_t phys_offset);
static void dump_collect_table(void);
static void dump_collect(collect_t collect, struct zone_stat *stats);

static int num_bad_layer1 = 0;
static int num_bad_layer2 = 0;
static int num_bad_node = 0;

void
hammer_cmd_blockmap(void)
{
	dump_blockmap("freemap", HAMMER_ZONE_FREEMAP_INDEX);
}

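/*
 * Scan the requested blockmap zone and print one line per layer1 entry
 * and one line per layer2 entry (big-block).  The leading flag characters
 * are blank when the corresponding check passes:
 *
 *	B - the layer1 or layer2 CRC does not match the stored value
 *	A - layer2 append_off is larger than HAMMER_BIGBLOCK_SIZE
 *	F - layer2 bytes_free is negative or larger than HAMMER_BIGBLOCK_SIZE
 */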
static
void
dump_blockmap(const char *label, int zone)
{
	struct volume_info *root_volume;
	hammer_blockmap_t rootmap;
	hammer_blockmap_t blockmap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t block_offset;
	struct zone_stat *stats = NULL;
	int xerr, aerr, ferr;
	int i;

	root_volume = get_root_volume();
	rootmap = &root_volume->ondisk->vol0_blockmap[zone];
	assert(rootmap->phys_offset != 0);

	printf("                   "
	       "phys             first            next             alloc\n");
	for (i = 0; i < HAMMER_MAX_ZONES; i++) {
		blockmap = &root_volume->ondisk->vol0_blockmap[i];
		if (VerboseOpt || i == zone) {
			printf("zone %-2d %-10s %016jx %016jx %016jx %016jx\n",
				i, (i == zone ? label : ""),
				(uintmax_t)blockmap->phys_offset,
				(uintmax_t)blockmap->first_offset,
				(uintmax_t)blockmap->next_offset,
				(uintmax_t)blockmap->alloc_offset);
		}
	}

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	for (phys_offset = HAMMER_ZONE_ENCODE(zone, 0);
	     phys_offset < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK);
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		/*
		 * Dive layer 1.
		 */
		layer1_offset = rootmap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		xerr = ' ';	/* good */
		if (layer1->layer1_crc !=
		    crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
			xerr = 'B';
			++num_bad_layer1;
		}
		if (xerr == ' ' &&
		    layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			continue;
		}
		printf("%c layer1 %016jx @%016jx blocks-free %jd\n",
			xerr,
			(uintmax_t)phys_offset,
			(uintmax_t)layer1->phys_offset,
			(intmax_t)layer1->blocks_free);

		for (block_offset = 0;
		     block_offset < HAMMER_BLOCKMAP_LAYER2;
		     block_offset += HAMMER_BIGBLOCK_SIZE) {
			hammer_off_t zone_offset = phys_offset + block_offset;
			/*
			 * Dive layer 2, each entry represents a big-block.
			 */
			layer2_offset = layer1->phys_offset +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
			layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

			xerr = aerr = ferr = ' ';	/* good */
			if (layer2->entry_crc !=
			    crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
				xerr = 'B';
				++num_bad_layer2;
			}
			if (layer2->append_off > HAMMER_BIGBLOCK_SIZE) {
				aerr = 'A';
				++num_bad_layer2;
			}
			if (layer2->bytes_free < 0 ||
			    layer2->bytes_free > HAMMER_BIGBLOCK_SIZE) {
				ferr = 'F';
				++num_bad_layer2;
			}

			if (VerboseOpt < 2 &&
			    xerr == ' ' && aerr == ' ' && ferr == ' ' &&
			    layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
				break;
			}
			printf("%c%c%c %016jx zone=%-2d ",
				xerr, aerr, ferr, (uintmax_t)zone_offset,
				layer2->zone);
			if (VerboseOpt) {
				printf("vol=%-3d L1#=%-6d L2#=%-6d L1=%-7lu L2=%-7lu ",
					HAMMER_VOL_DECODE(zone_offset),
					HAMMER_BLOCKMAP_LAYER1_INDEX(zone_offset),
					HAMMER_BLOCKMAP_LAYER2_INDEX(zone_offset),
					HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset),
					HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset));
			}
			printf("app=%-7d free=%-7d",
				layer2->append_off,
				layer2->bytes_free);
			if (VerboseOpt) {
				double bytes_used = HAMMER_BIGBLOCK_SIZE -
					layer2->bytes_free;
				printf(" fill=%-5.1lf crc=%08x-%08x\n",
					bytes_used * 100 / HAMMER_BIGBLOCK_SIZE,
					layer1->layer1_crc,
					layer2->entry_crc);
			} else {
				printf("\n");
			}

			if (VerboseOpt)
				hammer_add_zone_stat_layer2(stats, layer2);
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(root_volume);

	if (VerboseOpt) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (num_bad_layer1 || VerboseOpt) {
		printf("%d bad layer1\n", num_bad_layer1);
	}
	if (num_bad_layer2 || VerboseOpt) {
		printf("%d bad layer2\n", num_bad_layer2);
	}
}

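/*
 * checkmap runs in two passes.  The check_*() functions below walk the
 * freemap, the B-Tree and the UNDO FIFO and charge every reference they
 * find against the owning big-block via collect_blockmap().  Afterwards
 * dump_collect_table() compares the tracked usage against the on-disk
 * layer2 entries and reports any differences.
 */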
void
hammer_cmd_checkmap(void)
{
	struct volume_info *volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_t undomap;
	hammer_off_t node_offset;

	volume = get_root_volume();
	node_offset = volume->ondisk->vol0_btree_root;
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	undomap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (QuietOpt < 3) {
		printf("Volume header\tnext_tid=%016jx\n",
			(uintmax_t)volume->ondisk->vol0_next_tid);
		printf("\t\tbufoffset=%016jx\n",
			(uintmax_t)volume->ondisk->vol_buf_beg);
		printf("\t\tundosize=%jdMB\n",
			(intmax_t)((undomap->alloc_offset & HAMMER_OFF_LONG_MASK)
				/ (1024 * 1024)));
	}
	rel_volume(volume);

	AssertOnFailure = (DebugOpt != 0);

	printf("Collecting allocation info from freemap: ");
	fflush(stdout);
	check_freemap(freemap);
	printf("done\n");

	printf("Collecting allocation info from B-Tree: ");
	fflush(stdout);
	check_btree_node(node_offset, 0);
	printf("done\n");

	printf("Collecting allocation info from UNDO: ");
	fflush(stdout);
	check_undo(undomap);
	printf("done\n");

	dump_collect_table();
}

static void
check_freemap(hammer_blockmap_t freemap)
{
	hammer_off_t offset;
	struct buffer_info *buffer1 = NULL;
	struct hammer_blockmap_layer1 *layer1;
	int i;

	collect_freemap_layer1(freemap);

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX1; ++i) {
		offset = freemap->phys_offset + i * sizeof(*layer1);
		layer1 = get_buffer_data(offset, &buffer1, 0);
		if (layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL)
			collect_freemap_layer2(layer1);
	}
	rel_buffer(buffer1);
}

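/*
 * Recursively walk the B-Tree starting at the root node, collecting every
 * node and element reference.  Bad nodes are flagged in the output:
 *
 *	B - the node CRC did not match (or the node could not be read)
 *	I - the node could not be read at all (I/O error)
 */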
static void
check_btree_node(hammer_off_t node_offset, int depth)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;
	hammer_btree_elm_t elm;
	int i;
	char badc = ' ';	/* good */
	char badm = ' ';	/* good */

	if (depth == 0)
		collect_btree_root(node_offset);
	node = get_node(node_offset, &buffer);

	if (node == NULL) {
		badc = 'B';
		badm = 'I';
	} else if (crc32(&node->crc + 1, HAMMER_BTREE_CRCSIZE) != node->crc) {
		badc = 'B';
	}

	if (badm != ' ' || badc != ' ') {	/* not good */
		++num_bad_node;
		printf("%c%c NODE %016jx ",
			badc, badm, (uintmax_t)node_offset);
		if (node == NULL) {
			printf("(IO ERROR)\n");
			rel_buffer(buffer);
			return;
		} else {
			printf("cnt=%02d p=%016jx type=%c depth=%d mirror=%016jx\n",
				node->count,
				(uintmax_t)node->parent,
				(node->type ? node->type : '?'),
				depth,
				(uintmax_t)node->mirror_tid);
		}
	}

	for (i = 0; i < node->count; ++i) {
		elm = &node->elms[i];

		switch(node->type) {
		case HAMMER_BTREE_TYPE_INTERNAL:
			if (elm->internal.subtree_offset) {
				collect_btree_internal(elm);
				check_btree_node(elm->internal.subtree_offset,
					depth + 1);
			}
			break;
		case HAMMER_BTREE_TYPE_LEAF:
			if (elm->leaf.data_offset)
				collect_btree_leaf(elm);
			break;
		default:
			assert(!AssertOnFailure);
			break;
		}
	}
	rel_buffer(buffer);
}

static void
check_undo(hammer_blockmap_t undomap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t scan_offset;
	hammer_fifo_head_t head;

	scan_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
	while (scan_offset < undomap->alloc_offset) {
		head = get_buffer_data(scan_offset, &buffer, 0);
		switch (head->hdr_type) {
		case HAMMER_HEAD_TYPE_PAD:
		case HAMMER_HEAD_TYPE_DUMMY:
		case HAMMER_HEAD_TYPE_UNDO:
		case HAMMER_HEAD_TYPE_REDO:
			collect_undo(scan_offset, head);
			break;
		default:
			assert(!AssertOnFailure);
			break;
		}
		if ((head->hdr_size & HAMMER_HEAD_ALIGN_MASK) ||
		    head->hdr_size == 0 ||
		    head->hdr_size > HAMMER_UNDO_ALIGN -
			((u_int)scan_offset & HAMMER_UNDO_MASK)) {
			printf("Illegal size, skipping to next boundary\n");
			scan_offset = (scan_offset + HAMMER_UNDO_MASK) &
					~HAMMER_UNDO_MASK64;
		} else {
			scan_offset += head->hdr_size;
		}
	}
	rel_buffer(buffer);
}

static __inline
void
collect_freemap_layer1(hammer_blockmap_t freemap)
{
	/*
	 * This translation is necessary to do checkmap properly
	 * as zone4 is really just zone2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, freemap->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_freemap_layer2(struct hammer_blockmap_layer1 *layer1)
{
	/*
	 * This translation is necessary to do checkmap properly
	 * as zone4 is really just zone2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, layer1->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_btree_root(hammer_off_t node_offset)
{
	collect_blockmap(node_offset,
		sizeof(struct hammer_node_ondisk),	/* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

static __inline
void
collect_btree_internal(hammer_btree_elm_t elm)
{
	collect_blockmap(elm->internal.subtree_offset,
		sizeof(struct hammer_node_ondisk),	/* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

static __inline
void
collect_btree_leaf(hammer_btree_elm_t elm)
{
	int zone;

	switch (elm->base.rec_type) {
	case HAMMER_RECTYPE_INODE:
	case HAMMER_RECTYPE_DIRENTRY:
	case HAMMER_RECTYPE_EXT:
	case HAMMER_RECTYPE_FIX:
	case HAMMER_RECTYPE_PFS:
	case HAMMER_RECTYPE_SNAPSHOT:
	case HAMMER_RECTYPE_CONFIG:
		zone = HAMMER_ZONE_META_INDEX;
		break;
	case HAMMER_RECTYPE_DATA:
	case HAMMER_RECTYPE_DB:
		zone = hammer_data_zone_index(elm->leaf.data_len);
		break;
	default:
		zone = HAMMER_ZONE_UNAVAIL_INDEX;
		break;
	}
	collect_blockmap(elm->leaf.data_offset,
		(elm->leaf.data_len + 15) & ~15, zone);
}

static __inline
void
collect_undo(hammer_off_t scan_offset, hammer_fifo_head_t head)
{
	collect_blockmap(scan_offset, head->hdr_size,
		HAMMER_ZONE_UNDO_INDEX);
}

static
void
collect_blockmap(hammer_off_t offset, int32_t length, int zone)
{
	struct hammer_blockmap_layer1 layer1;
	struct hammer_blockmap_layer2 layer2;
	struct hammer_blockmap_layer2 *track2;
	hammer_off_t result_offset;
	collect_t collect;
	int error;

	result_offset = blockmap_lookup(offset, &layer1, &layer2, &error);
	if (AssertOnFailure) {
		assert(HAMMER_ZONE_DECODE(offset) == zone);
		assert(HAMMER_ZONE_DECODE(result_offset) ==
			HAMMER_ZONE_RAW_BUFFER_INDEX);
		assert(error == 0);
	}
	collect = collect_get(layer1.phys_offset);	/* layer2 address */
	track2 = collect_get_track(collect, result_offset, zone, &layer2);
	track2->bytes_free -= length;
}

static
collect_t
collect_get(hammer_off_t phys_offset)
{
	collect_t collect;

	collect = RB_LOOKUP(collect_rb_tree, &CollectTree, phys_offset);
	if (collect)
		return(collect);

	collect = calloc(sizeof(*collect), 1);
	collect->track2 = malloc(HAMMER_BIGBLOCK_SIZE);	/* 1<<23 bytes */
	collect->layer2 = malloc(HAMMER_BIGBLOCK_SIZE);	/* 1<<23 bytes */
	collect->offsets = malloc(sizeof(hammer_off_t) * HAMMER_BLOCKMAP_RADIX2);
	collect->phys_offset = phys_offset;
	RB_INSERT(collect_rb_tree, &CollectTree, collect);
	bzero(collect->track2, HAMMER_BIGBLOCK_SIZE);
	bzero(collect->layer2, HAMMER_BIGBLOCK_SIZE);

	return (collect);
}

static
void
collect_rel(collect_t collect)
{
	free(collect->offsets);
	free(collect->layer2);
	free(collect->track2);
	free(collect);
}

static
struct hammer_blockmap_layer2 *
collect_get_track(collect_t collect, hammer_off_t offset, int zone,
	struct hammer_blockmap_layer2 *layer2)
{
	struct hammer_blockmap_layer2 *track2;
	size_t i;

	i = HAMMER_BLOCKMAP_LAYER2_INDEX(offset);
	track2 = &collect->track2[i];
	if (track2->entry_crc == 0) {
		collect->layer2[i] = *layer2;
		collect->offsets[i] = offset & ~HAMMER_BIGBLOCK_MASK64;
		track2->zone = zone;
		track2->bytes_free = HAMMER_BIGBLOCK_SIZE;
		track2->entry_crc = 1;	/* steal field to tag track load */
	}
	return (track2);
}

static
void
dump_collect_table(void)
{
	collect_t collect;
	int error = 0;
	struct zone_stat *stats = NULL;

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	RB_FOREACH(collect, collect_rb_tree, &CollectTree) {
		dump_collect(collect, stats);
		error += collect->error;
	}

	while ((collect = RB_ROOT(&CollectTree)) != NULL) {
		RB_REMOVE(collect_rb_tree, &CollectTree, collect);
		collect_rel(collect);
	}
	assert(RB_EMPTY(&CollectTree));

	if (VerboseOpt) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (num_bad_node || VerboseOpt) {
		printf("%d bad nodes\n", num_bad_node);
	}
	if (error || VerboseOpt) {
		printf("%d errors\n", error);
	}
}

static
void
dump_collect(collect_t collect, struct zone_stat *stats)
{
	struct hammer_blockmap_layer2 *track2;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t offset;
	int i, zone;

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX2; ++i) {
		track2 = &collect->track2[i];
		layer2 = &collect->layer2[i];
		offset = collect->offsets[i];

		/*
		 * Check big-blocks referenced by freemap, data,
		 * B-Tree nodes and UNDO fifo.
		 */
		if (track2->entry_crc == 0)
			continue;

		zone = layer2->zone;
		if (AssertOnFailure) {
			assert((zone == HAMMER_ZONE_UNDO_INDEX) ||
				(zone == HAMMER_ZONE_FREEMAP_INDEX) ||
				hammer_is_zone2_mapped_index(zone));
		}
		if (VerboseOpt)
			hammer_add_zone_stat_layer2(stats, layer2);

		if (track2->zone != layer2->zone) {
			printf("BZ\tblock=%016jx calc zone=%-2d, got zone=%-2d\n",
				(intmax_t)offset,
				track2->zone,
				layer2->zone);
			collect->error++;
		} else if (track2->bytes_free != layer2->bytes_free) {
			printf("BM\tblock=%016jx zone=%-2d calc %d free, got %d\n",
				(intmax_t)offset,
				layer2->zone,
				track2->bytes_free,
				layer2->bytes_free);
			collect->error++;
		} else if (VerboseOpt) {
			printf("\tblock=%016jx zone=%-2d %d free (correct)\n",
				(intmax_t)offset,
				layer2->zone,
				track2->bytes_free);
		}
	}
}