/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/cmd_blockmap.c,v 1.4 2008/07/19 18:48:14 dillon Exp $
 */

#include "hammer.h"

/*
 * Each collect covers 1<<(19+23) bytes address space of layer 1.
 * (plus a copy of 1<<23 bytes that holds layer2 entries in layer 1).
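 *
 * In other words, one collect shadows the 1<<19 layer2 entries
 * (16 bytes each, one 1<<23 byte big-block's worth, per the layer2
 * field below) reachable through a single layer1 entry.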
 */
typedef struct collect {
	RB_ENTRY(collect) entry;
	hammer_off_t	phys_offset;	/* layer2 address pointed by layer1 */
	hammer_off_t	*offsets;	/* big-block offset for layer2[i] */
	struct hammer_blockmap_layer2 *track2;	/* track of layer2 entries */
	struct hammer_blockmap_layer2 *layer2;	/* 1<<19 x 16 bytes entries */
	int		error;		/* # of inconsistencies */
} *collect_t;

static int
collect_compare(struct collect *c1, struct collect *c2)
{
	if (c1->phys_offset < c2->phys_offset)
		return(-1);
	if (c1->phys_offset > c2->phys_offset)
		return(1);
	return(0);
}

RB_HEAD(collect_rb_tree, collect) CollectTree = RB_INITIALIZER(&CollectTree);
RB_PROTOTYPE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t);
RB_GENERATE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t,
	phys_offset);

static void dump_blockmap(const char *label, int zone);
static void check_freemap(hammer_blockmap_t freemap);
static void check_btree_node(hammer_off_t node_offset, int depth);
static void check_undo(hammer_blockmap_t undomap);
static __inline void collect_btree_root(hammer_off_t node_offset);
static __inline void collect_btree_internal(hammer_btree_elm_t elm);
static __inline void collect_btree_leaf(hammer_btree_elm_t elm);
static __inline void collect_freemap_layer1(hammer_blockmap_t freemap);
static __inline void collect_freemap_layer2(struct hammer_blockmap_layer1 *layer1);
static __inline void collect_undo(hammer_off_t scan_offset,
	hammer_fifo_head_t head);
static void collect_blockmap(hammer_off_t offset, int32_t length, int zone);
static struct hammer_blockmap_layer2 *collect_get_track(
	collect_t collect, hammer_off_t offset, int zone,
	struct hammer_blockmap_layer2 *layer2);
static collect_t collect_get(hammer_off_t phys_offset);
static void dump_collect_table(void);
static void dump_collect(collect_t collect, struct zone_stat *stats);

void
hammer_cmd_blockmap(void)
{
	dump_blockmap("freemap", HAMMER_ZONE_FREEMAP_INDEX);
}

static
void
dump_blockmap(const char *label, int zone)
{
	struct volume_info *root_volume;
	hammer_blockmap_t rootmap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t scan1;
	hammer_off_t scan2;
	struct zone_stat *stats = NULL;
	int xerr;

	assert(RootVolNo >= 0);
	root_volume = get_volume(RootVolNo);
	rootmap = &root_volume->ondisk->vol0_blockmap[zone];
	assert(rootmap->phys_offset != 0);

	printf("zone %-16s phys %016jx first %016jx next %016jx alloc %016jx\n",
		label,
		(uintmax_t)rootmap->phys_offset,
		(uintmax_t)rootmap->first_offset,
		(uintmax_t)rootmap->next_offset,
		(uintmax_t)rootmap->alloc_offset);

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	for (scan1 = HAMMER_ZONE_ENCODE(zone, 0);
	     scan1 < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK);
	     scan1 += HAMMER_BLOCKMAP_LAYER2) {
		/*
		 * Dive layer 1.
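		 *
		 * Each layer1 entry lives at rootmap->phys_offset plus
		 * HAMMER_BLOCKMAP_LAYER1_OFFSET(scan1) and points at the
		 * big-block holding its layer2 array; a CRC mismatch is
		 * flagged 'B' in the output below.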
		 */
		layer1_offset = rootmap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(scan1);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		xerr = ' ';
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			xerr = 'B';
		if (xerr == ' ' &&
		    layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			continue;
		}
		printf("%c layer1 %016jx @%016jx blocks-free %jd\n",
			xerr,
			(uintmax_t)scan1,
			(uintmax_t)layer1->phys_offset,
			(intmax_t)layer1->blocks_free);
		if (layer1->phys_offset == HAMMER_BLOCKMAP_FREE)
			continue;
		for (scan2 = scan1;
		     scan2 < scan1 + HAMMER_BLOCKMAP_LAYER2;
		     scan2 += HAMMER_BIGBLOCK_SIZE
		) {
			/*
			 * Dive layer 2, each entry represents a big-block.
			 */
			layer2_offset = layer1->phys_offset +
					HAMMER_BLOCKMAP_LAYER2_OFFSET(scan2);
			layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
			xerr = ' ';
			if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
				xerr = 'B';
			printf("%c %016jx zone=%-2d ",
				xerr,
				(uintmax_t)scan2,
				layer2->zone);
			if (VerboseOpt > 1)
				printf("vol=%-3d L1=%-7lu L2=%-7lu ",
					HAMMER_VOL_DECODE(scan2),
					HAMMER_BLOCKMAP_LAYER1_OFFSET(scan2),
					HAMMER_BLOCKMAP_LAYER2_OFFSET(scan2));
			else if (VerboseOpt > 0)
				printf("vol=%-3d L1=%-6lu L2=%-6lu ",
					HAMMER_VOL_DECODE(scan2),
					HAMMER_BLOCKMAP_LAYER1_INDEX(scan2),
					HAMMER_BLOCKMAP_LAYER2_INDEX(scan2));
			printf("app=%-7d free=%-7d\n",
				layer2->append_off,
				layer2->bytes_free);

			if (VerboseOpt)
				hammer_add_zone_stat_layer2(stats, layer2);
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(root_volume);

	if (VerboseOpt) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}
}

void
hammer_cmd_checkmap(void)
{
	struct volume_info *volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_t undomap;
	hammer_off_t node_offset;

	volume = get_volume(RootVolNo);
	node_offset = volume->ondisk->vol0_btree_root;
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	undomap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (QuietOpt < 3) {
		printf("Volume header\trecords=%jd next_tid=%016jx\n",
		       (intmax_t)volume->ondisk->vol0_stat_records,
		       (uintmax_t)volume->ondisk->vol0_next_tid);
		printf("\t\tbufoffset=%016jx\n",
		       (uintmax_t)volume->ondisk->vol_buf_beg);
		printf("\t\tundosize=%jdMB\n",
		       (intmax_t)((undomap->alloc_offset & HAMMER_OFF_LONG_MASK)
			/ (1024 * 1024)));
	}
	rel_volume(volume);

	assert(HAMMER_ZONE_UNDO_INDEX < HAMMER_ZONE2_MAPPED_INDEX);
	assert(HAMMER_ZONE2_MAPPED_INDEX < HAMMER_MAX_ZONES);
	AssertOnFailure = (DebugOpt != 0);

	printf("Collecting allocation info from freemap: ");
	fflush(stdout);
	check_freemap(freemap);
	printf("done\n");

	printf("Collecting allocation info from B-Tree: ");
	fflush(stdout);
	check_btree_node(node_offset, 0);
	printf("done\n");

	printf("Collecting allocation info from UNDO: ");
	fflush(stdout);
	check_undo(undomap);
	printf("done\n");

	dump_collect_table();
	AssertOnFailure = 1;
}
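
/*
 * Record every big-block consumed by the freemap itself: the layer1
 * array plus each layer2 big-block referenced by an in-use layer1
 * entry.
 */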
static void
check_freemap(hammer_blockmap_t freemap)
{
	hammer_off_t offset;
	struct buffer_info *buffer1 = NULL;
	struct hammer_blockmap_layer1 *layer1;
	int i;

	collect_freemap_layer1(freemap);

	for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
		offset = freemap->phys_offset + i * sizeof(*layer1);
		layer1 = get_buffer_data(offset, &buffer1, 0);
		if (layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL)
			collect_freemap_layer2(layer1);
	}
	rel_buffer(buffer1);
}

static void
check_btree_node(hammer_off_t node_offset, int depth)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;
	hammer_btree_elm_t elm;
	int i;
	char badc;

	if (depth == 0)
		collect_btree_root(node_offset);
	node = get_node(node_offset, &buffer);

	if (crc32(&node->crc + 1, HAMMER_BTREE_CRCSIZE) == node->crc)
		badc = ' ';
	else
		badc = 'B';

	if (badc != ' ') {
		printf("%c NODE %016jx cnt=%02d p=%016jx "
		       "type=%c depth=%d",
		       badc,
		       (uintmax_t)node_offset, node->count,
		       (uintmax_t)node->parent,
		       (node->type ? node->type : '?'), depth);
		printf(" mirror %016jx\n", (uintmax_t)node->mirror_tid);
	}

	for (i = 0; i < node->count; ++i) {
		elm = &node->elms[i];

		switch(node->type) {
		case HAMMER_BTREE_TYPE_INTERNAL:
			if (elm->internal.subtree_offset) {
				collect_btree_internal(elm);
				check_btree_node(elm->internal.subtree_offset,
					depth + 1);
			}
			break;
		case HAMMER_BTREE_TYPE_LEAF:
			if (elm->leaf.data_offset)
				collect_btree_leaf(elm);
			break;
		default:
			if (AssertOnFailure)
				assert(0);
			break;
		}
	}
	rel_buffer(buffer);
}

static void
check_undo(hammer_blockmap_t undomap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t scan_offset;
	hammer_fifo_head_t head;

	scan_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
	while (scan_offset < undomap->alloc_offset) {
		head = get_buffer_data(scan_offset, &buffer, 0);
		switch (head->hdr_type) {
		case HAMMER_HEAD_TYPE_PAD:
		case HAMMER_HEAD_TYPE_DUMMY:
		case HAMMER_HEAD_TYPE_UNDO:
		case HAMMER_HEAD_TYPE_REDO:
			collect_undo(scan_offset, head);
			break;
		default:
			if (AssertOnFailure)
				assert(0);
			break;
		}
		if ((head->hdr_size & HAMMER_HEAD_ALIGN_MASK) ||
		     head->hdr_size == 0 ||
		     head->hdr_size > HAMMER_UNDO_ALIGN -
			((u_int)scan_offset & HAMMER_UNDO_MASK)) {
			printf("Illegal size, skipping to next boundary\n");
			scan_offset = (scan_offset + HAMMER_UNDO_MASK) &
					~HAMMER_UNDO_MASK64;
		} else {
			scan_offset += head->hdr_size;
		}
	}
	rel_buffer(buffer);
}

static __inline
void
collect_freemap_layer1(hammer_blockmap_t freemap)
{
	/*
	 * This translation is necessary to do checkmap properly
	 * as zone4 is really just zone2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, freemap->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_freemap_layer2(struct hammer_blockmap_layer1 *layer1)
{
	/*
	 * This translation is necessary to do checkmap properly
	 * as zone4 is really just zone2 address space.
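	 *
	 * layer1->phys_offset is a zone-2 (raw buffer) address, so
	 * rewriting it into its zone-4 alias lets collect_blockmap()
	 * account for the layer2 big-block through the same blockmap
	 * lookup path used for every other zone.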
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, layer1->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_btree_root(hammer_off_t node_offset)
{
	collect_blockmap(node_offset,
		sizeof(struct hammer_node_ondisk),	/* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

static __inline
void
collect_btree_internal(hammer_btree_elm_t elm)
{
	collect_blockmap(elm->internal.subtree_offset,
		sizeof(struct hammer_node_ondisk),	/* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

static __inline
void
collect_btree_leaf(hammer_btree_elm_t elm)
{
	int zone;

	switch (elm->base.rec_type) {
	case HAMMER_RECTYPE_INODE:
	case HAMMER_RECTYPE_DIRENTRY:
	case HAMMER_RECTYPE_EXT:
	case HAMMER_RECTYPE_FIX:
	case HAMMER_RECTYPE_PFS:
	case HAMMER_RECTYPE_SNAPSHOT:
	case HAMMER_RECTYPE_CONFIG:
		zone = HAMMER_ZONE_META_INDEX;
		break;
	case HAMMER_RECTYPE_DATA:
	case HAMMER_RECTYPE_DB:
		/*
		 * There is an exceptional case where HAMMER uses
		 * HAMMER_ZONE_LARGE_DATA when the data length is
		 * >HAMMER_BUFSIZE/2 (not >=HAMMER_BUFSIZE).
		 * This exceptional case is currently used by the
		 * mirror write code; however, the following code can
		 * ignore that and simply select a zone the normal
		 * way, using >=HAMMER_BUFSIZE.
		 * See hammer_alloc_data() for details.
		 */
		zone = elm->leaf.data_len >= HAMMER_BUFSIZE ?
		       HAMMER_ZONE_LARGE_DATA_INDEX :
		       HAMMER_ZONE_SMALL_DATA_INDEX;
		break;
	default:
		zone = HAMMER_ZONE_UNAVAIL_INDEX;
		break;
	}
	collect_blockmap(elm->leaf.data_offset,
		(elm->leaf.data_len + 15) & ~15, zone);
}

static __inline
void
collect_undo(hammer_off_t scan_offset, hammer_fifo_head_t head)
{
	collect_blockmap(scan_offset, head->hdr_size,
		HAMMER_ZONE_UNDO_INDEX);
}

static
void
collect_blockmap(hammer_off_t offset, int32_t length, int zone)
{
	struct hammer_blockmap_layer1 layer1;
	struct hammer_blockmap_layer2 layer2;
	struct hammer_blockmap_layer2 *track2;
	hammer_off_t result_offset;
	collect_t collect;
	int error;

	result_offset = blockmap_lookup(offset, &layer1, &layer2, &error);
	if (AssertOnFailure) {
		assert(HAMMER_ZONE_DECODE(offset) == zone);
		assert(HAMMER_ZONE_DECODE(result_offset) ==
			HAMMER_ZONE_RAW_BUFFER_INDEX);
		assert(error == 0);
	}
	collect = collect_get(layer1.phys_offset); /* layer2 address */
	track2 = collect_get_track(collect, result_offset, zone, &layer2);
	track2->bytes_free -= length;
}

static
collect_t
collect_get(hammer_off_t phys_offset)
{
	collect_t collect;

	collect = RB_LOOKUP(collect_rb_tree, &CollectTree, phys_offset);
	if (collect)
		return(collect);

	collect = calloc(1, sizeof(*collect));
	collect->track2 = malloc(HAMMER_BIGBLOCK_SIZE);	/* 1<<23 bytes */
	collect->layer2 = malloc(HAMMER_BIGBLOCK_SIZE);	/* 1<<23 bytes */
	collect->offsets = malloc(sizeof(hammer_off_t) * HAMMER_BLOCKMAP_RADIX2);
	collect->phys_offset = phys_offset;
	RB_INSERT(collect_rb_tree, &CollectTree, collect);
	bzero(collect->track2, HAMMER_BIGBLOCK_SIZE);
	bzero(collect->layer2, HAMMER_BIGBLOCK_SIZE);

	return (collect);
}
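
/*
 * Release a collect and the per-big-block shadow arrays allocated by
 * collect_get().
 */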
static
void
collect_rel(collect_t collect)
{
	free(collect->offsets);
	free(collect->layer2);
	free(collect->track2);
	free(collect);
}

static
struct hammer_blockmap_layer2 *
collect_get_track(collect_t collect, hammer_off_t offset, int zone,
	struct hammer_blockmap_layer2 *layer2)
{
	struct hammer_blockmap_layer2 *track2;
	size_t i;

	i = HAMMER_BLOCKMAP_LAYER2_INDEX(offset);
	track2 = &collect->track2[i];
	if (track2->entry_crc == 0) {
		collect->layer2[i] = *layer2;
		collect->offsets[i] = offset & ~HAMMER_BIGBLOCK_MASK64;
		track2->zone = zone;
		track2->bytes_free = HAMMER_BIGBLOCK_SIZE;
		track2->entry_crc = 1;	/* steal field to tag track load */
	}
	return (track2);
}

static
void
dump_collect_table(void)
{
	collect_t collect;
	int error = 0;
	struct zone_stat *stats = NULL;

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	RB_FOREACH(collect, collect_rb_tree, &CollectTree) {
		dump_collect(collect, stats);
		error += collect->error;
	}

	while ((collect = RB_ROOT(&CollectTree)) != NULL) {
		RB_REMOVE(collect_rb_tree, &CollectTree, collect);
		collect_rel(collect);
	}
	assert(RB_EMPTY(&CollectTree));

	if (VerboseOpt) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (error || VerboseOpt)
		printf("%d errors\n", error);
}

static
void
dump_collect(collect_t collect, struct zone_stat *stats)
{
	struct hammer_blockmap_layer2 *track2;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t offset;
	size_t i;
	int zone;

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX2; ++i) {
		track2 = &collect->track2[i];
		layer2 = &collect->layer2[i];
		offset = collect->offsets[i];

		/*
		 * Check big-blocks referenced by freemap, data,
		 * B-Tree nodes and UNDO fifo.
		 */
		if (track2->entry_crc == 0)
			continue;

		zone = layer2->zone;
		if (AssertOnFailure) {
			assert((zone == HAMMER_ZONE_UNDO_INDEX) ||
			       (zone == HAMMER_ZONE_FREEMAP_INDEX) ||
			       (zone >= HAMMER_ZONE2_MAPPED_INDEX &&
				zone < HAMMER_MAX_ZONES));
		}
		if (VerboseOpt)
			hammer_add_zone_stat_layer2(stats, layer2);

		if (track2->zone != layer2->zone) {
			printf("BZ\tblock=%016jx calc zone=%-2d, got zone=%-2d\n",
				(uintmax_t)offset,
				track2->zone,
				layer2->zone);
			collect->error++;
		} else if (track2->bytes_free != layer2->bytes_free) {
			printf("BM\tblock=%016jx zone=%-2d calc %d free, got %d\n",
				(uintmax_t)offset,
				layer2->zone,
				track2->bytes_free,
				layer2->bytes_free);
			collect->error++;
		} else if (VerboseOpt) {
			printf("\tblock=%016jx zone=%-2d %d free (correct)\n",
				(uintmax_t)offset,
				layer2->zone,
				track2->bytes_free);
		}
	}
}