1 /* 2 * Copyright (c) 2010 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 35 #include "hammer.h" 36 37 #include <sys/tree.h> 38 39 struct recover_dict { 40 struct recover_dict *next; 41 struct recover_dict *parent; 42 int64_t obj_id; 43 uint8_t obj_type; 44 uint8_t flags; 45 uint16_t pfs_id; 46 int64_t size; 47 char *name; 48 }; 49 50 #define DICTF_MADEDIR 0x01 51 #define DICTF_MADEFILE 0x02 52 #define DICTF_PARENT 0x04 /* parent attached for real */ 53 #define DICTF_TRAVERSED 0x80 54 55 typedef struct bigblock { 56 RB_ENTRY(bigblock) entry; 57 hammer_off_t phys_offset; /* zone-2 */ 58 struct hammer_blockmap_layer1 layer1; 59 struct hammer_blockmap_layer2 layer2; 60 } *bigblock_t; 61 62 static void recover_top(char *ptr, hammer_off_t offset); 63 static void recover_elm(hammer_btree_leaf_elm_t leaf); 64 static struct recover_dict *get_dict(int64_t obj_id, uint16_t pfs_id); 65 static char *recover_path(struct recover_dict *dict); 66 static void sanitize_string(char *str); 67 static hammer_off_t scan_raw_limit(void); 68 static void scan_bigblocks(int target_zone); 69 static void free_bigblocks(void); 70 static void add_bigblock_entry(hammer_off_t offset, 71 hammer_blockmap_layer1_t layer1, hammer_blockmap_layer2_t layer2); 72 static bigblock_t get_bigblock_entry(hammer_off_t offset); 73 74 static const char *TargetDir; 75 static int CachedFd = -1; 76 static char *CachedPath; 77 78 static int 79 bigblock_cmp(bigblock_t b1, bigblock_t b2) 80 { 81 if (b1->phys_offset < b2->phys_offset) 82 return(-1); 83 if (b1->phys_offset > b2->phys_offset) 84 return(1); 85 return(0); 86 } 87 88 RB_HEAD(bigblock_rb_tree, bigblock) ZoneTree = RB_INITIALIZER(&ZoneTree); 89 RB_PROTOTYPE2(bigblock_rb_tree, bigblock, entry, bigblock_cmp, hammer_off_t); 90 RB_GENERATE2(bigblock_rb_tree, bigblock, entry, bigblock_cmp, hammer_off_t, 91 phys_offset); 92 93 /* 94 * There was a hidden bug here while iterating zone-2 offset as 95 * shown in an example below. 96 * 97 * If a volume was once used as HAMMER filesystem which consists of 98 * multiple volumes whose usage has reached beyond the first volume, 99 * and then later re-formatted only using 1 volume, hammer recover is 100 * likely to hit assertion in get_buffer() due to having access to 101 * invalid volume (vol1,2,...) from old filesystem data. 102 * 103 * To avoid this, now the command only scans upto the last big-block 104 * that's actually used for filesystem data or meta-data at the moment, 105 * if all layer1/2 entries have correct CRC values. This also avoids 106 * recovery of irrelevant files from old filesystem. 107 * 108 * It also doesn't scan beyond append offset of big-blocks in B-Tree 109 * zone to avoid recovery of irrelevant files from old filesystem, 110 * if layer1/2 entries for those big-blocks have correct CRC values. 111 * 112 * |-----vol0-----|-----vol1-----|-----vol2-----| old filesystem 113 * <-----------------------> used by old filesystem 114 * 115 * |-----vol0-----| new filesystem 116 * <-----> used by new filesystem 117 * <-------> unused, invalid data from old filesystem 118 * <-> B-Tree nodes likely to point to vol1 119 */ 120 121 void 122 hammer_cmd_recover(char **av, int ac) 123 { 124 buffer_info_t data_buffer; 125 volume_info_t volume; 126 bigblock_t b = NULL; 127 hammer_off_t off; 128 hammer_off_t off_end; 129 hammer_off_t off_blk; 130 hammer_off_t raw_limit = 0; 131 hammer_off_t zone_limit = 0; 132 char *ptr; 133 int i; 134 int target_zone = HAMMER_ZONE_BTREE_INDEX; 135 int full = 0; 136 int quick = 0; 137 138 if (ac < 1) { 139 errx(1, "hammer recover <target_dir> [full|quick]"); 140 /* not reached */ 141 } 142 143 TargetDir = av[0]; 144 if (ac > 1) { 145 if (!strcmp(av[1], "full")) 146 full = 1; 147 if (!strcmp(av[1], "quick")) 148 quick = 1; 149 } 150 assert(!full || !quick); 151 152 if (mkdir(TargetDir, 0777) == -1) { 153 if (errno != EEXIST) { 154 err(1, "mkdir"); 155 /* not reached */ 156 } 157 } 158 159 printf("Running %sraw scan of HAMMER image, recovering to %s\n", 160 full ? "full " : quick ? "quick " : "", 161 TargetDir); 162 163 if (!full) { 164 scan_bigblocks(target_zone); 165 raw_limit = scan_raw_limit(); 166 if (raw_limit) { 167 raw_limit += HAMMER_BIGBLOCK_SIZE; 168 assert(hammer_is_zone_raw_buffer(raw_limit)); 169 } 170 } 171 172 if (quick) { 173 assert(!full); 174 if (!RB_EMPTY(&ZoneTree)) { 175 printf("Found zone-%d big-blocks at\n", target_zone); 176 RB_FOREACH(b, bigblock_rb_tree, &ZoneTree) 177 printf("%016jx\n", b->phys_offset); 178 179 b = RB_MAX(bigblock_rb_tree, &ZoneTree); 180 zone_limit = b->phys_offset + HAMMER_BIGBLOCK_SIZE; 181 assert(hammer_is_zone_raw_buffer(zone_limit)); 182 } 183 } 184 185 if (raw_limit || zone_limit) { 186 #define _fmt "Scanning zone-%d big-blocks till %016jx" 187 if (!raw_limit) /* unlikely */ 188 printf(_fmt" ???", target_zone, zone_limit); 189 else if (!zone_limit) 190 printf(_fmt, HAMMER_ZONE_RAW_BUFFER_INDEX, raw_limit); 191 else if (raw_limit >= zone_limit) 192 printf(_fmt, target_zone, zone_limit); 193 else /* unlikely */ 194 printf(_fmt" ???", HAMMER_ZONE_RAW_BUFFER_INDEX, raw_limit); 195 printf("\n"); 196 } 197 198 data_buffer = NULL; 199 for (i = 0; i < HAMMER_MAX_VOLUMES; i++) { 200 volume = get_volume(i); 201 if (volume == NULL) 202 continue; 203 204 printf("Scanning volume %d size %s\n", 205 volume->vol_no, sizetostr(volume->size)); 206 off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0); 207 off_end = off + HAMMER_VOL_BUF_SIZE(volume->ondisk); 208 209 while (off < off_end) { 210 off_blk = off & HAMMER_BIGBLOCK_MASK64; 211 if (off_blk == 0) 212 b = get_bigblock_entry(off); 213 214 if (raw_limit) { 215 if (off >= raw_limit) { 216 printf("Done %016jx\n", (uintmax_t)off); 217 goto end; 218 } 219 } 220 if (zone_limit) { 221 if (off >= zone_limit) { 222 printf("Done %016jx\n", (uintmax_t)off); 223 goto end; 224 } 225 if (b == NULL) { 226 off = HAMMER_ZONE_LAYER2_NEXT_OFFSET(off); 227 continue; 228 } 229 } 230 231 if (b) { 232 if (hammer_crc_test_layer1(HammerVersion, 233 &b->layer1) && 234 hammer_crc_test_layer2(HammerVersion, 235 &b->layer2) && 236 off_blk >= b->layer2.append_off) { 237 off = HAMMER_ZONE_LAYER2_NEXT_OFFSET(off); 238 continue; 239 } 240 } 241 242 ptr = get_buffer_data(off, &data_buffer, 0); 243 if (ptr) 244 recover_top(ptr, off); 245 off += HAMMER_BUFSIZE; 246 } 247 } 248 end: 249 rel_buffer(data_buffer); 250 free_bigblocks(); 251 252 if (CachedPath) { 253 free(CachedPath); 254 close(CachedFd); 255 CachedPath = NULL; 256 CachedFd = -1; 257 } 258 } 259 260 static __inline 261 void 262 print_node(hammer_node_ondisk_t node, hammer_off_t offset) 263 { 264 char buf[HAMMER_BTREE_LEAF_ELMS + 1]; 265 int maxcount = hammer_node_max_elements(node->type); 266 int i; 267 268 for (i = 0; i < node->count && i < maxcount; ++i) 269 buf[i] = hammer_elm_btype(&node->elms[i]); 270 buf[i] = '\0'; 271 272 printf("%016jx %c %d %s\n", offset, node->type, node->count, buf); 273 } 274 275 /* 276 * Top level recovery processor. Assume the data is a B-Tree node. 277 * If the CRC is good we attempt to process the node, building the 278 * object space and creating the dictionary as we go. 279 */ 280 static 281 void 282 recover_top(char *ptr, hammer_off_t offset) 283 { 284 hammer_node_ondisk_t node; 285 hammer_btree_elm_t elm; 286 int maxcount; 287 int i; 288 int isnode; 289 290 for (node = (void *)ptr; (char *)node < ptr + HAMMER_BUFSIZE; ++node) { 291 isnode = hammer_crc_test_btree(HammerVersion, node); 292 maxcount = hammer_node_max_elements(node->type); 293 294 if (DebugOpt) { 295 if (isnode) 296 print_node(node, offset); 297 else if (DebugOpt > 1) 298 printf("%016jx -\n", offset); 299 } 300 offset += sizeof(*node); 301 302 if (isnode && node->type == HAMMER_BTREE_TYPE_LEAF) { 303 for (i = 0; i < node->count && i < maxcount; ++i) { 304 elm = &node->elms[i]; 305 if (elm->base.btype == HAMMER_BTREE_TYPE_RECORD) 306 recover_elm(&elm->leaf); 307 } 308 } 309 } 310 } 311 312 static 313 void 314 recover_elm(hammer_btree_leaf_elm_t leaf) 315 { 316 buffer_info_t data_buffer = NULL; 317 struct recover_dict *dict; 318 struct recover_dict *dict2; 319 hammer_data_ondisk_t ondisk; 320 hammer_off_t data_offset; 321 struct stat st; 322 int chunk; 323 int len; 324 int zfill; 325 int64_t file_offset; 326 uint16_t pfs_id; 327 size_t nlen; 328 int fd; 329 char *name; 330 char *path1; 331 char *path2; 332 333 /* 334 * Ignore deleted records 335 */ 336 if (leaf->delete_ts) 337 return; 338 339 /* 340 * If we're running full scan, it's possible that data_offset 341 * refers to old filesystem data that we can't physically access. 342 */ 343 data_offset = leaf->data_offset; 344 if (get_volume(HAMMER_VOL_DECODE(data_offset)) == NULL) 345 return; 346 347 if (data_offset != 0) 348 ondisk = get_buffer_data(data_offset, &data_buffer, 0); 349 else 350 ondisk = NULL; 351 if (ondisk == NULL) 352 goto done; 353 354 len = leaf->data_len; 355 chunk = HAMMER_BUFSIZE - ((int)data_offset & HAMMER_BUFMASK); 356 if (chunk > len) 357 chunk = len; 358 359 if (len < 0 || len > HAMMER_XBUFSIZE || len > chunk) 360 goto done; 361 362 pfs_id = lo_to_pfs(leaf->base.localization); 363 364 /* 365 * Note that meaning of leaf->base.obj_id differs depending 366 * on record type. For a direntry, leaf->base.obj_id points 367 * to its parent inode that this entry is a part of, but not 368 * its corresponding inode. 369 */ 370 dict = get_dict(leaf->base.obj_id, pfs_id); 371 372 switch(leaf->base.rec_type) { 373 case HAMMER_RECTYPE_INODE: 374 /* 375 * We found an inode which also tells us where the file 376 * or directory is in the directory hierarchy. 377 */ 378 if (VerboseOpt) { 379 printf("inode %016jx:%05d found\n", 380 (uintmax_t)leaf->base.obj_id, pfs_id); 381 } 382 path1 = recover_path(dict); 383 384 /* 385 * Attach the inode to its parent. This isn't strictly 386 * necessary because the information is also in the 387 * directory entries, but if we do not find the directory 388 * entry this ensures that the files will still be 389 * reasonably well organized in their proper directories. 390 */ 391 if ((dict->flags & DICTF_PARENT) == 0 && 392 dict->obj_id != HAMMER_OBJID_ROOT && 393 ondisk->inode.parent_obj_id != 0) { 394 dict->flags |= DICTF_PARENT; 395 dict->parent = get_dict(ondisk->inode.parent_obj_id, 396 pfs_id); 397 if (dict->parent && 398 (dict->parent->flags & DICTF_MADEDIR) == 0) { 399 dict->parent->flags |= DICTF_MADEDIR; 400 path2 = recover_path(dict->parent); 401 printf("mkdir %s\n", path2); 402 mkdir(path2, 0777); 403 free(path2); 404 path2 = NULL; 405 } 406 } 407 if (dict->obj_type == 0) 408 dict->obj_type = ondisk->inode.obj_type; 409 dict->size = ondisk->inode.size; 410 path2 = recover_path(dict); 411 412 if (lstat(path1, &st) == 0) { 413 if (ondisk->inode.obj_type == HAMMER_OBJTYPE_REGFILE) { 414 truncate(path1, dict->size); 415 /* chmod(path1, 0666); */ 416 } 417 if (strcmp(path1, path2)) { 418 printf("Rename (inode) %s -> %s\n", path1, path2); 419 rename(path1, path2); 420 } 421 } else if (ondisk->inode.obj_type == HAMMER_OBJTYPE_REGFILE) { 422 printf("mkinode (file) %s\n", path2); 423 fd = open(path2, O_RDWR|O_CREAT, 0666); 424 if (fd > 0) 425 close(fd); 426 } else if (ondisk->inode.obj_type == HAMMER_OBJTYPE_DIRECTORY) { 427 printf("mkinode (dir) %s\n", path2); 428 mkdir(path2, 0777); 429 dict->flags |= DICTF_MADEDIR; 430 } 431 free(path1); 432 free(path2); 433 break; 434 case HAMMER_RECTYPE_DATA: 435 /* 436 * File record data 437 */ 438 if (leaf->base.obj_id == 0) 439 break; 440 if (VerboseOpt) { 441 printf("inode %016jx:%05d data %016jx,%d\n", 442 (uintmax_t)leaf->base.obj_id, 443 pfs_id, 444 (uintmax_t)leaf->base.key - len, 445 len); 446 } 447 448 /* 449 * Update the dictionary entry 450 */ 451 if (dict->obj_type == 0) 452 dict->obj_type = HAMMER_OBJTYPE_REGFILE; 453 454 /* 455 * If the parent directory has not been created we 456 * have to create it (typically a PFS%05d) 457 */ 458 if (dict->parent && 459 (dict->parent->flags & DICTF_MADEDIR) == 0) { 460 dict->parent->flags |= DICTF_MADEDIR; 461 path2 = recover_path(dict->parent); 462 printf("mkdir %s\n", path2); 463 mkdir(path2, 0777); 464 free(path2); 465 path2 = NULL; 466 } 467 468 /* 469 * Create the file if necessary, report file creations 470 */ 471 path1 = recover_path(dict); 472 if (CachedPath && strcmp(CachedPath, path1) == 0) 473 fd = CachedFd; 474 else 475 fd = open(path1, O_CREAT|O_RDWR, 0666); 476 if (fd < 0) { 477 printf("Unable to create %s: %s\n", 478 path1, strerror(errno)); 479 free(path1); 480 break; 481 } 482 if ((dict->flags & DICTF_MADEFILE) == 0) { 483 dict->flags |= DICTF_MADEFILE; 484 printf("mkfile %s\n", path1); 485 } 486 487 /* 488 * And write the record. A HAMMER data block is aligned 489 * and may contain trailing zeros after the file EOF. The 490 * inode record is required to get the actual file size. 491 * 492 * However, when the inode record is not available 493 * we can do a sparse write and that will get it right 494 * most of the time even if the inode record is never 495 * found. 496 */ 497 file_offset = (int64_t)leaf->base.key - len; 498 lseek(fd, (off_t)file_offset, SEEK_SET); 499 while (len) { 500 if (dict->size == -1) { 501 for (zfill = chunk - 1; zfill >= 0; --zfill) { 502 if (((char *)ondisk)[zfill]) 503 break; 504 } 505 ++zfill; 506 } else { 507 zfill = chunk; 508 } 509 510 if (zfill) 511 write(fd, ondisk, zfill); 512 if (zfill < chunk) 513 lseek(fd, chunk - zfill, SEEK_CUR); 514 515 len -= chunk; 516 data_offset += chunk; 517 file_offset += chunk; 518 ondisk = get_buffer_data(data_offset, &data_buffer, 0); 519 if (ondisk == NULL) 520 break; 521 chunk = HAMMER_BUFSIZE - 522 ((int)data_offset & HAMMER_BUFMASK); 523 if (chunk > len) 524 chunk = len; 525 } 526 if (dict->size >= 0 && file_offset > dict->size) { 527 ftruncate(fd, dict->size); 528 /* fchmod(fd, 0666); */ 529 } 530 531 if (fd == CachedFd) { 532 free(path1); 533 } else if (CachedPath) { 534 free(CachedPath); 535 close(CachedFd); 536 CachedPath = path1; 537 CachedFd = fd; 538 } else { 539 CachedPath = path1; 540 CachedFd = fd; 541 } 542 break; 543 case HAMMER_RECTYPE_DIRENTRY: 544 nlen = len - HAMMER_ENTRY_NAME_OFF; 545 if ((int)nlen < 0) /* illegal length */ 546 break; 547 if (ondisk->entry.obj_id == 0 || 548 ondisk->entry.obj_id == HAMMER_OBJID_ROOT) { 549 break; 550 } 551 name = malloc(nlen + 1); 552 bcopy(ondisk->entry.name, name, nlen); 553 name[nlen] = 0; 554 sanitize_string(name); 555 556 if (VerboseOpt) { 557 printf("dir %016jx:%05d entry %016jx \"%s\"\n", 558 (uintmax_t)leaf->base.obj_id, 559 pfs_id, 560 (uintmax_t)ondisk->entry.obj_id, 561 name); 562 } 563 564 /* 565 * We can't deal with hardlinks so if the object already 566 * has a name assigned to it we just keep using that name. 567 */ 568 dict2 = get_dict(ondisk->entry.obj_id, pfs_id); 569 path1 = recover_path(dict2); 570 571 if (dict2->name == NULL) 572 dict2->name = name; 573 else 574 free(name); 575 576 /* 577 * Attach dict2 to its directory (dict), create the 578 * directory (dict) if necessary. We must ensure 579 * that the directory entry exists in order to be 580 * able to properly rename() the file without creating 581 * a namespace conflict. 582 */ 583 if ((dict2->flags & DICTF_PARENT) == 0) { 584 dict2->flags |= DICTF_PARENT; 585 dict2->parent = dict; 586 if ((dict->flags & DICTF_MADEDIR) == 0) { 587 dict->flags |= DICTF_MADEDIR; 588 path2 = recover_path(dict); 589 printf("mkdir %s\n", path2); 590 mkdir(path2, 0777); 591 free(path2); 592 path2 = NULL; 593 } 594 } 595 path2 = recover_path(dict2); 596 if (strcmp(path1, path2) != 0 && lstat(path1, &st) == 0) { 597 printf("Rename (entry) %s -> %s\n", path1, path2); 598 rename(path1, path2); 599 } 600 free(path1); 601 free(path2); 602 break; 603 default: 604 /* 605 * Ignore any other record types 606 */ 607 break; 608 } 609 done: 610 rel_buffer(data_buffer); 611 } 612 613 #define RD_HSIZE 32768 614 #define RD_HMASK (RD_HSIZE - 1) 615 616 struct recover_dict *RDHash[RD_HSIZE]; 617 618 static 619 struct recover_dict * 620 get_dict(int64_t obj_id, uint16_t pfs_id) 621 { 622 struct recover_dict *dict; 623 int i; 624 625 if (obj_id == 0) 626 return(NULL); 627 628 i = crc32(&obj_id, sizeof(obj_id)) & RD_HMASK; 629 for (dict = RDHash[i]; dict; dict = dict->next) { 630 if (dict->obj_id == obj_id && dict->pfs_id == pfs_id) 631 break; 632 } 633 634 if (dict == NULL) { 635 dict = malloc(sizeof(*dict)); 636 bzero(dict, sizeof(*dict)); 637 dict->obj_id = obj_id; 638 dict->pfs_id = pfs_id; 639 dict->next = RDHash[i]; 640 dict->size = -1; 641 RDHash[i] = dict; 642 643 /* 644 * Always connect dangling dictionary entries to object 1 645 * (the root of the PFS). 646 * 647 * DICTF_PARENT will not be set until we know what the 648 * real parent directory object is. 649 */ 650 if (dict->obj_id != HAMMER_OBJID_ROOT) 651 dict->parent = get_dict(HAMMER_OBJID_ROOT, pfs_id); 652 } 653 return(dict); 654 } 655 656 struct path_info { 657 enum { PI_FIGURE, PI_LOAD } state; 658 uint16_t pfs_id; 659 char *base; 660 char *next; 661 int len; 662 }; 663 664 static void recover_path_helper(struct recover_dict *, struct path_info *); 665 666 static 667 char * 668 recover_path(struct recover_dict *dict) 669 { 670 struct path_info info; 671 672 /* Find info.len first */ 673 bzero(&info, sizeof(info)); 674 info.state = PI_FIGURE; 675 recover_path_helper(dict, &info); 676 677 /* Fill in the path */ 678 info.pfs_id = dict->pfs_id; 679 info.base = malloc(info.len); 680 info.next = info.base; 681 info.state = PI_LOAD; 682 recover_path_helper(dict, &info); 683 684 /* Return the path */ 685 return(info.base); 686 } 687 688 #define STRLEN_OBJID 22 /* "obj_0x%016jx" */ 689 #define STRLEN_PFSID 8 /* "PFS%05d" */ 690 691 static 692 void 693 recover_path_helper(struct recover_dict *dict, struct path_info *info) 694 { 695 /* 696 * Calculate path element length 697 */ 698 dict->flags |= DICTF_TRAVERSED; 699 700 switch(info->state) { 701 case PI_FIGURE: 702 if (dict->obj_id == HAMMER_OBJID_ROOT) 703 info->len += STRLEN_PFSID; 704 else if (dict->name) 705 info->len += strlen(dict->name); 706 else 707 info->len += STRLEN_OBJID; 708 ++info->len; 709 710 if (dict->parent && 711 (dict->parent->flags & DICTF_TRAVERSED) == 0) { 712 recover_path_helper(dict->parent, info); 713 } else { 714 info->len += strlen(TargetDir) + 1; 715 } 716 break; 717 case PI_LOAD: 718 if (dict->parent && 719 (dict->parent->flags & DICTF_TRAVERSED) == 0) { 720 recover_path_helper(dict->parent, info); 721 } else { 722 strcpy(info->next, TargetDir); 723 info->next += strlen(info->next); 724 } 725 726 *info->next++ = '/'; 727 if (dict->obj_id == HAMMER_OBJID_ROOT) { 728 snprintf(info->next, STRLEN_PFSID + 1, 729 "PFS%05d", info->pfs_id); 730 } else if (dict->name) { 731 strcpy(info->next, dict->name); 732 } else { 733 snprintf(info->next, STRLEN_OBJID + 1, 734 "obj_0x%016jx", (uintmax_t)dict->obj_id); 735 } 736 info->next += strlen(info->next); 737 break; 738 } 739 dict->flags &= ~DICTF_TRAVERSED; 740 } 741 742 static 743 void 744 sanitize_string(char *str) 745 { 746 while (*str) { 747 if (!isprint(*str)) 748 *str = 'x'; 749 ++str; 750 } 751 } 752 753 static 754 hammer_off_t 755 scan_raw_limit(void) 756 { 757 volume_info_t volume; 758 hammer_blockmap_t rootmap; 759 hammer_blockmap_layer1_t layer1; 760 hammer_blockmap_layer2_t layer2; 761 buffer_info_t buffer1 = NULL; 762 buffer_info_t buffer2 = NULL; 763 hammer_off_t layer1_offset; 764 hammer_off_t layer2_offset; 765 hammer_off_t phys_offset; 766 hammer_off_t block_offset; 767 hammer_off_t offset = 0; 768 int zone = HAMMER_ZONE_FREEMAP_INDEX; 769 770 volume = get_root_volume(); 771 rootmap = &volume->ondisk->vol0_blockmap[zone]; 772 assert(rootmap->phys_offset != 0); 773 774 for (phys_offset = HAMMER_ZONE_ENCODE(zone, 0); 775 phys_offset < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK); 776 phys_offset += HAMMER_BLOCKMAP_LAYER2) { 777 /* 778 * Dive layer 1. 779 */ 780 layer1_offset = rootmap->phys_offset + 781 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset); 782 layer1 = get_buffer_data(layer1_offset, &buffer1, 0); 783 784 if (!hammer_crc_test_layer1(HammerVersion, layer1)) { 785 offset = 0; /* failed */ 786 goto end; 787 } 788 if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) 789 continue; 790 791 for (block_offset = 0; 792 block_offset < HAMMER_BLOCKMAP_LAYER2; 793 block_offset += HAMMER_BIGBLOCK_SIZE) { 794 /* 795 * Dive layer 2, each entry represents a big-block. 796 */ 797 layer2_offset = layer1->phys_offset + 798 HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset); 799 layer2 = get_buffer_data(layer2_offset, &buffer2, 0); 800 801 if (!hammer_crc_test_layer2(HammerVersion, layer2)) { 802 offset = 0; /* failed */ 803 goto end; 804 } 805 if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) { 806 break; 807 } else if (layer2->zone && layer2->zone != zone) { 808 offset = phys_offset + block_offset; 809 } 810 } 811 } 812 end: 813 rel_buffer(buffer1); 814 rel_buffer(buffer2); 815 816 return(hammer_xlate_to_zone2(offset)); 817 } 818 819 static 820 void 821 scan_bigblocks(int target_zone) 822 { 823 volume_info_t volume; 824 hammer_blockmap_t rootmap; 825 hammer_blockmap_layer1_t layer1; 826 hammer_blockmap_layer2_t layer2; 827 buffer_info_t buffer1 = NULL; 828 buffer_info_t buffer2 = NULL; 829 hammer_off_t layer1_offset; 830 hammer_off_t layer2_offset; 831 hammer_off_t phys_offset; 832 hammer_off_t block_offset; 833 hammer_off_t offset = 0; 834 int zone = HAMMER_ZONE_FREEMAP_INDEX; 835 836 volume = get_root_volume(); 837 rootmap = &volume->ondisk->vol0_blockmap[zone]; 838 assert(rootmap->phys_offset != 0); 839 840 for (phys_offset = HAMMER_ZONE_ENCODE(zone, 0); 841 phys_offset < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK); 842 phys_offset += HAMMER_BLOCKMAP_LAYER2) { 843 /* 844 * Dive layer 1. 845 */ 846 layer1_offset = rootmap->phys_offset + 847 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset); 848 layer1 = get_buffer_data(layer1_offset, &buffer1, 0); 849 850 /* 851 if (!hammer_crc_test_layer1(HammerVersion, layer1)) { 852 } 853 */ 854 if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) 855 continue; 856 857 for (block_offset = 0; 858 block_offset < HAMMER_BLOCKMAP_LAYER2; 859 block_offset += HAMMER_BIGBLOCK_SIZE) { 860 offset = phys_offset + block_offset; 861 /* 862 * Dive layer 2, each entry represents a big-block. 863 */ 864 layer2_offset = layer1->phys_offset + 865 HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset); 866 layer2 = get_buffer_data(layer2_offset, &buffer2, 0); 867 868 /* 869 if (!hammer_crc_test_layer2(HammerVersion, layer2)) { 870 } 871 */ 872 if (layer2->zone == target_zone) { 873 add_bigblock_entry(offset, layer1, layer2); 874 } else if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) { 875 break; 876 } 877 } 878 } 879 rel_buffer(buffer1); 880 rel_buffer(buffer2); 881 } 882 883 static 884 void 885 free_bigblocks(void) 886 { 887 bigblock_t b; 888 889 while ((b = RB_ROOT(&ZoneTree)) != NULL) { 890 RB_REMOVE(bigblock_rb_tree, &ZoneTree, b); 891 free(b); 892 } 893 assert(RB_EMPTY(&ZoneTree)); 894 } 895 896 static 897 void 898 add_bigblock_entry(hammer_off_t offset, 899 hammer_blockmap_layer1_t layer1, hammer_blockmap_layer2_t layer2) 900 { 901 bigblock_t b; 902 903 b = calloc(1, sizeof(*b)); 904 b->phys_offset = hammer_xlate_to_zone2(offset); 905 assert((b->phys_offset & HAMMER_BIGBLOCK_MASK64) == 0); 906 bcopy(layer1, &b->layer1, sizeof(*layer1)); 907 bcopy(layer2, &b->layer2, sizeof(*layer2)); 908 909 RB_INSERT(bigblock_rb_tree, &ZoneTree, b); 910 } 911 912 static 913 bigblock_t 914 get_bigblock_entry(hammer_off_t offset) 915 { 916 bigblock_t b; 917 918 offset = hammer_xlate_to_zone2(offset); 919 offset &= ~HAMMER_BIGBLOCK_MASK64; 920 921 b = RB_LOOKUP(bigblock_rb_tree, &ZoneTree, offset); 922 if (b) 923 return(b); 924 return(NULL); 925 } 926