/*
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "hammer.h"

#include <sys/tree.h>

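/*
 * In-memory dictionary of every object id encountered during the scan,
 * keyed by (obj_id, pfs_id).  Entries accumulate whatever is learned from
 * inode records, data records and directory entries (parent, name, size)
 * so the directory hierarchy can be rebuilt under the target directory.
 */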
struct recover_dict {
        struct recover_dict *next;
        struct recover_dict *parent;
        int64_t obj_id;
        uint8_t obj_type;
        uint8_t flags;
        uint16_t pfs_id;
        int64_t size;
        char *name;
};

#define DICTF_MADEDIR   0x01
#define DICTF_MADEFILE  0x02
#define DICTF_PARENT    0x04    /* parent attached for real */
#define DICTF_TRAVERSED 0x80

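/*
 * Cached freemap information for a single big-block, including copies of
 * its layer1 and layer2 freemap entries.  Big-blocks belonging to the
 * target zone are collected into the ZoneTree red-black tree, indexed by
 * their zone-2 physical offset.
 */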
typedef struct bigblock {
        RB_ENTRY(bigblock) entry;
        hammer_off_t phys_offset;       /* zone-2 */
        struct hammer_blockmap_layer1 layer1;
        struct hammer_blockmap_layer2 layer2;
} *bigblock_t;

static void recover_top(char *ptr, hammer_off_t offset);
static void recover_elm(hammer_btree_leaf_elm_t leaf);
static struct recover_dict *get_dict(int64_t obj_id, uint16_t pfs_id);
static char *recover_path(struct recover_dict *dict);
static void sanitize_string(char *str);
static hammer_off_t scan_raw_limit(void);
static void scan_bigblocks(int target_zone);
static void free_bigblocks(void);
static void add_bigblock_entry(hammer_off_t offset,
        hammer_blockmap_layer1_t layer1, hammer_blockmap_layer2_t layer2);
static bigblock_t get_bigblock_entry(hammer_off_t offset);

static const char *TargetDir;
static int CachedFd = -1;
static char *CachedPath;

static int
bigblock_cmp(bigblock_t b1, bigblock_t b2)
{
        if (b1->phys_offset < b2->phys_offset)
                return(-1);
        if (b1->phys_offset > b2->phys_offset)
                return(1);
        return(0);
}

static RB_HEAD(bigblock_rb_tree, bigblock) ZoneTree =
        RB_INITIALIZER(&ZoneTree);
RB_PROTOTYPE2(bigblock_rb_tree, bigblock, entry, bigblock_cmp, hammer_off_t);
RB_GENERATE2(bigblock_rb_tree, bigblock, entry, bigblock_cmp, hammer_off_t,
        phys_offset);

/*
 * There used to be a hidden bug here while iterating zone-2 offsets,
 * illustrated by the example below.
 *
 * If a volume was once part of a multi-volume HAMMER filesystem whose
 * usage had grown beyond the first volume, and was later re-formatted
 * as a single-volume filesystem, hammer recover was likely to hit an
 * assertion in get_buffer() because leftover data from the old
 * filesystem still referenced volumes (vol1, vol2, ...) that no longer
 * exist.
 *
 * To avoid this, the command now scans only up to the last big-block
 * actually used for filesystem data or meta-data at the moment, provided
 * all layer1/layer2 entries have correct CRC values.  This also avoids
 * recovering irrelevant files left over from the old filesystem.
 *
 * Likewise, it does not scan beyond the append offset of big-blocks in
 * the B-Tree zone, again provided the layer1/layer2 entries for those
 * big-blocks have correct CRC values, to avoid recovering irrelevant
 * files from the old filesystem.
 *
 * |-----vol0-----|-----vol1-----|-----vol2-----| old filesystem
 * <-----------------------> used by old filesystem
 *
 * |-----vol0-----| new filesystem
 * <-----> used by new filesystem
 *        <-------> unused, invalid data from old filesystem
 *      <-> B-Tree nodes likely to point to vol1
 */

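/*
 * hammer recover <target_dir> [full|quick]
 *
 * Scan the raw image for B-Tree nodes and recover whatever can be
 * reconstructed into TargetDir.  By default the scan stops at the last
 * big-block in use according to the freemap (raw_limit); "quick" further
 * restricts the scan to big-blocks assigned to the B-Tree zone
 * (zone_limit), and "full" scans every volume end to end.
 */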
void
hammer_cmd_recover(char **av, int ac)
{
        buffer_info_t data_buffer;
        volume_info_t volume;
        bigblock_t b = NULL;
        hammer_off_t off;
        hammer_off_t off_end;
        hammer_off_t off_blk;
        hammer_off_t raw_limit = 0;
        hammer_off_t zone_limit = 0;
        char *ptr;
        int i;
        int target_zone = HAMMER_ZONE_BTREE_INDEX;
        int full = 0;
        int quick = 0;

        if (ac < 1) {
                errx(1, "hammer recover <target_dir> [full|quick]");
                /* not reached */
        }

        TargetDir = av[0];
        if (ac > 1) {
                if (!strcmp(av[1], "full"))
                        full = 1;
                if (!strcmp(av[1], "quick"))
                        quick = 1;
        }
        assert(!full || !quick);

        if (mkdir(TargetDir, 0777) == -1) {
                if (errno != EEXIST) {
                        err(1, "mkdir");
                        /* not reached */
                }
        }

        printf("Running %sraw scan of HAMMER image, recovering to %s\n",
                full ? "full " : quick ? "quick " : "",
                TargetDir);

        if (!full) {
                scan_bigblocks(target_zone);
                raw_limit = scan_raw_limit();
                if (raw_limit) {
                        raw_limit += HAMMER_BIGBLOCK_SIZE;
                        assert(hammer_is_zone_raw_buffer(raw_limit));
                }
        }

        if (quick) {
                assert(!full);
                if (!RB_EMPTY(&ZoneTree)) {
                        printf("Found zone-%d big-blocks at\n", target_zone);
                        RB_FOREACH(b, bigblock_rb_tree, &ZoneTree)
                                printf("%016jx\n", b->phys_offset);

                        b = RB_MAX(bigblock_rb_tree, &ZoneTree);
                        zone_limit = b->phys_offset + HAMMER_BIGBLOCK_SIZE;
                        assert(hammer_is_zone_raw_buffer(zone_limit));
                }
        }

        if (raw_limit || zone_limit) {
#define _fmt "Scanning zone-%d big-blocks till %016jx"
                if (!raw_limit) /* unlikely */
                        printf(_fmt" ???", target_zone, zone_limit);
                else if (!zone_limit)
                        printf(_fmt, HAMMER_ZONE_RAW_BUFFER_INDEX, raw_limit);
                else if (raw_limit >= zone_limit)
                        printf(_fmt, target_zone, zone_limit);
                else /* unlikely */
                        printf(_fmt" ???", HAMMER_ZONE_RAW_BUFFER_INDEX, raw_limit);
                printf("\n");
        }

        data_buffer = NULL;
        for (i = 0; i < HAMMER_MAX_VOLUMES; i++) {
                volume = get_volume(i);
                if (volume == NULL)
                        continue;

                printf("Scanning volume %d size %s\n",
                        volume->vol_no, sizetostr(volume->size));
                off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
                off_end = off + HAMMER_VOL_BUF_SIZE(volume->ondisk);

                while (off < off_end) {
                        off_blk = off & HAMMER_BIGBLOCK_MASK64;
                        if (off_blk == 0)
                                b = get_bigblock_entry(off);

                        if (raw_limit) {
                                if (off >= raw_limit) {
                                        printf("Done %016jx\n", (uintmax_t)off);
                                        goto end;
                                }
                        }
                        if (zone_limit) {
                                if (off >= zone_limit) {
                                        printf("Done %016jx\n", (uintmax_t)off);
                                        goto end;
                                }
                                if (b == NULL) {
                                        off = HAMMER_ZONE_LAYER2_NEXT_OFFSET(off);
                                        continue;
                                }
                        }

                        if (b) {
                                if (hammer_crc_test_layer1(HammerVersion,
                                        &b->layer1) &&
                                    hammer_crc_test_layer2(HammerVersion,
                                        &b->layer2) &&
                                    off_blk >= b->layer2.append_off) {
                                        off = HAMMER_ZONE_LAYER2_NEXT_OFFSET(off);
                                        continue;
                                }
                        }

                        ptr = get_buffer_data(off, &data_buffer, 0);
                        if (ptr)
                                recover_top(ptr, off);
                        off += HAMMER_BUFSIZE;
                }
        }
end:
        rel_buffer(data_buffer);
        free_bigblocks();

        if (CachedPath) {
                free(CachedPath);
                close(CachedFd);
                CachedPath = NULL;
                CachedFd = -1;
        }
}

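/*
 * Debugging helper: print a one-line summary of a B-Tree node, i.e. its
 * offset, node type, element count and one type character per element.
 */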
static __inline
void
print_node(hammer_node_ondisk_t node, hammer_off_t offset)
{
        char buf[HAMMER_BTREE_LEAF_ELMS + 1];
        int maxcount = hammer_node_max_elements(node->type);
        int i;

        for (i = 0; i < node->count && i < maxcount; ++i)
                buf[i] = hammer_elm_btype(&node->elms[i]);
        buf[i] = '\0';

        printf("%016jx %c %d %s\n", offset, node->type, node->count, buf);
}

/*
 * Top level recovery processor.  Assume the data is a B-Tree node.
 * If the CRC is good we attempt to process the node, building the
 * object space and creating the dictionary as we go.
 */
static
void
recover_top(char *ptr, hammer_off_t offset)
{
        hammer_node_ondisk_t node;
        hammer_btree_elm_t elm;
        int maxcount;
        int i;
        int isnode;

        for (node = (void *)ptr; (char *)node < ptr + HAMMER_BUFSIZE; ++node) {
                isnode = hammer_crc_test_btree(HammerVersion, node);
                maxcount = hammer_node_max_elements(node->type);

                if (DebugOpt) {
                        if (isnode)
                                print_node(node, offset);
                        else if (DebugOpt > 1)
                                printf("%016jx -\n", offset);
                }
                offset += sizeof(*node);

                if (isnode && node->type == HAMMER_BTREE_TYPE_LEAF) {
                        for (i = 0; i < node->count && i < maxcount; ++i) {
                                elm = &node->elms[i];
                                if (elm->base.btype == HAMMER_BTREE_TYPE_RECORD)
                                        recover_elm(&elm->leaf);
                        }
                }
        }
}

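/*
 * Process a single B-Tree leaf element.  Inode records establish the
 * object's type, size and parent directory, data records are written
 * into the recovered file at the offset encoded in the element key, and
 * directory entries supply file names and parent/child relationships.
 */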
static
void
recover_elm(hammer_btree_leaf_elm_t leaf)
{
        buffer_info_t data_buffer = NULL;
        struct recover_dict *dict;
        struct recover_dict *dict2;
        hammer_data_ondisk_t ondisk;
        hammer_off_t data_offset;
        struct stat st;
        int chunk;
        int len;
        int zfill;
        int64_t file_offset;
        uint16_t pfs_id;
        size_t nlen;
        int fd;
        char *name;
        char *path1;
        char *path2;

        /*
         * Ignore deleted records
         */
        if (leaf->delete_ts)
                return;

        /*
         * If we're running a full scan, it's possible that data_offset
         * refers to old filesystem data that we can't physically access.
         */
        data_offset = leaf->data_offset;
        if (get_volume(HAMMER_VOL_DECODE(data_offset)) == NULL)
                return;

        if (data_offset != 0)
                ondisk = get_buffer_data(data_offset, &data_buffer, 0);
        else
                ondisk = NULL;
        if (ondisk == NULL)
                goto done;

        len = leaf->data_len;
        chunk = HAMMER_BUFSIZE - ((int)data_offset & HAMMER_BUFMASK);
        if (chunk > len)
                chunk = len;

        if (len < 0 || len > HAMMER_XBUFSIZE || len > chunk)
                goto done;

        pfs_id = lo_to_pfs(leaf->base.localization);

        /*
         * Note that the meaning of leaf->base.obj_id depends on the
         * record type.  For a directory entry, leaf->base.obj_id refers
         * to the parent directory inode the entry belongs to, not the
         * inode the entry points at.
         */
        dict = get_dict(leaf->base.obj_id, pfs_id);

        switch(leaf->base.rec_type) {
        case HAMMER_RECTYPE_INODE:
                /*
                 * We found an inode which also tells us where the file
                 * or directory is in the directory hierarchy.
                 */
                if (VerboseOpt) {
                        printf("inode %016jx:%05d found\n",
                                (uintmax_t)leaf->base.obj_id, pfs_id);
                }
                path1 = recover_path(dict);

                /*
                 * Attach the inode to its parent.  This isn't strictly
                 * necessary because the information is also in the
                 * directory entries, but if we do not find the directory
                 * entry this ensures that the files will still be
                 * reasonably well organized in their proper directories.
                 */
                if ((dict->flags & DICTF_PARENT) == 0 &&
                    dict->obj_id != HAMMER_OBJID_ROOT &&
                    ondisk->inode.parent_obj_id != 0) {
                        dict->flags |= DICTF_PARENT;
                        dict->parent = get_dict(ondisk->inode.parent_obj_id,
                                                pfs_id);
                        if (dict->parent &&
                            (dict->parent->flags & DICTF_MADEDIR) == 0) {
                                dict->parent->flags |= DICTF_MADEDIR;
                                path2 = recover_path(dict->parent);
                                printf("mkdir %s\n", path2);
                                mkdir(path2, 0777);
                                free(path2);
                                path2 = NULL;
                        }
                }
                if (dict->obj_type == 0)
                        dict->obj_type = ondisk->inode.obj_type;
                dict->size = ondisk->inode.size;
                path2 = recover_path(dict);

                if (lstat(path1, &st) == 0) {
                        if (ondisk->inode.obj_type == HAMMER_OBJTYPE_REGFILE) {
                                truncate(path1, dict->size);
                                /* chmod(path1, 0666); */
                        }
                        if (strcmp(path1, path2)) {
                                printf("Rename (inode) %s -> %s\n", path1, path2);
                                rename(path1, path2);
                        }
                } else if (ondisk->inode.obj_type == HAMMER_OBJTYPE_REGFILE) {
                        printf("mkinode (file) %s\n", path2);
                        fd = open(path2, O_RDWR|O_CREAT, 0666);
                        if (fd > 0)
                                close(fd);
                } else if (ondisk->inode.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
                        printf("mkinode (dir) %s\n", path2);
                        mkdir(path2, 0777);
                        dict->flags |= DICTF_MADEDIR;
                }
                free(path1);
                free(path2);
                break;
        case HAMMER_RECTYPE_DATA:
                /*
                 * File record data
                 */
                if (leaf->base.obj_id == 0)
                        break;
                if (VerboseOpt) {
                        printf("inode %016jx:%05d data %016jx,%d\n",
                                (uintmax_t)leaf->base.obj_id,
                                pfs_id,
                                (uintmax_t)leaf->base.key - len,
                                len);
                }

                /*
                 * Update the dictionary entry
                 */
                if (dict->obj_type == 0)
                        dict->obj_type = HAMMER_OBJTYPE_REGFILE;

                /*
                 * If the parent directory has not been created we
                 * have to create it (typically a PFS%05d)
                 */
                if (dict->parent &&
                    (dict->parent->flags & DICTF_MADEDIR) == 0) {
                        dict->parent->flags |= DICTF_MADEDIR;
                        path2 = recover_path(dict->parent);
                        printf("mkdir %s\n", path2);
                        mkdir(path2, 0777);
                        free(path2);
                        path2 = NULL;
                }

                /*
                 * Create the file if necessary, report file creations
                 */
                path1 = recover_path(dict);
                if (CachedPath && strcmp(CachedPath, path1) == 0)
                        fd = CachedFd;
                else
                        fd = open(path1, O_CREAT|O_RDWR, 0666);
                if (fd < 0) {
                        printf("Unable to create %s: %s\n",
                                path1, strerror(errno));
                        free(path1);
                        break;
                }
                if ((dict->flags & DICTF_MADEFILE) == 0) {
                        dict->flags |= DICTF_MADEFILE;
                        printf("mkfile %s\n", path1);
                }

                /*
                 * And write the record.  A HAMMER data block is aligned
                 * and may contain trailing zeros after the file EOF.  The
                 * inode record is required to get the actual file size.
                 *
                 * However, when the inode record is not available
                 * we can do a sparse write and that will get it right
                 * most of the time even if the inode record is never
                 * found.
                 */
                file_offset = (int64_t)leaf->base.key - len;
                lseek(fd, (off_t)file_offset, SEEK_SET);
                while (len) {
                        if (dict->size == -1) {
                                for (zfill = chunk - 1; zfill >= 0; --zfill) {
                                        if (((char *)ondisk)[zfill])
                                                break;
                                }
                                ++zfill;
                        } else {
                                zfill = chunk;
                        }

                        if (zfill)
                                write(fd, ondisk, zfill);
                        if (zfill < chunk)
                                lseek(fd, chunk - zfill, SEEK_CUR);

                        len -= chunk;
                        data_offset += chunk;
                        file_offset += chunk;
                        ondisk = get_buffer_data(data_offset, &data_buffer, 0);
                        if (ondisk == NULL)
                                break;
                        chunk = HAMMER_BUFSIZE -
                                ((int)data_offset & HAMMER_BUFMASK);
                        if (chunk > len)
                                chunk = len;
                }
                if (dict->size >= 0 && file_offset > dict->size) {
                        ftruncate(fd, dict->size);
                        /* fchmod(fd, 0666); */
                }

                if (fd == CachedFd) {
                        free(path1);
                } else if (CachedPath) {
                        free(CachedPath);
                        close(CachedFd);
                        CachedPath = path1;
                        CachedFd = fd;
                } else {
                        CachedPath = path1;
                        CachedFd = fd;
                }
                break;
        case HAMMER_RECTYPE_DIRENTRY:
                nlen = len - HAMMER_ENTRY_NAME_OFF;
                if ((int)nlen < 0)      /* illegal length */
                        break;
                if (ondisk->entry.obj_id == 0 ||
                    ondisk->entry.obj_id == HAMMER_OBJID_ROOT) {
                        break;
                }
                name = malloc(nlen + 1);
                bcopy(ondisk->entry.name, name, nlen);
                name[nlen] = 0;
                sanitize_string(name);

                if (VerboseOpt) {
                        printf("dir %016jx:%05d entry %016jx \"%s\"\n",
                                (uintmax_t)leaf->base.obj_id,
                                pfs_id,
                                (uintmax_t)ondisk->entry.obj_id,
                                name);
                }

                /*
                 * We can't deal with hardlinks, so if the object already
                 * has a name assigned to it we just keep using that name.
                 */
                dict2 = get_dict(ondisk->entry.obj_id, pfs_id);
                path1 = recover_path(dict2);

                if (dict2->name == NULL)
                        dict2->name = name;
                else
                        free(name);

                /*
                 * Attach dict2 to its directory (dict), creating the
                 * directory (dict) if necessary.  We must ensure
                 * that the directory entry exists in order to be
                 * able to properly rename() the file without creating
                 * a namespace conflict.
                 */
                if ((dict2->flags & DICTF_PARENT) == 0) {
                        dict2->flags |= DICTF_PARENT;
                        dict2->parent = dict;
                        if ((dict->flags & DICTF_MADEDIR) == 0) {
                                dict->flags |= DICTF_MADEDIR;
                                path2 = recover_path(dict);
                                printf("mkdir %s\n", path2);
                                mkdir(path2, 0777);
                                free(path2);
                                path2 = NULL;
                        }
                }
                path2 = recover_path(dict2);
                if (strcmp(path1, path2) != 0 && lstat(path1, &st) == 0) {
                        printf("Rename (entry) %s -> %s\n", path1, path2);
                        rename(path1, path2);
                }
                free(path1);
                free(path2);
                break;
        default:
                /*
                 * Ignore any other record types
                 */
                break;
        }
done:
        rel_buffer(data_buffer);
}

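/*
 * Dictionary entries live in a simple chained hash table keyed on a
 * CRC of the object id.
 */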
#define RD_HSIZE        32768
#define RD_HMASK        (RD_HSIZE - 1)

static struct recover_dict *RDHash[RD_HSIZE];

static
struct recover_dict *
get_dict(int64_t obj_id, uint16_t pfs_id)
{
        struct recover_dict *dict;
        int i;

        if (obj_id == 0)
                return(NULL);

        i = crc32(&obj_id, sizeof(obj_id)) & RD_HMASK;
        for (dict = RDHash[i]; dict; dict = dict->next) {
                if (dict->obj_id == obj_id && dict->pfs_id == pfs_id)
                        break;
        }

        if (dict == NULL) {
                dict = malloc(sizeof(*dict));
                bzero(dict, sizeof(*dict));
                dict->obj_id = obj_id;
                dict->pfs_id = pfs_id;
                dict->next = RDHash[i];
                dict->size = -1;
                RDHash[i] = dict;

                /*
                 * Always connect dangling dictionary entries to object 1
                 * (the root of the PFS).
                 *
                 * DICTF_PARENT will not be set until we know what the
                 * real parent directory object is.
                 */
                if (dict->obj_id != HAMMER_OBJID_ROOT)
                        dict->parent = get_dict(HAMMER_OBJID_ROOT, pfs_id);
        }
        return(dict);
}

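/*
 * Paths are reconstructed in two passes over the parent chain: PI_FIGURE
 * measures the required buffer length and PI_LOAD fills it in.
 */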
struct path_info {
        enum { PI_FIGURE, PI_LOAD } state;
        uint16_t pfs_id;
        char *base;
        char *next;
        int len;
};

static void recover_path_helper(struct recover_dict *, struct path_info *);

static
char *
recover_path(struct recover_dict *dict)
{
        struct path_info info;

        /* Find info.len first */
        bzero(&info, sizeof(info));
        info.state = PI_FIGURE;
        recover_path_helper(dict, &info);

        /* Fill in the path */
        info.pfs_id = dict->pfs_id;
        info.base = malloc(info.len);
        info.next = info.base;
        info.state = PI_LOAD;
        recover_path_helper(dict, &info);

        /* Return the path */
        return(info.base);
}

#define STRLEN_OBJID    22      /* "obj_0x%016jx" */
#define STRLEN_PFSID    8       /* "PFS%05d" */

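/*
 * Recursively walk up the parent chain, prepending each path element.
 * DICTF_TRAVERSED is set while a node is on the recursion stack so a
 * corrupted (cyclic) parent chain cannot recurse forever; it is cleared
 * again on the way out.
 */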
static
void
recover_path_helper(struct recover_dict *dict, struct path_info *info)
{
        /*
         * Calculate or emit this path element depending on info->state.
         */
        dict->flags |= DICTF_TRAVERSED;

        switch(info->state) {
        case PI_FIGURE:
                if (dict->obj_id == HAMMER_OBJID_ROOT)
                        info->len += STRLEN_PFSID;
                else if (dict->name)
                        info->len += strlen(dict->name);
                else
                        info->len += STRLEN_OBJID;
                ++info->len;

                if (dict->parent &&
                    (dict->parent->flags & DICTF_TRAVERSED) == 0) {
                        recover_path_helper(dict->parent, info);
                } else {
                        info->len += strlen(TargetDir) + 1;
                }
                break;
        case PI_LOAD:
                if (dict->parent &&
                    (dict->parent->flags & DICTF_TRAVERSED) == 0) {
                        recover_path_helper(dict->parent, info);
                } else {
                        strcpy(info->next, TargetDir);
                        info->next += strlen(info->next);
                }

                *info->next++ = '/';
                if (dict->obj_id == HAMMER_OBJID_ROOT) {
                        snprintf(info->next, STRLEN_PFSID + 1,
                                "PFS%05d", info->pfs_id);
                } else if (dict->name) {
                        strcpy(info->next, dict->name);
                } else {
                        snprintf(info->next, STRLEN_OBJID + 1,
                                "obj_0x%016jx", (uintmax_t)dict->obj_id);
                }
                info->next += strlen(info->next);
                break;
        }
        dict->flags &= ~DICTF_TRAVERSED;
}

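/*
 * Replace non-printable characters so recovered directory entry names
 * are safe to create on the target filesystem.
 */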
static
void
sanitize_string(char *str)
{
        while (*str) {
                if (!isprint(*str))
                        *str = 'x';
                ++str;
        }
}

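/*
 * Walk the freemap and return the zone-2 offset of the start of the last
 * big-block owned by any zone other than the freemap itself, i.e. the end
 * of the space currently holding filesystem data or meta-data.  Returns 0
 * if any layer1/layer2 CRC test fails, in which case the caller falls
 * back to scanning without a limit.
 */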
static
hammer_off_t
scan_raw_limit(void)
{
        volume_info_t volume;
        hammer_blockmap_t rootmap;
        hammer_blockmap_layer1_t layer1;
        hammer_blockmap_layer2_t layer2;
        buffer_info_t buffer1 = NULL;
        buffer_info_t buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t phys_offset;
        hammer_off_t block_offset;
        hammer_off_t offset = 0;
        int zone = HAMMER_ZONE_FREEMAP_INDEX;

        volume = get_root_volume();
        rootmap = &volume->ondisk->vol0_blockmap[zone];
        assert(rootmap->phys_offset != 0);

        for (phys_offset = HAMMER_ZONE_ENCODE(zone, 0);
             phys_offset < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK);
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                /*
                 * Dive layer 1.
                 */
                layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

                if (!hammer_crc_test_layer1(HammerVersion, layer1)) {
                        offset = 0; /* failed */
                        goto end;
                }
                if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL)
                        continue;

                for (block_offset = 0;
                     block_offset < HAMMER_BLOCKMAP_LAYER2;
                     block_offset += HAMMER_BIGBLOCK_SIZE) {
                        /*
                         * Dive layer 2, each entry represents a big-block.
                         */
                        layer2_offset = layer1->phys_offset +
                                HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
                        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

                        if (!hammer_crc_test_layer2(HammerVersion, layer2)) {
                                offset = 0; /* failed */
                                goto end;
                        }
                        if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
                                break;
                        } else if (layer2->zone && layer2->zone != zone) {
                                offset = phys_offset + block_offset;
                        }
                }
        }
end:
        rel_buffer(buffer1);
        rel_buffer(buffer2);

        return(hammer_xlate_to_zone2(offset));
}

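/*
 * Walk the freemap and record every big-block currently assigned to
 * target_zone in the ZoneTree, along with copies of its layer1/layer2
 * entries.  CRC failures are tolerated here; the CRCs are re-checked
 * when the cached entries are consulted during the scan.
 */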
static
void
scan_bigblocks(int target_zone)
{
        volume_info_t volume;
        hammer_blockmap_t rootmap;
        hammer_blockmap_layer1_t layer1;
        hammer_blockmap_layer2_t layer2;
        buffer_info_t buffer1 = NULL;
        buffer_info_t buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t phys_offset;
        hammer_off_t block_offset;
        hammer_off_t offset = 0;
        int zone = HAMMER_ZONE_FREEMAP_INDEX;

        volume = get_root_volume();
        rootmap = &volume->ondisk->vol0_blockmap[zone];
        assert(rootmap->phys_offset != 0);

        for (phys_offset = HAMMER_ZONE_ENCODE(zone, 0);
             phys_offset < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK);
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                /*
                 * Dive layer 1.
                 */
                layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

                /*
                if (!hammer_crc_test_layer1(HammerVersion, layer1)) {
                }
                */
                if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL)
                        continue;

                for (block_offset = 0;
                     block_offset < HAMMER_BLOCKMAP_LAYER2;
                     block_offset += HAMMER_BIGBLOCK_SIZE) {
                        offset = phys_offset + block_offset;
                        /*
                         * Dive layer 2, each entry represents a big-block.
                         */
                        layer2_offset = layer1->phys_offset +
                                HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
                        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

                        /*
                        if (!hammer_crc_test_layer2(HammerVersion, layer2)) {
                        }
                        */
                        if (layer2->zone == target_zone) {
                                add_bigblock_entry(offset, layer1, layer2);
                        } else if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
                                break;
                        }
                }
        }
        rel_buffer(buffer1);
        rel_buffer(buffer2);
}

static
void
free_bigblocks(void)
{
        bigblock_t b;

        while ((b = RB_ROOT(&ZoneTree)) != NULL) {
                RB_REMOVE(bigblock_rb_tree, &ZoneTree, b);
                free(b);
        }
        assert(RB_EMPTY(&ZoneTree));
}

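/*
 * Record a big-block in the ZoneTree, keyed on its zone-2 base offset,
 * together with private copies of its layer1 and layer2 entries.
 */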
static
void
add_bigblock_entry(hammer_off_t offset,
        hammer_blockmap_layer1_t layer1, hammer_blockmap_layer2_t layer2)
{
        bigblock_t b;

        b = calloc(1, sizeof(*b));
        b->phys_offset = hammer_xlate_to_zone2(offset);
        assert((b->phys_offset & HAMMER_BIGBLOCK_MASK64) == 0);
        bcopy(layer1, &b->layer1, sizeof(*layer1));
        bcopy(layer2, &b->layer2, sizeof(*layer2));

        RB_INSERT(bigblock_rb_tree, &ZoneTree, b);
}

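/*
 * Look up the ZoneTree entry for the big-block containing the given
 * offset, or return NULL if that big-block is not part of the target
 * zone.  The offset is translated to zone-2 and masked down to a
 * big-block boundary before the lookup.
 */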
static
bigblock_t
get_bigblock_entry(hammer_off_t offset)
{
        bigblock_t b;

        offset = hammer_xlate_to_zone2(offset);
        offset &= ~HAMMER_BIGBLOCK_MASK64;

        b = RB_LOOKUP(bigblock_rb_tree, &ZoneTree, offset);
        if (b)
                return(b);
        return(NULL);
}