1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2011, 2015 by Delphix. All rights reserved. 25 * Copyright (c) 2014 Integros [integros.com] 26 */ 27 28 #include <stdio.h> 29 #include <unistd.h> 30 #include <stdio_ext.h> 31 #include <stdlib.h> 32 #include <ctype.h> 33 #include <sys/zfs_context.h> 34 #include <sys/spa.h> 35 #include <sys/spa_impl.h> 36 #include <sys/dmu.h> 37 #include <sys/zap.h> 38 #include <sys/fs/zfs.h> 39 #include <sys/zfs_znode.h> 40 #include <sys/zfs_sa.h> 41 #include <sys/sa.h> 42 #include <sys/sa_impl.h> 43 #include <sys/vdev.h> 44 #include <sys/vdev_impl.h> 45 #include <sys/metaslab_impl.h> 46 #include <sys/dmu_objset.h> 47 #include <sys/dsl_dir.h> 48 #include <sys/dsl_dataset.h> 49 #include <sys/dsl_pool.h> 50 #include <sys/dbuf.h> 51 #include <sys/zil.h> 52 #include <sys/zil_impl.h> 53 #include <sys/stat.h> 54 #include <sys/resource.h> 55 #include <sys/dmu_traverse.h> 56 #include <sys/zio_checksum.h> 57 #include <sys/zio_compress.h> 58 #include <sys/zfs_fuid.h> 59 #include <sys/arc.h> 60 #include <sys/ddt.h> 61 #include <sys/zfeature.h> 62 #include <zfs_comutil.h> 63 #undef ZFS_MAXNAMELEN 64 #undef verify 65 #include <libzfs.h> 66 67 #define ZDB_COMPRESS_NAME(idx) ((idx) < ZIO_COMPRESS_FUNCTIONS ? \ 68 zio_compress_table[(idx)].ci_name : "UNKNOWN") 69 #define ZDB_CHECKSUM_NAME(idx) ((idx) < ZIO_CHECKSUM_FUNCTIONS ? \ 70 zio_checksum_table[(idx)].ci_name : "UNKNOWN") 71 #define ZDB_OT_NAME(idx) ((idx) < DMU_OT_NUMTYPES ? \ 72 dmu_ot[(idx)].ot_name : DMU_OT_IS_VALID(idx) ? \ 73 dmu_ot_byteswap[DMU_OT_BYTESWAP(idx)].ob_name : "UNKNOWN") 74 #define ZDB_OT_TYPE(idx) ((idx) < DMU_OT_NUMTYPES ? (idx) : \ 75 (((idx) == DMU_OTN_ZAP_DATA || (idx) == DMU_OTN_ZAP_METADATA) ? \ 76 DMU_OT_ZAP_OTHER : DMU_OT_NUMTYPES)) 77 78 #ifndef lint 79 extern boolean_t zfs_recover; 80 extern uint64_t zfs_arc_max, zfs_arc_meta_limit; 81 extern int zfs_vdev_async_read_max_active; 82 #else 83 boolean_t zfs_recover; 84 uint64_t zfs_arc_max, zfs_arc_meta_limit; 85 int zfs_vdev_async_read_max_active; 86 #endif 87 88 const char cmdname[] = "zdb"; 89 uint8_t dump_opt[256]; 90 91 typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size); 92 93 extern void dump_intent_log(zilog_t *); 94 uint64_t *zopt_object = NULL; 95 int zopt_objects = 0; 96 libzfs_handle_t *g_zfs; 97 uint64_t max_inflight = 1000; 98 99 static void snprintf_blkptr_compact(char *, size_t, const blkptr_t *); 100 101 /* 102 * These libumem hooks provide a reasonable set of defaults for the allocator's 103 * debugging facilities. 
104 */ 105 const char * 106 _umem_debug_init() 107 { 108 return ("default,verbose"); /* $UMEM_DEBUG setting */ 109 } 110 111 const char * 112 _umem_logging_init(void) 113 { 114 return ("fail,contents"); /* $UMEM_LOGGING setting */ 115 } 116 117 static void 118 usage(void) 119 { 120 (void) fprintf(stderr, 121 "Usage: %s [-CumMdibcsDvhLXFPA] [-t txg] [-e [-p path...]] " 122 "[-U config] [-I inflight I/Os] [-x dumpdir] poolname [object...]\n" 123 " %s [-divPA] [-e -p path...] [-U config] dataset " 124 "[object...]\n" 125 " %s -mM [-LXFPA] [-t txg] [-e [-p path...]] [-U config] " 126 "poolname [vdev [metaslab...]]\n" 127 " %s -R [-A] [-e [-p path...]] poolname " 128 "vdev:offset:size[:flags]\n" 129 " %s -S [-PA] [-e [-p path...]] [-U config] poolname\n" 130 " %s -l [-uA] device\n" 131 " %s -C [-A] [-U config]\n\n", 132 cmdname, cmdname, cmdname, cmdname, cmdname, cmdname, cmdname); 133 134 (void) fprintf(stderr, " Dataset name must include at least one " 135 "separator character '/' or '@'\n"); 136 (void) fprintf(stderr, " If dataset name is specified, only that " 137 "dataset is dumped\n"); 138 (void) fprintf(stderr, " If object numbers are specified, only " 139 "those objects are dumped\n\n"); 140 (void) fprintf(stderr, " Options to control amount of output:\n"); 141 (void) fprintf(stderr, " -u uberblock\n"); 142 (void) fprintf(stderr, " -d dataset(s)\n"); 143 (void) fprintf(stderr, " -i intent logs\n"); 144 (void) fprintf(stderr, " -C config (or cachefile if alone)\n"); 145 (void) fprintf(stderr, " -h pool history\n"); 146 (void) fprintf(stderr, " -b block statistics\n"); 147 (void) fprintf(stderr, " -m metaslabs\n"); 148 (void) fprintf(stderr, " -M metaslab groups\n"); 149 (void) fprintf(stderr, " -c checksum all metadata (twice for " 150 "all data) blocks\n"); 151 (void) fprintf(stderr, " -s report stats on zdb's I/O\n"); 152 (void) fprintf(stderr, " -D dedup statistics\n"); 153 (void) fprintf(stderr, " -S simulate dedup to measure effect\n"); 154 (void) fprintf(stderr, " -v verbose (applies to all others)\n"); 155 (void) fprintf(stderr, " -l dump label contents\n"); 156 (void) fprintf(stderr, " -L disable leak tracking (do not " 157 "load spacemaps)\n"); 158 (void) fprintf(stderr, " -R read and display block from a " 159 "device\n\n"); 160 (void) fprintf(stderr, " Below options are intended for use " 161 "with other options:\n"); 162 (void) fprintf(stderr, " -A ignore assertions (-A), enable " 163 "panic recovery (-AA) or both (-AAA)\n"); 164 (void) fprintf(stderr, " -F attempt automatic rewind within " 165 "safe range of transaction groups\n"); 166 (void) fprintf(stderr, " -U <cachefile_path> -- use alternate " 167 "cachefile\n"); 168 (void) fprintf(stderr, " -X attempt extreme rewind (does not " 169 "work with dataset)\n"); 170 (void) fprintf(stderr, " -e pool is exported/destroyed/" 171 "has altroot/not in a cachefile\n"); 172 (void) fprintf(stderr, " -p <path> -- use one or more with " 173 "-e to specify path to vdev dir\n"); 174 (void) fprintf(stderr, " -x <dumpdir> -- " 175 "dump all read blocks into specified directory\n"); 176 (void) fprintf(stderr, " -P print numbers in parseable form\n"); 177 (void) fprintf(stderr, " -t <txg> -- highest txg to use when " 178 "searching for uberblocks\n"); 179 (void) fprintf(stderr, " -I <number of inflight I/Os> -- " 180 "specify the maximum number of " 181 "checksumming I/Os [default is 200]\n"); 182 (void) fprintf(stderr, "Specify an option more than once (e.g. 
-bb) " 183 "to make only that option verbose\n"); 184 (void) fprintf(stderr, "Default is to dump everything non-verbosely\n"); 185 exit(1); 186 } 187 188 /* 189 * Called for usage errors that are discovered after a call to spa_open(), 190 * dmu_bonus_hold(), or pool_match(). abort() is called for other errors. 191 */ 192 193 static void 194 fatal(const char *fmt, ...) 195 { 196 va_list ap; 197 198 va_start(ap, fmt); 199 (void) fprintf(stderr, "%s: ", cmdname); 200 (void) vfprintf(stderr, fmt, ap); 201 va_end(ap); 202 (void) fprintf(stderr, "\n"); 203 204 exit(1); 205 } 206 207 /* ARGSUSED */ 208 static void 209 dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size) 210 { 211 nvlist_t *nv; 212 size_t nvsize = *(uint64_t *)data; 213 char *packed = umem_alloc(nvsize, UMEM_NOFAIL); 214 215 VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH)); 216 217 VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0); 218 219 umem_free(packed, nvsize); 220 221 dump_nvlist(nv, 8); 222 223 nvlist_free(nv); 224 } 225 226 /* ARGSUSED */ 227 static void 228 dump_history_offsets(objset_t *os, uint64_t object, void *data, size_t size) 229 { 230 spa_history_phys_t *shp = data; 231 232 if (shp == NULL) 233 return; 234 235 (void) printf("\t\tpool_create_len = %llu\n", 236 (u_longlong_t)shp->sh_pool_create_len); 237 (void) printf("\t\tphys_max_off = %llu\n", 238 (u_longlong_t)shp->sh_phys_max_off); 239 (void) printf("\t\tbof = %llu\n", 240 (u_longlong_t)shp->sh_bof); 241 (void) printf("\t\teof = %llu\n", 242 (u_longlong_t)shp->sh_eof); 243 (void) printf("\t\trecords_lost = %llu\n", 244 (u_longlong_t)shp->sh_records_lost); 245 } 246 247 static void 248 zdb_nicenum(uint64_t num, char *buf) 249 { 250 if (dump_opt['P']) 251 (void) sprintf(buf, "%llu", (longlong_t)num); 252 else 253 nicenum(num, buf); 254 } 255 256 const char histo_stars[] = "****************************************"; 257 const int histo_width = sizeof (histo_stars) - 1; 258 259 static void 260 dump_histogram(const uint64_t *histo, int size, int offset) 261 { 262 int i; 263 int minidx = size - 1; 264 int maxidx = 0; 265 uint64_t max = 0; 266 267 for (i = 0; i < size; i++) { 268 if (histo[i] > max) 269 max = histo[i]; 270 if (histo[i] > 0 && i > maxidx) 271 maxidx = i; 272 if (histo[i] > 0 && i < minidx) 273 minidx = i; 274 } 275 276 if (max < histo_width) 277 max = histo_width; 278 279 for (i = minidx; i <= maxidx; i++) { 280 (void) printf("\t\t\t%3u: %6llu %s\n", 281 i + offset, (u_longlong_t)histo[i], 282 &histo_stars[(max - histo[i]) * histo_width / max]); 283 } 284 } 285 286 static void 287 dump_zap_stats(objset_t *os, uint64_t object) 288 { 289 int error; 290 zap_stats_t zs; 291 292 error = zap_get_stats(os, object, &zs); 293 if (error) 294 return; 295 296 if (zs.zs_ptrtbl_len == 0) { 297 ASSERT(zs.zs_num_blocks == 1); 298 (void) printf("\tmicrozap: %llu bytes, %llu entries\n", 299 (u_longlong_t)zs.zs_blocksize, 300 (u_longlong_t)zs.zs_num_entries); 301 return; 302 } 303 304 (void) printf("\tFat ZAP stats:\n"); 305 306 (void) printf("\t\tPointer table:\n"); 307 (void) printf("\t\t\t%llu elements\n", 308 (u_longlong_t)zs.zs_ptrtbl_len); 309 (void) printf("\t\t\tzt_blk: %llu\n", 310 (u_longlong_t)zs.zs_ptrtbl_zt_blk); 311 (void) printf("\t\t\tzt_numblks: %llu\n", 312 (u_longlong_t)zs.zs_ptrtbl_zt_numblks); 313 (void) printf("\t\t\tzt_shift: %llu\n", 314 (u_longlong_t)zs.zs_ptrtbl_zt_shift); 315 (void) printf("\t\t\tzt_blks_copied: %llu\n", 316 (u_longlong_t)zs.zs_ptrtbl_blks_copied); 317 (void) 
printf("\t\t\tzt_nextblk: %llu\n", 318 (u_longlong_t)zs.zs_ptrtbl_nextblk); 319 320 (void) printf("\t\tZAP entries: %llu\n", 321 (u_longlong_t)zs.zs_num_entries); 322 (void) printf("\t\tLeaf blocks: %llu\n", 323 (u_longlong_t)zs.zs_num_leafs); 324 (void) printf("\t\tTotal blocks: %llu\n", 325 (u_longlong_t)zs.zs_num_blocks); 326 (void) printf("\t\tzap_block_type: 0x%llx\n", 327 (u_longlong_t)zs.zs_block_type); 328 (void) printf("\t\tzap_magic: 0x%llx\n", 329 (u_longlong_t)zs.zs_magic); 330 (void) printf("\t\tzap_salt: 0x%llx\n", 331 (u_longlong_t)zs.zs_salt); 332 333 (void) printf("\t\tLeafs with 2^n pointers:\n"); 334 dump_histogram(zs.zs_leafs_with_2n_pointers, ZAP_HISTOGRAM_SIZE, 0); 335 336 (void) printf("\t\tBlocks with n*5 entries:\n"); 337 dump_histogram(zs.zs_blocks_with_n5_entries, ZAP_HISTOGRAM_SIZE, 0); 338 339 (void) printf("\t\tBlocks n/10 full:\n"); 340 dump_histogram(zs.zs_blocks_n_tenths_full, ZAP_HISTOGRAM_SIZE, 0); 341 342 (void) printf("\t\tEntries with n chunks:\n"); 343 dump_histogram(zs.zs_entries_using_n_chunks, ZAP_HISTOGRAM_SIZE, 0); 344 345 (void) printf("\t\tBuckets with n entries:\n"); 346 dump_histogram(zs.zs_buckets_with_n_entries, ZAP_HISTOGRAM_SIZE, 0); 347 } 348 349 /*ARGSUSED*/ 350 static void 351 dump_none(objset_t *os, uint64_t object, void *data, size_t size) 352 { 353 } 354 355 /*ARGSUSED*/ 356 static void 357 dump_unknown(objset_t *os, uint64_t object, void *data, size_t size) 358 { 359 (void) printf("\tUNKNOWN OBJECT TYPE\n"); 360 } 361 362 /*ARGSUSED*/ 363 void 364 dump_uint8(objset_t *os, uint64_t object, void *data, size_t size) 365 { 366 } 367 368 /*ARGSUSED*/ 369 static void 370 dump_uint64(objset_t *os, uint64_t object, void *data, size_t size) 371 { 372 } 373 374 /*ARGSUSED*/ 375 static void 376 dump_zap(objset_t *os, uint64_t object, void *data, size_t size) 377 { 378 zap_cursor_t zc; 379 zap_attribute_t attr; 380 void *prop; 381 int i; 382 383 dump_zap_stats(os, object); 384 (void) printf("\n"); 385 386 for (zap_cursor_init(&zc, os, object); 387 zap_cursor_retrieve(&zc, &attr) == 0; 388 zap_cursor_advance(&zc)) { 389 (void) printf("\t\t%s = ", attr.za_name); 390 if (attr.za_num_integers == 0) { 391 (void) printf("\n"); 392 continue; 393 } 394 prop = umem_zalloc(attr.za_num_integers * 395 attr.za_integer_length, UMEM_NOFAIL); 396 (void) zap_lookup(os, object, attr.za_name, 397 attr.za_integer_length, attr.za_num_integers, prop); 398 if (attr.za_integer_length == 1) { 399 (void) printf("%s", (char *)prop); 400 } else { 401 for (i = 0; i < attr.za_num_integers; i++) { 402 switch (attr.za_integer_length) { 403 case 2: 404 (void) printf("%u ", 405 ((uint16_t *)prop)[i]); 406 break; 407 case 4: 408 (void) printf("%u ", 409 ((uint32_t *)prop)[i]); 410 break; 411 case 8: 412 (void) printf("%lld ", 413 (u_longlong_t)((int64_t *)prop)[i]); 414 break; 415 } 416 } 417 } 418 (void) printf("\n"); 419 umem_free(prop, attr.za_num_integers * attr.za_integer_length); 420 } 421 zap_cursor_fini(&zc); 422 } 423 424 static void 425 dump_bpobj(objset_t *os, uint64_t object, void *data, size_t size) 426 { 427 bpobj_phys_t *bpop = data; 428 char bytes[32], comp[32], uncomp[32]; 429 430 if (bpop == NULL) 431 return; 432 433 zdb_nicenum(bpop->bpo_bytes, bytes); 434 zdb_nicenum(bpop->bpo_comp, comp); 435 zdb_nicenum(bpop->bpo_uncomp, uncomp); 436 437 (void) printf("\t\tnum_blkptrs = %llu\n", 438 (u_longlong_t)bpop->bpo_num_blkptrs); 439 (void) printf("\t\tbytes = %s\n", bytes); 440 if (size >= BPOBJ_SIZE_V1) { 441 (void) printf("\t\tcomp = %s\n", comp); 442 (void) 
printf("\t\tuncomp = %s\n", uncomp); 443 } 444 if (size >= sizeof (*bpop)) { 445 (void) printf("\t\tsubobjs = %llu\n", 446 (u_longlong_t)bpop->bpo_subobjs); 447 (void) printf("\t\tnum_subobjs = %llu\n", 448 (u_longlong_t)bpop->bpo_num_subobjs); 449 } 450 451 if (dump_opt['d'] < 5) 452 return; 453 454 for (uint64_t i = 0; i < bpop->bpo_num_blkptrs; i++) { 455 char blkbuf[BP_SPRINTF_LEN]; 456 blkptr_t bp; 457 458 int err = dmu_read(os, object, 459 i * sizeof (bp), sizeof (bp), &bp, 0); 460 if (err != 0) { 461 (void) printf("got error %u from dmu_read\n", err); 462 break; 463 } 464 snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), &bp); 465 (void) printf("\t%s\n", blkbuf); 466 } 467 } 468 469 /* ARGSUSED */ 470 static void 471 dump_bpobj_subobjs(objset_t *os, uint64_t object, void *data, size_t size) 472 { 473 dmu_object_info_t doi; 474 475 VERIFY0(dmu_object_info(os, object, &doi)); 476 uint64_t *subobjs = kmem_alloc(doi.doi_max_offset, KM_SLEEP); 477 478 int err = dmu_read(os, object, 0, doi.doi_max_offset, subobjs, 0); 479 if (err != 0) { 480 (void) printf("got error %u from dmu_read\n", err); 481 kmem_free(subobjs, doi.doi_max_offset); 482 return; 483 } 484 485 int64_t last_nonzero = -1; 486 for (uint64_t i = 0; i < doi.doi_max_offset / 8; i++) { 487 if (subobjs[i] != 0) 488 last_nonzero = i; 489 } 490 491 for (int64_t i = 0; i <= last_nonzero; i++) { 492 (void) printf("\t%llu\n", (longlong_t)subobjs[i]); 493 } 494 kmem_free(subobjs, doi.doi_max_offset); 495 } 496 497 /*ARGSUSED*/ 498 static void 499 dump_ddt_zap(objset_t *os, uint64_t object, void *data, size_t size) 500 { 501 dump_zap_stats(os, object); 502 /* contents are printed elsewhere, properly decoded */ 503 } 504 505 /*ARGSUSED*/ 506 static void 507 dump_sa_attrs(objset_t *os, uint64_t object, void *data, size_t size) 508 { 509 zap_cursor_t zc; 510 zap_attribute_t attr; 511 512 dump_zap_stats(os, object); 513 (void) printf("\n"); 514 515 for (zap_cursor_init(&zc, os, object); 516 zap_cursor_retrieve(&zc, &attr) == 0; 517 zap_cursor_advance(&zc)) { 518 (void) printf("\t\t%s = ", attr.za_name); 519 if (attr.za_num_integers == 0) { 520 (void) printf("\n"); 521 continue; 522 } 523 (void) printf(" %llx : [%d:%d:%d]\n", 524 (u_longlong_t)attr.za_first_integer, 525 (int)ATTR_LENGTH(attr.za_first_integer), 526 (int)ATTR_BSWAP(attr.za_first_integer), 527 (int)ATTR_NUM(attr.za_first_integer)); 528 } 529 zap_cursor_fini(&zc); 530 } 531 532 /*ARGSUSED*/ 533 static void 534 dump_sa_layouts(objset_t *os, uint64_t object, void *data, size_t size) 535 { 536 zap_cursor_t zc; 537 zap_attribute_t attr; 538 uint16_t *layout_attrs; 539 int i; 540 541 dump_zap_stats(os, object); 542 (void) printf("\n"); 543 544 for (zap_cursor_init(&zc, os, object); 545 zap_cursor_retrieve(&zc, &attr) == 0; 546 zap_cursor_advance(&zc)) { 547 (void) printf("\t\t%s = [", attr.za_name); 548 if (attr.za_num_integers == 0) { 549 (void) printf("\n"); 550 continue; 551 } 552 553 VERIFY(attr.za_integer_length == 2); 554 layout_attrs = umem_zalloc(attr.za_num_integers * 555 attr.za_integer_length, UMEM_NOFAIL); 556 557 VERIFY(zap_lookup(os, object, attr.za_name, 558 attr.za_integer_length, 559 attr.za_num_integers, layout_attrs) == 0); 560 561 for (i = 0; i != attr.za_num_integers; i++) 562 (void) printf(" %d ", (int)layout_attrs[i]); 563 (void) printf("]\n"); 564 umem_free(layout_attrs, 565 attr.za_num_integers * attr.za_integer_length); 566 } 567 zap_cursor_fini(&zc); 568 } 569 570 /*ARGSUSED*/ 571 static void 572 dump_zpldir(objset_t *os, uint64_t object, void *data, size_t 
size) 573 { 574 zap_cursor_t zc; 575 zap_attribute_t attr; 576 const char *typenames[] = { 577 /* 0 */ "not specified", 578 /* 1 */ "FIFO", 579 /* 2 */ "Character Device", 580 /* 3 */ "3 (invalid)", 581 /* 4 */ "Directory", 582 /* 5 */ "5 (invalid)", 583 /* 6 */ "Block Device", 584 /* 7 */ "7 (invalid)", 585 /* 8 */ "Regular File", 586 /* 9 */ "9 (invalid)", 587 /* 10 */ "Symbolic Link", 588 /* 11 */ "11 (invalid)", 589 /* 12 */ "Socket", 590 /* 13 */ "Door", 591 /* 14 */ "Event Port", 592 /* 15 */ "15 (invalid)", 593 }; 594 595 dump_zap_stats(os, object); 596 (void) printf("\n"); 597 598 for (zap_cursor_init(&zc, os, object); 599 zap_cursor_retrieve(&zc, &attr) == 0; 600 zap_cursor_advance(&zc)) { 601 (void) printf("\t\t%s = %lld (type: %s)\n", 602 attr.za_name, ZFS_DIRENT_OBJ(attr.za_first_integer), 603 typenames[ZFS_DIRENT_TYPE(attr.za_first_integer)]); 604 } 605 zap_cursor_fini(&zc); 606 } 607 608 int 609 get_dtl_refcount(vdev_t *vd) 610 { 611 int refcount = 0; 612 613 if (vd->vdev_ops->vdev_op_leaf) { 614 space_map_t *sm = vd->vdev_dtl_sm; 615 616 if (sm != NULL && 617 sm->sm_dbuf->db_size == sizeof (space_map_phys_t)) 618 return (1); 619 return (0); 620 } 621 622 for (int c = 0; c < vd->vdev_children; c++) 623 refcount += get_dtl_refcount(vd->vdev_child[c]); 624 return (refcount); 625 } 626 627 int 628 get_metaslab_refcount(vdev_t *vd) 629 { 630 int refcount = 0; 631 632 if (vd->vdev_top == vd && !vd->vdev_removing) { 633 for (int m = 0; m < vd->vdev_ms_count; m++) { 634 space_map_t *sm = vd->vdev_ms[m]->ms_sm; 635 636 if (sm != NULL && 637 sm->sm_dbuf->db_size == sizeof (space_map_phys_t)) 638 refcount++; 639 } 640 } 641 for (int c = 0; c < vd->vdev_children; c++) 642 refcount += get_metaslab_refcount(vd->vdev_child[c]); 643 644 return (refcount); 645 } 646 647 static int 648 verify_spacemap_refcounts(spa_t *spa) 649 { 650 uint64_t expected_refcount = 0; 651 uint64_t actual_refcount; 652 653 (void) feature_get_refcount(spa, 654 &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM], 655 &expected_refcount); 656 actual_refcount = get_dtl_refcount(spa->spa_root_vdev); 657 actual_refcount += get_metaslab_refcount(spa->spa_root_vdev); 658 659 if (expected_refcount != actual_refcount) { 660 (void) printf("space map refcount mismatch: expected %lld != " 661 "actual %lld\n", 662 (longlong_t)expected_refcount, 663 (longlong_t)actual_refcount); 664 return (2); 665 } 666 return (0); 667 } 668 669 static void 670 dump_spacemap(objset_t *os, space_map_t *sm) 671 { 672 uint64_t alloc, offset, entry; 673 char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID", 674 "INVALID", "INVALID", "INVALID", "INVALID" }; 675 676 if (sm == NULL) 677 return; 678 679 /* 680 * Print out the freelist entries in both encoded and decoded form. 681 */ 682 alloc = 0; 683 for (offset = 0; offset < space_map_length(sm); 684 offset += sizeof (entry)) { 685 uint8_t mapshift = sm->sm_shift; 686 687 VERIFY0(dmu_read(os, space_map_object(sm), offset, 688 sizeof (entry), &entry, DMU_READ_PREFETCH)); 689 if (SM_DEBUG_DECODE(entry)) { 690 691 (void) printf("\t [%6llu] %s: txg %llu, pass %llu\n", 692 (u_longlong_t)(offset / sizeof (entry)), 693 ddata[SM_DEBUG_ACTION_DECODE(entry)], 694 (u_longlong_t)SM_DEBUG_TXG_DECODE(entry), 695 (u_longlong_t)SM_DEBUG_SYNCPASS_DECODE(entry)); 696 } else { 697 (void) printf("\t [%6llu] %c range:" 698 " %010llx-%010llx size: %06llx\n", 699 (u_longlong_t)(offset / sizeof (entry)), 700 SM_TYPE_DECODE(entry) == SM_ALLOC ? 
'A' : 'F', 701 (u_longlong_t)((SM_OFFSET_DECODE(entry) << 702 mapshift) + sm->sm_start), 703 (u_longlong_t)((SM_OFFSET_DECODE(entry) << 704 mapshift) + sm->sm_start + 705 (SM_RUN_DECODE(entry) << mapshift)), 706 (u_longlong_t)(SM_RUN_DECODE(entry) << mapshift)); 707 if (SM_TYPE_DECODE(entry) == SM_ALLOC) 708 alloc += SM_RUN_DECODE(entry) << mapshift; 709 else 710 alloc -= SM_RUN_DECODE(entry) << mapshift; 711 } 712 } 713 if (alloc != space_map_allocated(sm)) { 714 (void) printf("space_map_object alloc (%llu) INCONSISTENT " 715 "with space map summary (%llu)\n", 716 (u_longlong_t)space_map_allocated(sm), (u_longlong_t)alloc); 717 } 718 } 719 720 static void 721 dump_metaslab_stats(metaslab_t *msp) 722 { 723 char maxbuf[32]; 724 range_tree_t *rt = msp->ms_tree; 725 avl_tree_t *t = &msp->ms_size_tree; 726 int free_pct = range_tree_space(rt) * 100 / msp->ms_size; 727 728 zdb_nicenum(metaslab_block_maxsize(msp), maxbuf); 729 730 (void) printf("\t %25s %10lu %7s %6s %4s %4d%%\n", 731 "segments", avl_numnodes(t), "maxsize", maxbuf, 732 "freepct", free_pct); 733 (void) printf("\tIn-memory histogram:\n"); 734 dump_histogram(rt->rt_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0); 735 } 736 737 static void 738 dump_metaslab(metaslab_t *msp) 739 { 740 vdev_t *vd = msp->ms_group->mg_vd; 741 spa_t *spa = vd->vdev_spa; 742 space_map_t *sm = msp->ms_sm; 743 char freebuf[32]; 744 745 zdb_nicenum(msp->ms_size - space_map_allocated(sm), freebuf); 746 747 (void) printf( 748 "\tmetaslab %6llu offset %12llx spacemap %6llu free %5s\n", 749 (u_longlong_t)msp->ms_id, (u_longlong_t)msp->ms_start, 750 (u_longlong_t)space_map_object(sm), freebuf); 751 752 if (dump_opt['m'] > 2 && !dump_opt['L']) { 753 mutex_enter(&msp->ms_lock); 754 metaslab_load_wait(msp); 755 if (!msp->ms_loaded) { 756 VERIFY0(metaslab_load(msp)); 757 range_tree_stat_verify(msp->ms_tree); 758 } 759 dump_metaslab_stats(msp); 760 metaslab_unload(msp); 761 mutex_exit(&msp->ms_lock); 762 } 763 764 if (dump_opt['m'] > 1 && sm != NULL && 765 spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) { 766 /* 767 * The space map histogram represents free space in chunks 768 * of sm_shift (i.e. bucket 0 refers to 2^sm_shift). 
769 */ 770 (void) printf("\tOn-disk histogram:\t\tfragmentation %llu\n", 771 (u_longlong_t)msp->ms_fragmentation); 772 dump_histogram(sm->sm_phys->smp_histogram, 773 SPACE_MAP_HISTOGRAM_SIZE, sm->sm_shift); 774 } 775 776 if (dump_opt['d'] > 5 || dump_opt['m'] > 3) { 777 ASSERT(msp->ms_size == (1ULL << vd->vdev_ms_shift)); 778 779 mutex_enter(&msp->ms_lock); 780 dump_spacemap(spa->spa_meta_objset, msp->ms_sm); 781 mutex_exit(&msp->ms_lock); 782 } 783 } 784 785 static void 786 print_vdev_metaslab_header(vdev_t *vd) 787 { 788 (void) printf("\tvdev %10llu\n\t%-10s%5llu %-19s %-15s %-10s\n", 789 (u_longlong_t)vd->vdev_id, 790 "metaslabs", (u_longlong_t)vd->vdev_ms_count, 791 "offset", "spacemap", "free"); 792 (void) printf("\t%15s %19s %15s %10s\n", 793 "---------------", "-------------------", 794 "---------------", "-------------"); 795 } 796 797 static void 798 dump_metaslab_groups(spa_t *spa) 799 { 800 vdev_t *rvd = spa->spa_root_vdev; 801 metaslab_class_t *mc = spa_normal_class(spa); 802 uint64_t fragmentation; 803 804 metaslab_class_histogram_verify(mc); 805 806 for (int c = 0; c < rvd->vdev_children; c++) { 807 vdev_t *tvd = rvd->vdev_child[c]; 808 metaslab_group_t *mg = tvd->vdev_mg; 809 810 if (mg->mg_class != mc) 811 continue; 812 813 metaslab_group_histogram_verify(mg); 814 mg->mg_fragmentation = metaslab_group_fragmentation(mg); 815 816 (void) printf("\tvdev %10llu\t\tmetaslabs%5llu\t\t" 817 "fragmentation", 818 (u_longlong_t)tvd->vdev_id, 819 (u_longlong_t)tvd->vdev_ms_count); 820 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) { 821 (void) printf("%3s\n", "-"); 822 } else { 823 (void) printf("%3llu%%\n", 824 (u_longlong_t)mg->mg_fragmentation); 825 } 826 dump_histogram(mg->mg_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0); 827 } 828 829 (void) printf("\tpool %s\tfragmentation", spa_name(spa)); 830 fragmentation = metaslab_class_fragmentation(mc); 831 if (fragmentation == ZFS_FRAG_INVALID) 832 (void) printf("\t%3s\n", "-"); 833 else 834 (void) printf("\t%3llu%%\n", (u_longlong_t)fragmentation); 835 dump_histogram(mc->mc_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0); 836 } 837 838 static void 839 dump_metaslabs(spa_t *spa) 840 { 841 vdev_t *vd, *rvd = spa->spa_root_vdev; 842 uint64_t m, c = 0, children = rvd->vdev_children; 843 844 (void) printf("\nMetaslabs:\n"); 845 846 if (!dump_opt['d'] && zopt_objects > 0) { 847 c = zopt_object[0]; 848 849 if (c >= children) 850 (void) fatal("bad vdev id: %llu", (u_longlong_t)c); 851 852 if (zopt_objects > 1) { 853 vd = rvd->vdev_child[c]; 854 print_vdev_metaslab_header(vd); 855 856 for (m = 1; m < zopt_objects; m++) { 857 if (zopt_object[m] < vd->vdev_ms_count) 858 dump_metaslab( 859 vd->vdev_ms[zopt_object[m]]); 860 else 861 (void) fprintf(stderr, "bad metaslab " 862 "number %llu\n", 863 (u_longlong_t)zopt_object[m]); 864 } 865 (void) printf("\n"); 866 return; 867 } 868 children = c + 1; 869 } 870 for (; c < children; c++) { 871 vd = rvd->vdev_child[c]; 872 print_vdev_metaslab_header(vd); 873 874 for (m = 0; m < vd->vdev_ms_count; m++) 875 dump_metaslab(vd->vdev_ms[m]); 876 (void) printf("\n"); 877 } 878 } 879 880 static void 881 dump_dde(const ddt_t *ddt, const ddt_entry_t *dde, uint64_t index) 882 { 883 const ddt_phys_t *ddp = dde->dde_phys; 884 const ddt_key_t *ddk = &dde->dde_key; 885 char *types[4] = { "ditto", "single", "double", "triple" }; 886 char blkbuf[BP_SPRINTF_LEN]; 887 blkptr_t blk; 888 889 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 890 if (ddp->ddp_phys_birth == 0) 891 continue; 892 ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk); 
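		/* format the blkptr reconstructed from this DDT entry and print it with its refcount */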
893 snprintf_blkptr(blkbuf, sizeof (blkbuf), &blk); 894 (void) printf("index %llx refcnt %llu %s %s\n", 895 (u_longlong_t)index, (u_longlong_t)ddp->ddp_refcnt, 896 types[p], blkbuf); 897 } 898 } 899 900 static void 901 dump_dedup_ratio(const ddt_stat_t *dds) 902 { 903 double rL, rP, rD, D, dedup, compress, copies; 904 905 if (dds->dds_blocks == 0) 906 return; 907 908 rL = (double)dds->dds_ref_lsize; 909 rP = (double)dds->dds_ref_psize; 910 rD = (double)dds->dds_ref_dsize; 911 D = (double)dds->dds_dsize; 912 913 dedup = rD / D; 914 compress = rL / rP; 915 copies = rD / rP; 916 917 (void) printf("dedup = %.2f, compress = %.2f, copies = %.2f, " 918 "dedup * compress / copies = %.2f\n\n", 919 dedup, compress, copies, dedup * compress / copies); 920 } 921 922 static void 923 dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class) 924 { 925 char name[DDT_NAMELEN]; 926 ddt_entry_t dde; 927 uint64_t walk = 0; 928 dmu_object_info_t doi; 929 uint64_t count, dspace, mspace; 930 int error; 931 932 error = ddt_object_info(ddt, type, class, &doi); 933 934 if (error == ENOENT) 935 return; 936 ASSERT(error == 0); 937 938 if ((count = ddt_object_count(ddt, type, class)) == 0) 939 return; 940 941 dspace = doi.doi_physical_blocks_512 << 9; 942 mspace = doi.doi_fill_count * doi.doi_data_block_size; 943 944 ddt_object_name(ddt, type, class, name); 945 946 (void) printf("%s: %llu entries, size %llu on disk, %llu in core\n", 947 name, 948 (u_longlong_t)count, 949 (u_longlong_t)(dspace / count), 950 (u_longlong_t)(mspace / count)); 951 952 if (dump_opt['D'] < 3) 953 return; 954 955 zpool_dump_ddt(NULL, &ddt->ddt_histogram[type][class]); 956 957 if (dump_opt['D'] < 4) 958 return; 959 960 if (dump_opt['D'] < 5 && class == DDT_CLASS_UNIQUE) 961 return; 962 963 (void) printf("%s contents:\n\n", name); 964 965 while ((error = ddt_object_walk(ddt, type, class, &walk, &dde)) == 0) 966 dump_dde(ddt, &dde, walk); 967 968 ASSERT(error == ENOENT); 969 970 (void) printf("\n"); 971 } 972 973 static void 974 dump_all_ddts(spa_t *spa) 975 { 976 ddt_histogram_t ddh_total = { 0 }; 977 ddt_stat_t dds_total = { 0 }; 978 979 for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) { 980 ddt_t *ddt = spa->spa_ddt[c]; 981 for (enum ddt_type type = 0; type < DDT_TYPES; type++) { 982 for (enum ddt_class class = 0; class < DDT_CLASSES; 983 class++) { 984 dump_ddt(ddt, type, class); 985 } 986 } 987 } 988 989 ddt_get_dedup_stats(spa, &dds_total); 990 991 if (dds_total.dds_blocks == 0) { 992 (void) printf("All DDTs are empty\n"); 993 return; 994 } 995 996 (void) printf("\n"); 997 998 if (dump_opt['D'] > 1) { 999 (void) printf("DDT histogram (aggregated over all DDTs):\n"); 1000 ddt_get_dedup_histogram(spa, &ddh_total); 1001 zpool_dump_ddt(&dds_total, &ddh_total); 1002 } 1003 1004 dump_dedup_ratio(&dds_total); 1005 } 1006 1007 static void 1008 dump_dtl_seg(void *arg, uint64_t start, uint64_t size) 1009 { 1010 char *prefix = arg; 1011 1012 (void) printf("%s [%llu,%llu) length %llu\n", 1013 prefix, 1014 (u_longlong_t)start, 1015 (u_longlong_t)(start + size), 1016 (u_longlong_t)(size)); 1017 } 1018 1019 static void 1020 dump_dtl(vdev_t *vd, int indent) 1021 { 1022 spa_t *spa = vd->vdev_spa; 1023 boolean_t required; 1024 char *name[DTL_TYPES] = { "missing", "partial", "scrub", "outage" }; 1025 char prefix[256]; 1026 1027 spa_vdev_state_enter(spa, SCL_NONE); 1028 required = vdev_dtl_required(vd); 1029 (void) spa_vdev_state_exit(spa, NULL, 0); 1030 1031 if (indent == 0) 1032 (void) printf("\nDirty time logs:\n\n"); 1033 1034 (void) 
printf("\t%*s%s [%s]\n", indent, "", 1035 vd->vdev_path ? vd->vdev_path : 1036 vd->vdev_parent ? vd->vdev_ops->vdev_op_type : spa_name(spa), 1037 required ? "DTL-required" : "DTL-expendable"); 1038 1039 for (int t = 0; t < DTL_TYPES; t++) { 1040 range_tree_t *rt = vd->vdev_dtl[t]; 1041 if (range_tree_space(rt) == 0) 1042 continue; 1043 (void) snprintf(prefix, sizeof (prefix), "\t%*s%s", 1044 indent + 2, "", name[t]); 1045 mutex_enter(rt->rt_lock); 1046 range_tree_walk(rt, dump_dtl_seg, prefix); 1047 mutex_exit(rt->rt_lock); 1048 if (dump_opt['d'] > 5 && vd->vdev_children == 0) 1049 dump_spacemap(spa->spa_meta_objset, vd->vdev_dtl_sm); 1050 } 1051 1052 for (int c = 0; c < vd->vdev_children; c++) 1053 dump_dtl(vd->vdev_child[c], indent + 4); 1054 } 1055 1056 static void 1057 dump_history(spa_t *spa) 1058 { 1059 nvlist_t **events = NULL; 1060 uint64_t resid, len, off = 0; 1061 uint_t num = 0; 1062 int error; 1063 time_t tsec; 1064 struct tm t; 1065 char tbuf[30]; 1066 char internalstr[MAXPATHLEN]; 1067 1068 char *buf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 1069 do { 1070 len = SPA_MAXBLOCKSIZE; 1071 1072 if ((error = spa_history_get(spa, &off, &len, buf)) != 0) { 1073 (void) fprintf(stderr, "Unable to read history: " 1074 "error %d\n", error); 1075 umem_free(buf, SPA_MAXBLOCKSIZE); 1076 return; 1077 } 1078 1079 if (zpool_history_unpack(buf, len, &resid, &events, &num) != 0) 1080 break; 1081 1082 off -= resid; 1083 } while (len != 0); 1084 umem_free(buf, SPA_MAXBLOCKSIZE); 1085 1086 (void) printf("\nHistory:\n"); 1087 for (int i = 0; i < num; i++) { 1088 uint64_t time, txg, ievent; 1089 char *cmd, *intstr; 1090 boolean_t printed = B_FALSE; 1091 1092 if (nvlist_lookup_uint64(events[i], ZPOOL_HIST_TIME, 1093 &time) != 0) 1094 goto next; 1095 if (nvlist_lookup_string(events[i], ZPOOL_HIST_CMD, 1096 &cmd) != 0) { 1097 if (nvlist_lookup_uint64(events[i], 1098 ZPOOL_HIST_INT_EVENT, &ievent) != 0) 1099 goto next; 1100 verify(nvlist_lookup_uint64(events[i], 1101 ZPOOL_HIST_TXG, &txg) == 0); 1102 verify(nvlist_lookup_string(events[i], 1103 ZPOOL_HIST_INT_STR, &intstr) == 0); 1104 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) 1105 goto next; 1106 1107 (void) snprintf(internalstr, 1108 sizeof (internalstr), 1109 "[internal %s txg:%lld] %s", 1110 zfs_history_event_names[ievent], txg, 1111 intstr); 1112 cmd = internalstr; 1113 } 1114 tsec = time; 1115 (void) localtime_r(&tsec, &t); 1116 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t); 1117 (void) printf("%s %s\n", tbuf, cmd); 1118 printed = B_TRUE; 1119 1120 next: 1121 if (dump_opt['h'] > 1) { 1122 if (!printed) 1123 (void) printf("unrecognized record:\n"); 1124 dump_nvlist(events[i], 2); 1125 } 1126 } 1127 } 1128 1129 /*ARGSUSED*/ 1130 static void 1131 dump_dnode(objset_t *os, uint64_t object, void *data, size_t size) 1132 { 1133 } 1134 1135 static uint64_t 1136 blkid2offset(const dnode_phys_t *dnp, const blkptr_t *bp, 1137 const zbookmark_phys_t *zb) 1138 { 1139 if (dnp == NULL) { 1140 ASSERT(zb->zb_level < 0); 1141 if (zb->zb_object == 0) 1142 return (zb->zb_blkid); 1143 return (zb->zb_blkid * BP_GET_LSIZE(bp)); 1144 } 1145 1146 ASSERT(zb->zb_level >= 0); 1147 1148 return ((zb->zb_blkid << 1149 (zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) * 1150 dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT); 1151 } 1152 1153 static void 1154 snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp) 1155 { 1156 const dva_t *dva = bp->blk_dva; 1157 int ndvas = dump_opt['d'] > 5 ? 
BP_GET_NDVAS(bp) : 1; 1158 1159 if (dump_opt['b'] >= 6) { 1160 snprintf_blkptr(blkbuf, buflen, bp); 1161 return; 1162 } 1163 1164 if (BP_IS_EMBEDDED(bp)) { 1165 (void) sprintf(blkbuf, 1166 "EMBEDDED et=%u %llxL/%llxP B=%llu", 1167 (int)BPE_GET_ETYPE(bp), 1168 (u_longlong_t)BPE_GET_LSIZE(bp), 1169 (u_longlong_t)BPE_GET_PSIZE(bp), 1170 (u_longlong_t)bp->blk_birth); 1171 return; 1172 } 1173 1174 blkbuf[0] = '\0'; 1175 for (int i = 0; i < ndvas; i++) 1176 (void) snprintf(blkbuf + strlen(blkbuf), 1177 buflen - strlen(blkbuf), "%llu:%llx:%llx ", 1178 (u_longlong_t)DVA_GET_VDEV(&dva[i]), 1179 (u_longlong_t)DVA_GET_OFFSET(&dva[i]), 1180 (u_longlong_t)DVA_GET_ASIZE(&dva[i])); 1181 1182 if (BP_IS_HOLE(bp)) { 1183 (void) snprintf(blkbuf + strlen(blkbuf), 1184 buflen - strlen(blkbuf), 1185 "%llxL B=%llu", 1186 (u_longlong_t)BP_GET_LSIZE(bp), 1187 (u_longlong_t)bp->blk_birth); 1188 } else { 1189 (void) snprintf(blkbuf + strlen(blkbuf), 1190 buflen - strlen(blkbuf), 1191 "%llxL/%llxP F=%llu B=%llu/%llu", 1192 (u_longlong_t)BP_GET_LSIZE(bp), 1193 (u_longlong_t)BP_GET_PSIZE(bp), 1194 (u_longlong_t)BP_GET_FILL(bp), 1195 (u_longlong_t)bp->blk_birth, 1196 (u_longlong_t)BP_PHYSICAL_BIRTH(bp)); 1197 } 1198 } 1199 1200 static void 1201 print_indirect(blkptr_t *bp, const zbookmark_phys_t *zb, 1202 const dnode_phys_t *dnp) 1203 { 1204 char blkbuf[BP_SPRINTF_LEN]; 1205 int l; 1206 1207 if (!BP_IS_EMBEDDED(bp)) { 1208 ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type); 1209 ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level); 1210 } 1211 1212 (void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb)); 1213 1214 ASSERT(zb->zb_level >= 0); 1215 1216 for (l = dnp->dn_nlevels - 1; l >= -1; l--) { 1217 if (l == zb->zb_level) { 1218 (void) printf("L%llx", (u_longlong_t)zb->zb_level); 1219 } else { 1220 (void) printf(" "); 1221 } 1222 } 1223 1224 snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp); 1225 (void) printf("%s\n", blkbuf); 1226 } 1227 1228 static int 1229 visit_indirect(spa_t *spa, const dnode_phys_t *dnp, 1230 blkptr_t *bp, const zbookmark_phys_t *zb) 1231 { 1232 int err = 0; 1233 1234 if (bp->blk_birth == 0) 1235 return (0); 1236 1237 print_indirect(bp, zb, dnp); 1238 1239 if (BP_GET_LEVEL(bp) > 0 && !BP_IS_HOLE(bp)) { 1240 arc_flags_t flags = ARC_FLAG_WAIT; 1241 int i; 1242 blkptr_t *cbp; 1243 int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT; 1244 arc_buf_t *buf; 1245 uint64_t fill = 0; 1246 1247 err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf, 1248 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb); 1249 if (err) 1250 return (err); 1251 ASSERT(buf->b_data); 1252 1253 /* recursively visit blocks below this */ 1254 cbp = buf->b_data; 1255 for (i = 0; i < epb; i++, cbp++) { 1256 zbookmark_phys_t czb; 1257 1258 SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object, 1259 zb->zb_level - 1, 1260 zb->zb_blkid * epb + i); 1261 err = visit_indirect(spa, dnp, cbp, &czb); 1262 if (err) 1263 break; 1264 fill += BP_GET_FILL(cbp); 1265 } 1266 if (!err) 1267 ASSERT3U(fill, ==, BP_GET_FILL(bp)); 1268 (void) arc_buf_remove_ref(buf, &buf); 1269 } 1270 1271 return (err); 1272 } 1273 1274 /*ARGSUSED*/ 1275 static void 1276 dump_indirect(dnode_t *dn) 1277 { 1278 dnode_phys_t *dnp = dn->dn_phys; 1279 int j; 1280 zbookmark_phys_t czb; 1281 1282 (void) printf("Indirect blocks:\n"); 1283 1284 SET_BOOKMARK(&czb, dmu_objset_id(dn->dn_objset), 1285 dn->dn_object, dnp->dn_nlevels - 1, 0); 1286 for (j = 0; j < dnp->dn_nblkptr; j++) { 1287 czb.zb_blkid = j; 1288 (void) visit_indirect(dmu_objset_spa(dn->dn_objset), dnp, 1289 &dnp->dn_blkptr[j], 
&czb); 1290 } 1291 1292 (void) printf("\n"); 1293 } 1294 1295 /*ARGSUSED*/ 1296 static void 1297 dump_dsl_dir(objset_t *os, uint64_t object, void *data, size_t size) 1298 { 1299 dsl_dir_phys_t *dd = data; 1300 time_t crtime; 1301 char nice[32]; 1302 1303 if (dd == NULL) 1304 return; 1305 1306 ASSERT3U(size, >=, sizeof (dsl_dir_phys_t)); 1307 1308 crtime = dd->dd_creation_time; 1309 (void) printf("\t\tcreation_time = %s", ctime(&crtime)); 1310 (void) printf("\t\thead_dataset_obj = %llu\n", 1311 (u_longlong_t)dd->dd_head_dataset_obj); 1312 (void) printf("\t\tparent_dir_obj = %llu\n", 1313 (u_longlong_t)dd->dd_parent_obj); 1314 (void) printf("\t\torigin_obj = %llu\n", 1315 (u_longlong_t)dd->dd_origin_obj); 1316 (void) printf("\t\tchild_dir_zapobj = %llu\n", 1317 (u_longlong_t)dd->dd_child_dir_zapobj); 1318 zdb_nicenum(dd->dd_used_bytes, nice); 1319 (void) printf("\t\tused_bytes = %s\n", nice); 1320 zdb_nicenum(dd->dd_compressed_bytes, nice); 1321 (void) printf("\t\tcompressed_bytes = %s\n", nice); 1322 zdb_nicenum(dd->dd_uncompressed_bytes, nice); 1323 (void) printf("\t\tuncompressed_bytes = %s\n", nice); 1324 zdb_nicenum(dd->dd_quota, nice); 1325 (void) printf("\t\tquota = %s\n", nice); 1326 zdb_nicenum(dd->dd_reserved, nice); 1327 (void) printf("\t\treserved = %s\n", nice); 1328 (void) printf("\t\tprops_zapobj = %llu\n", 1329 (u_longlong_t)dd->dd_props_zapobj); 1330 (void) printf("\t\tdeleg_zapobj = %llu\n", 1331 (u_longlong_t)dd->dd_deleg_zapobj); 1332 (void) printf("\t\tflags = %llx\n", 1333 (u_longlong_t)dd->dd_flags); 1334 1335 #define DO(which) \ 1336 zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice); \ 1337 (void) printf("\t\tused_breakdown[" #which "] = %s\n", nice) 1338 DO(HEAD); 1339 DO(SNAP); 1340 DO(CHILD); 1341 DO(CHILD_RSRV); 1342 DO(REFRSRV); 1343 #undef DO 1344 } 1345 1346 /*ARGSUSED*/ 1347 static void 1348 dump_dsl_dataset(objset_t *os, uint64_t object, void *data, size_t size) 1349 { 1350 dsl_dataset_phys_t *ds = data; 1351 time_t crtime; 1352 char used[32], compressed[32], uncompressed[32], unique[32]; 1353 char blkbuf[BP_SPRINTF_LEN]; 1354 1355 if (ds == NULL) 1356 return; 1357 1358 ASSERT(size == sizeof (*ds)); 1359 crtime = ds->ds_creation_time; 1360 zdb_nicenum(ds->ds_referenced_bytes, used); 1361 zdb_nicenum(ds->ds_compressed_bytes, compressed); 1362 zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed); 1363 zdb_nicenum(ds->ds_unique_bytes, unique); 1364 snprintf_blkptr(blkbuf, sizeof (blkbuf), &ds->ds_bp); 1365 1366 (void) printf("\t\tdir_obj = %llu\n", 1367 (u_longlong_t)ds->ds_dir_obj); 1368 (void) printf("\t\tprev_snap_obj = %llu\n", 1369 (u_longlong_t)ds->ds_prev_snap_obj); 1370 (void) printf("\t\tprev_snap_txg = %llu\n", 1371 (u_longlong_t)ds->ds_prev_snap_txg); 1372 (void) printf("\t\tnext_snap_obj = %llu\n", 1373 (u_longlong_t)ds->ds_next_snap_obj); 1374 (void) printf("\t\tsnapnames_zapobj = %llu\n", 1375 (u_longlong_t)ds->ds_snapnames_zapobj); 1376 (void) printf("\t\tnum_children = %llu\n", 1377 (u_longlong_t)ds->ds_num_children); 1378 (void) printf("\t\tuserrefs_obj = %llu\n", 1379 (u_longlong_t)ds->ds_userrefs_obj); 1380 (void) printf("\t\tcreation_time = %s", ctime(&crtime)); 1381 (void) printf("\t\tcreation_txg = %llu\n", 1382 (u_longlong_t)ds->ds_creation_txg); 1383 (void) printf("\t\tdeadlist_obj = %llu\n", 1384 (u_longlong_t)ds->ds_deadlist_obj); 1385 (void) printf("\t\tused_bytes = %s\n", used); 1386 (void) printf("\t\tcompressed_bytes = %s\n", compressed); 1387 (void) printf("\t\tuncompressed_bytes = %s\n", uncompressed); 1388 (void) 
printf("\t\tunique = %s\n", unique); 1389 (void) printf("\t\tfsid_guid = %llu\n", 1390 (u_longlong_t)ds->ds_fsid_guid); 1391 (void) printf("\t\tguid = %llu\n", 1392 (u_longlong_t)ds->ds_guid); 1393 (void) printf("\t\tflags = %llx\n", 1394 (u_longlong_t)ds->ds_flags); 1395 (void) printf("\t\tnext_clones_obj = %llu\n", 1396 (u_longlong_t)ds->ds_next_clones_obj); 1397 (void) printf("\t\tprops_obj = %llu\n", 1398 (u_longlong_t)ds->ds_props_obj); 1399 (void) printf("\t\tbp = %s\n", blkbuf); 1400 } 1401 1402 /* ARGSUSED */ 1403 static int 1404 dump_bptree_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 1405 { 1406 char blkbuf[BP_SPRINTF_LEN]; 1407 1408 if (bp->blk_birth != 0) { 1409 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 1410 (void) printf("\t%s\n", blkbuf); 1411 } 1412 return (0); 1413 } 1414 1415 static void 1416 dump_bptree(objset_t *os, uint64_t obj, char *name) 1417 { 1418 char bytes[32]; 1419 bptree_phys_t *bt; 1420 dmu_buf_t *db; 1421 1422 if (dump_opt['d'] < 3) 1423 return; 1424 1425 VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db)); 1426 bt = db->db_data; 1427 zdb_nicenum(bt->bt_bytes, bytes); 1428 (void) printf("\n %s: %llu datasets, %s\n", 1429 name, (unsigned long long)(bt->bt_end - bt->bt_begin), bytes); 1430 dmu_buf_rele(db, FTAG); 1431 1432 if (dump_opt['d'] < 5) 1433 return; 1434 1435 (void) printf("\n"); 1436 1437 (void) bptree_iterate(os, obj, B_FALSE, dump_bptree_cb, NULL, NULL); 1438 } 1439 1440 /* ARGSUSED */ 1441 static int 1442 dump_bpobj_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 1443 { 1444 char blkbuf[BP_SPRINTF_LEN]; 1445 1446 ASSERT(bp->blk_birth != 0); 1447 snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp); 1448 (void) printf("\t%s\n", blkbuf); 1449 return (0); 1450 } 1451 1452 static void 1453 dump_full_bpobj(bpobj_t *bpo, char *name, int indent) 1454 { 1455 char bytes[32]; 1456 char comp[32]; 1457 char uncomp[32]; 1458 1459 if (dump_opt['d'] < 3) 1460 return; 1461 1462 zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes); 1463 if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) { 1464 zdb_nicenum(bpo->bpo_phys->bpo_comp, comp); 1465 zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp); 1466 (void) printf(" %*s: object %llu, %llu local blkptrs, " 1467 "%llu subobjs in object %llu, %s (%s/%s comp)\n", 1468 indent * 8, name, 1469 (u_longlong_t)bpo->bpo_object, 1470 (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs, 1471 (u_longlong_t)bpo->bpo_phys->bpo_num_subobjs, 1472 (u_longlong_t)bpo->bpo_phys->bpo_subobjs, 1473 bytes, comp, uncomp); 1474 1475 for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) { 1476 uint64_t subobj; 1477 bpobj_t subbpo; 1478 int error; 1479 VERIFY0(dmu_read(bpo->bpo_os, 1480 bpo->bpo_phys->bpo_subobjs, 1481 i * sizeof (subobj), sizeof (subobj), &subobj, 0)); 1482 error = bpobj_open(&subbpo, bpo->bpo_os, subobj); 1483 if (error != 0) { 1484 (void) printf("ERROR %u while trying to open " 1485 "subobj id %llu\n", 1486 error, (u_longlong_t)subobj); 1487 continue; 1488 } 1489 dump_full_bpobj(&subbpo, "subobj", indent + 1); 1490 bpobj_close(&subbpo); 1491 } 1492 } else { 1493 (void) printf(" %*s: object %llu, %llu blkptrs, %s\n", 1494 indent * 8, name, 1495 (u_longlong_t)bpo->bpo_object, 1496 (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs, 1497 bytes); 1498 } 1499 1500 if (dump_opt['d'] < 5) 1501 return; 1502 1503 1504 if (indent == 0) { 1505 (void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL); 1506 (void) printf("\n"); 1507 } 1508 } 1509 1510 static void 1511 dump_deadlist(dsl_deadlist_t *dl) 1512 { 1513 dsl_deadlist_entry_t 
*dle; 1514 uint64_t unused; 1515 char bytes[32]; 1516 char comp[32]; 1517 char uncomp[32]; 1518 1519 if (dump_opt['d'] < 3) 1520 return; 1521 1522 if (dl->dl_oldfmt) { 1523 dump_full_bpobj(&dl->dl_bpobj, "old-format deadlist", 0); 1524 return; 1525 } 1526 1527 zdb_nicenum(dl->dl_phys->dl_used, bytes); 1528 zdb_nicenum(dl->dl_phys->dl_comp, comp); 1529 zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp); 1530 (void) printf("\n Deadlist: %s (%s/%s comp)\n", 1531 bytes, comp, uncomp); 1532 1533 if (dump_opt['d'] < 4) 1534 return; 1535 1536 (void) printf("\n"); 1537 1538 /* force the tree to be loaded */ 1539 dsl_deadlist_space_range(dl, 0, UINT64_MAX, &unused, &unused, &unused); 1540 1541 for (dle = avl_first(&dl->dl_tree); dle; 1542 dle = AVL_NEXT(&dl->dl_tree, dle)) { 1543 if (dump_opt['d'] >= 5) { 1544 char buf[128]; 1545 (void) snprintf(buf, sizeof (buf), "mintxg %llu -> ", 1546 (longlong_t)dle->dle_mintxg, 1547 (longlong_t)dle->dle_bpobj.bpo_object); 1548 1549 dump_full_bpobj(&dle->dle_bpobj, buf, 0); 1550 } else { 1551 (void) printf("mintxg %llu -> obj %llu\n", 1552 (longlong_t)dle->dle_mintxg, 1553 (longlong_t)dle->dle_bpobj.bpo_object); 1554 1555 } 1556 } 1557 } 1558 1559 static avl_tree_t idx_tree; 1560 static avl_tree_t domain_tree; 1561 static boolean_t fuid_table_loaded; 1562 static boolean_t sa_loaded; 1563 sa_attr_type_t *sa_attr_table; 1564 1565 static void 1566 fuid_table_destroy() 1567 { 1568 if (fuid_table_loaded) { 1569 zfs_fuid_table_destroy(&idx_tree, &domain_tree); 1570 fuid_table_loaded = B_FALSE; 1571 } 1572 } 1573 1574 /* 1575 * print uid or gid information. 1576 * For normal POSIX id just the id is printed in decimal format. 1577 * For CIFS files with FUID the fuid is printed in hex followed by 1578 * the domain-rid string. 1579 */ 1580 static void 1581 print_idstr(uint64_t id, const char *id_type) 1582 { 1583 if (FUID_INDEX(id)) { 1584 char *domain; 1585 1586 domain = zfs_fuid_idx_domain(&idx_tree, FUID_INDEX(id)); 1587 (void) printf("\t%s %llx [%s-%d]\n", id_type, 1588 (u_longlong_t)id, domain, (int)FUID_RID(id)); 1589 } else { 1590 (void) printf("\t%s %llu\n", id_type, (u_longlong_t)id); 1591 } 1592 1593 } 1594 1595 static void 1596 dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid) 1597 { 1598 uint32_t uid_idx, gid_idx; 1599 1600 uid_idx = FUID_INDEX(uid); 1601 gid_idx = FUID_INDEX(gid); 1602 1603 /* Load domain table, if not already loaded */ 1604 if (!fuid_table_loaded && (uid_idx || gid_idx)) { 1605 uint64_t fuid_obj; 1606 1607 /* first find the fuid object. 
It lives in the master node */ 1608 VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 1609 8, 1, &fuid_obj) == 0); 1610 zfs_fuid_avl_tree_create(&idx_tree, &domain_tree); 1611 (void) zfs_fuid_table_load(os, fuid_obj, 1612 &idx_tree, &domain_tree); 1613 fuid_table_loaded = B_TRUE; 1614 } 1615 1616 print_idstr(uid, "uid"); 1617 print_idstr(gid, "gid"); 1618 } 1619 1620 /*ARGSUSED*/ 1621 static void 1622 dump_znode(objset_t *os, uint64_t object, void *data, size_t size) 1623 { 1624 char path[MAXPATHLEN * 2]; /* allow for xattr and failure prefix */ 1625 sa_handle_t *hdl; 1626 uint64_t xattr, rdev, gen; 1627 uint64_t uid, gid, mode, fsize, parent, links; 1628 uint64_t pflags; 1629 uint64_t acctm[2], modtm[2], chgtm[2], crtm[2]; 1630 time_t z_crtime, z_atime, z_mtime, z_ctime; 1631 sa_bulk_attr_t bulk[12]; 1632 int idx = 0; 1633 int error; 1634 1635 if (!sa_loaded) { 1636 uint64_t sa_attrs = 0; 1637 uint64_t version; 1638 1639 VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZPL_VERSION_STR, 1640 8, 1, &version) == 0); 1641 if (version >= ZPL_VERSION_SA) { 1642 VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 1643 8, 1, &sa_attrs) == 0); 1644 } 1645 if ((error = sa_setup(os, sa_attrs, zfs_attr_table, 1646 ZPL_END, &sa_attr_table)) != 0) { 1647 (void) printf("sa_setup failed errno %d, can't " 1648 "display znode contents\n", error); 1649 return; 1650 } 1651 sa_loaded = B_TRUE; 1652 } 1653 1654 if (sa_handle_get(os, object, NULL, SA_HDL_PRIVATE, &hdl)) { 1655 (void) printf("Failed to get handle for SA znode\n"); 1656 return; 1657 } 1658 1659 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_UID], NULL, &uid, 8); 1660 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GID], NULL, &gid, 8); 1661 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_LINKS], NULL, 1662 &links, 8); 1663 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GEN], NULL, &gen, 8); 1664 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MODE], NULL, 1665 &mode, 8); 1666 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_PARENT], 1667 NULL, &parent, 8); 1668 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_SIZE], NULL, 1669 &fsize, 8); 1670 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_ATIME], NULL, 1671 acctm, 16); 1672 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MTIME], NULL, 1673 modtm, 16); 1674 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CRTIME], NULL, 1675 crtm, 16); 1676 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CTIME], NULL, 1677 chgtm, 16); 1678 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_FLAGS], NULL, 1679 &pflags, 8); 1680 1681 if (sa_bulk_lookup(hdl, bulk, idx)) { 1682 (void) sa_handle_destroy(hdl); 1683 return; 1684 } 1685 1686 error = zfs_obj_to_path(os, object, path, sizeof (path)); 1687 if (error != 0) { 1688 (void) snprintf(path, sizeof (path), "\?\?\?<object#%llu>", 1689 (u_longlong_t)object); 1690 } 1691 if (dump_opt['d'] < 3) { 1692 (void) printf("\t%s\n", path); 1693 (void) sa_handle_destroy(hdl); 1694 return; 1695 } 1696 1697 z_crtime = (time_t)crtm[0]; 1698 z_atime = (time_t)acctm[0]; 1699 z_mtime = (time_t)modtm[0]; 1700 z_ctime = (time_t)chgtm[0]; 1701 1702 (void) printf("\tpath %s\n", path); 1703 dump_uidgid(os, uid, gid); 1704 (void) printf("\tatime %s", ctime(&z_atime)); 1705 (void) printf("\tmtime %s", ctime(&z_mtime)); 1706 (void) printf("\tctime %s", ctime(&z_ctime)); 1707 (void) printf("\tcrtime %s", ctime(&z_crtime)); 1708 (void) printf("\tgen %llu\n", (u_longlong_t)gen); 1709 (void) printf("\tmode %llo\n", (u_longlong_t)mode); 1710 (void) printf("\tsize %llu\n", (u_longlong_t)fsize); 1711 (void) printf("\tparent 
%llu\n", (u_longlong_t)parent); 1712 (void) printf("\tlinks %llu\n", (u_longlong_t)links); 1713 (void) printf("\tpflags %llx\n", (u_longlong_t)pflags); 1714 if (sa_lookup(hdl, sa_attr_table[ZPL_XATTR], &xattr, 1715 sizeof (uint64_t)) == 0) 1716 (void) printf("\txattr %llu\n", (u_longlong_t)xattr); 1717 if (sa_lookup(hdl, sa_attr_table[ZPL_RDEV], &rdev, 1718 sizeof (uint64_t)) == 0) 1719 (void) printf("\trdev 0x%016llx\n", (u_longlong_t)rdev); 1720 sa_handle_destroy(hdl); 1721 } 1722 1723 /*ARGSUSED*/ 1724 static void 1725 dump_acl(objset_t *os, uint64_t object, void *data, size_t size) 1726 { 1727 } 1728 1729 /*ARGSUSED*/ 1730 static void 1731 dump_dmu_objset(objset_t *os, uint64_t object, void *data, size_t size) 1732 { 1733 } 1734 1735 static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = { 1736 dump_none, /* unallocated */ 1737 dump_zap, /* object directory */ 1738 dump_uint64, /* object array */ 1739 dump_none, /* packed nvlist */ 1740 dump_packed_nvlist, /* packed nvlist size */ 1741 dump_none, /* bpobj */ 1742 dump_bpobj, /* bpobj header */ 1743 dump_none, /* SPA space map header */ 1744 dump_none, /* SPA space map */ 1745 dump_none, /* ZIL intent log */ 1746 dump_dnode, /* DMU dnode */ 1747 dump_dmu_objset, /* DMU objset */ 1748 dump_dsl_dir, /* DSL directory */ 1749 dump_zap, /* DSL directory child map */ 1750 dump_zap, /* DSL dataset snap map */ 1751 dump_zap, /* DSL props */ 1752 dump_dsl_dataset, /* DSL dataset */ 1753 dump_znode, /* ZFS znode */ 1754 dump_acl, /* ZFS V0 ACL */ 1755 dump_uint8, /* ZFS plain file */ 1756 dump_zpldir, /* ZFS directory */ 1757 dump_zap, /* ZFS master node */ 1758 dump_zap, /* ZFS delete queue */ 1759 dump_uint8, /* zvol object */ 1760 dump_zap, /* zvol prop */ 1761 dump_uint8, /* other uint8[] */ 1762 dump_uint64, /* other uint64[] */ 1763 dump_zap, /* other ZAP */ 1764 dump_zap, /* persistent error log */ 1765 dump_uint8, /* SPA history */ 1766 dump_history_offsets, /* SPA history offsets */ 1767 dump_zap, /* Pool properties */ 1768 dump_zap, /* DSL permissions */ 1769 dump_acl, /* ZFS ACL */ 1770 dump_uint8, /* ZFS SYSACL */ 1771 dump_none, /* FUID nvlist */ 1772 dump_packed_nvlist, /* FUID nvlist size */ 1773 dump_zap, /* DSL dataset next clones */ 1774 dump_zap, /* DSL scrub queue */ 1775 dump_zap, /* ZFS user/group used */ 1776 dump_zap, /* ZFS user/group quota */ 1777 dump_zap, /* snapshot refcount tags */ 1778 dump_ddt_zap, /* DDT ZAP object */ 1779 dump_zap, /* DDT statistics */ 1780 dump_znode, /* SA object */ 1781 dump_zap, /* SA Master Node */ 1782 dump_sa_attrs, /* SA attribute registration */ 1783 dump_sa_layouts, /* SA attribute layouts */ 1784 dump_zap, /* DSL scrub translations */ 1785 dump_none, /* fake dedup BP */ 1786 dump_zap, /* deadlist */ 1787 dump_none, /* deadlist hdr */ 1788 dump_zap, /* dsl clones */ 1789 dump_bpobj_subobjs, /* bpobj subobjs */ 1790 dump_unknown, /* Unknown type, must be last */ 1791 }; 1792 1793 static void 1794 dump_object(objset_t *os, uint64_t object, int verbosity, int *print_header) 1795 { 1796 dmu_buf_t *db = NULL; 1797 dmu_object_info_t doi; 1798 dnode_t *dn; 1799 void *bonus = NULL; 1800 size_t bsize = 0; 1801 char iblk[32], dblk[32], lsize[32], asize[32], fill[32]; 1802 char bonus_size[32]; 1803 char aux[50]; 1804 int error; 1805 1806 if (*print_header) { 1807 (void) printf("\n%10s %3s %5s %5s %5s %5s %6s %s\n", 1808 "Object", "lvl", "iblk", "dblk", "dsize", "lsize", 1809 "%full", "type"); 1810 *print_header = 0; 1811 } 1812 1813 if (object == 0) { 1814 dn = DMU_META_DNODE(os); 1815 } 
else { 1816 error = dmu_bonus_hold(os, object, FTAG, &db); 1817 if (error) 1818 fatal("dmu_bonus_hold(%llu) failed, errno %u", 1819 object, error); 1820 bonus = db->db_data; 1821 bsize = db->db_size; 1822 dn = DB_DNODE((dmu_buf_impl_t *)db); 1823 } 1824 dmu_object_info_from_dnode(dn, &doi); 1825 1826 zdb_nicenum(doi.doi_metadata_block_size, iblk); 1827 zdb_nicenum(doi.doi_data_block_size, dblk); 1828 zdb_nicenum(doi.doi_max_offset, lsize); 1829 zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize); 1830 zdb_nicenum(doi.doi_bonus_size, bonus_size); 1831 (void) sprintf(fill, "%6.2f", 100.0 * doi.doi_fill_count * 1832 doi.doi_data_block_size / (object == 0 ? DNODES_PER_BLOCK : 1) / 1833 doi.doi_max_offset); 1834 1835 aux[0] = '\0'; 1836 1837 if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) { 1838 (void) snprintf(aux + strlen(aux), sizeof (aux), " (K=%s)", 1839 ZDB_CHECKSUM_NAME(doi.doi_checksum)); 1840 } 1841 1842 if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) { 1843 (void) snprintf(aux + strlen(aux), sizeof (aux), " (Z=%s)", 1844 ZDB_COMPRESS_NAME(doi.doi_compress)); 1845 } 1846 1847 (void) printf("%10lld %3u %5s %5s %5s %5s %6s %s%s\n", 1848 (u_longlong_t)object, doi.doi_indirection, iblk, dblk, 1849 asize, lsize, fill, ZDB_OT_NAME(doi.doi_type), aux); 1850 1851 if (doi.doi_bonus_type != DMU_OT_NONE && verbosity > 3) { 1852 (void) printf("%10s %3s %5s %5s %5s %5s %6s %s\n", 1853 "", "", "", "", "", bonus_size, "bonus", 1854 ZDB_OT_NAME(doi.doi_bonus_type)); 1855 } 1856 1857 if (verbosity >= 4) { 1858 (void) printf("\tdnode flags: %s%s%s\n", 1859 (dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) ? 1860 "USED_BYTES " : "", 1861 (dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) ? 1862 "USERUSED_ACCOUNTED " : "", 1863 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ? 1864 "SPILL_BLKPTR" : ""); 1865 (void) printf("\tdnode maxblkid: %llu\n", 1866 (longlong_t)dn->dn_phys->dn_maxblkid); 1867 1868 object_viewer[ZDB_OT_TYPE(doi.doi_bonus_type)](os, object, 1869 bonus, bsize); 1870 object_viewer[ZDB_OT_TYPE(doi.doi_type)](os, object, NULL, 0); 1871 *print_header = 1; 1872 } 1873 1874 if (verbosity >= 5) 1875 dump_indirect(dn); 1876 1877 if (verbosity >= 5) { 1878 /* 1879 * Report the list of segments that comprise the object. 
1880 */ 1881 uint64_t start = 0; 1882 uint64_t end; 1883 uint64_t blkfill = 1; 1884 int minlvl = 1; 1885 1886 if (dn->dn_type == DMU_OT_DNODE) { 1887 minlvl = 0; 1888 blkfill = DNODES_PER_BLOCK; 1889 } 1890 1891 for (;;) { 1892 char segsize[32]; 1893 error = dnode_next_offset(dn, 1894 0, &start, minlvl, blkfill, 0); 1895 if (error) 1896 break; 1897 end = start; 1898 error = dnode_next_offset(dn, 1899 DNODE_FIND_HOLE, &end, minlvl, blkfill, 0); 1900 zdb_nicenum(end - start, segsize); 1901 (void) printf("\t\tsegment [%016llx, %016llx)" 1902 " size %5s\n", (u_longlong_t)start, 1903 (u_longlong_t)end, segsize); 1904 if (error) 1905 break; 1906 start = end; 1907 } 1908 } 1909 1910 if (db != NULL) 1911 dmu_buf_rele(db, FTAG); 1912 } 1913 1914 static char *objset_types[DMU_OST_NUMTYPES] = { 1915 "NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" }; 1916 1917 static void 1918 dump_dir(objset_t *os) 1919 { 1920 dmu_objset_stats_t dds; 1921 uint64_t object, object_count; 1922 uint64_t refdbytes, usedobjs, scratch; 1923 char numbuf[32]; 1924 char blkbuf[BP_SPRINTF_LEN + 20]; 1925 char osname[MAXNAMELEN]; 1926 char *type = "UNKNOWN"; 1927 int verbosity = dump_opt['d']; 1928 int print_header = 1; 1929 int i, error; 1930 1931 dsl_pool_config_enter(dmu_objset_pool(os), FTAG); 1932 dmu_objset_fast_stat(os, &dds); 1933 dsl_pool_config_exit(dmu_objset_pool(os), FTAG); 1934 1935 if (dds.dds_type < DMU_OST_NUMTYPES) 1936 type = objset_types[dds.dds_type]; 1937 1938 if (dds.dds_type == DMU_OST_META) { 1939 dds.dds_creation_txg = TXG_INITIAL; 1940 usedobjs = BP_GET_FILL(os->os_rootbp); 1941 refdbytes = dsl_dir_phys(os->os_spa->spa_dsl_pool->dp_mos_dir)-> 1942 dd_used_bytes; 1943 } else { 1944 dmu_objset_space(os, &refdbytes, &scratch, &usedobjs, &scratch); 1945 } 1946 1947 ASSERT3U(usedobjs, ==, BP_GET_FILL(os->os_rootbp)); 1948 1949 zdb_nicenum(refdbytes, numbuf); 1950 1951 if (verbosity >= 4) { 1952 (void) snprintf(blkbuf, sizeof (blkbuf), ", rootbp "); 1953 (void) snprintf_blkptr(blkbuf + strlen(blkbuf), 1954 sizeof (blkbuf) - strlen(blkbuf), os->os_rootbp); 1955 } else { 1956 blkbuf[0] = '\0'; 1957 } 1958 1959 dmu_objset_name(os, osname); 1960 1961 (void) printf("Dataset %s [%s], ID %llu, cr_txg %llu, " 1962 "%s, %llu objects%s\n", 1963 osname, type, (u_longlong_t)dmu_objset_id(os), 1964 (u_longlong_t)dds.dds_creation_txg, 1965 numbuf, (u_longlong_t)usedobjs, blkbuf); 1966 1967 if (zopt_objects != 0) { 1968 for (i = 0; i < zopt_objects; i++) 1969 dump_object(os, zopt_object[i], verbosity, 1970 &print_header); 1971 (void) printf("\n"); 1972 return; 1973 } 1974 1975 if (dump_opt['i'] != 0 || verbosity >= 2) 1976 dump_intent_log(dmu_objset_zil(os)); 1977 1978 if (dmu_objset_ds(os) != NULL) 1979 dump_deadlist(&dmu_objset_ds(os)->ds_deadlist); 1980 1981 if (verbosity < 2) 1982 return; 1983 1984 if (BP_IS_HOLE(os->os_rootbp)) 1985 return; 1986 1987 dump_object(os, 0, verbosity, &print_header); 1988 object_count = 0; 1989 if (DMU_USERUSED_DNODE(os) != NULL && 1990 DMU_USERUSED_DNODE(os)->dn_type != 0) { 1991 dump_object(os, DMU_USERUSED_OBJECT, verbosity, &print_header); 1992 dump_object(os, DMU_GROUPUSED_OBJECT, verbosity, &print_header); 1993 } 1994 1995 object = 0; 1996 while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) { 1997 dump_object(os, object, verbosity, &print_header); 1998 object_count++; 1999 } 2000 2001 ASSERT3U(object_count, ==, usedobjs); 2002 2003 (void) printf("\n"); 2004 2005 if (error != ESRCH) { 2006 (void) fprintf(stderr, "dmu_object_next() = %d\n", error); 2007 abort(); 2008 } 2009 } 
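/*
 * Print the contents of an uberblock: magic, version, txg, guid sum and
 * timestamp, plus the root block pointer when -uuu or higher is given.
 */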
2010 2011 static void 2012 dump_uberblock(uberblock_t *ub, const char *header, const char *footer) 2013 { 2014 time_t timestamp = ub->ub_timestamp; 2015 2016 (void) printf(header ? header : ""); 2017 (void) printf("\tmagic = %016llx\n", (u_longlong_t)ub->ub_magic); 2018 (void) printf("\tversion = %llu\n", (u_longlong_t)ub->ub_version); 2019 (void) printf("\ttxg = %llu\n", (u_longlong_t)ub->ub_txg); 2020 (void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum); 2021 (void) printf("\ttimestamp = %llu UTC = %s", 2022 (u_longlong_t)ub->ub_timestamp, asctime(localtime(&timestamp))); 2023 if (dump_opt['u'] >= 3) { 2024 char blkbuf[BP_SPRINTF_LEN]; 2025 snprintf_blkptr(blkbuf, sizeof (blkbuf), &ub->ub_rootbp); 2026 (void) printf("\trootbp = %s\n", blkbuf); 2027 } 2028 (void) printf(footer ? footer : ""); 2029 } 2030 2031 static void 2032 dump_config(spa_t *spa) 2033 { 2034 dmu_buf_t *db; 2035 size_t nvsize = 0; 2036 int error = 0; 2037 2038 2039 error = dmu_bonus_hold(spa->spa_meta_objset, 2040 spa->spa_config_object, FTAG, &db); 2041 2042 if (error == 0) { 2043 nvsize = *(uint64_t *)db->db_data; 2044 dmu_buf_rele(db, FTAG); 2045 2046 (void) printf("\nMOS Configuration:\n"); 2047 dump_packed_nvlist(spa->spa_meta_objset, 2048 spa->spa_config_object, (void *)&nvsize, 1); 2049 } else { 2050 (void) fprintf(stderr, "dmu_bonus_hold(%llu) failed, errno %d", 2051 (u_longlong_t)spa->spa_config_object, error); 2052 } 2053 } 2054 2055 static void 2056 dump_cachefile(const char *cachefile) 2057 { 2058 int fd; 2059 struct stat64 statbuf; 2060 char *buf; 2061 nvlist_t *config; 2062 2063 if ((fd = open64(cachefile, O_RDONLY)) < 0) { 2064 (void) printf("cannot open '%s': %s\n", cachefile, 2065 strerror(errno)); 2066 exit(1); 2067 } 2068 2069 if (fstat64(fd, &statbuf) != 0) { 2070 (void) printf("failed to stat '%s': %s\n", cachefile, 2071 strerror(errno)); 2072 exit(1); 2073 } 2074 2075 if ((buf = malloc(statbuf.st_size)) == NULL) { 2076 (void) fprintf(stderr, "failed to allocate %llu bytes\n", 2077 (u_longlong_t)statbuf.st_size); 2078 exit(1); 2079 } 2080 2081 if (read(fd, buf, statbuf.st_size) != statbuf.st_size) { 2082 (void) fprintf(stderr, "failed to read %llu bytes\n", 2083 (u_longlong_t)statbuf.st_size); 2084 exit(1); 2085 } 2086 2087 (void) close(fd); 2088 2089 if (nvlist_unpack(buf, statbuf.st_size, &config, 0) != 0) { 2090 (void) fprintf(stderr, "failed to unpack nvlist\n"); 2091 exit(1); 2092 } 2093 2094 free(buf); 2095 2096 dump_nvlist(config, 0); 2097 2098 nvlist_free(config); 2099 } 2100 2101 #define ZDB_MAX_UB_HEADER_SIZE 32 2102 2103 static void 2104 dump_label_uberblocks(vdev_label_t *lbl, uint64_t ashift) 2105 { 2106 vdev_t vd; 2107 vdev_t *vdp = &vd; 2108 char header[ZDB_MAX_UB_HEADER_SIZE]; 2109 2110 vd.vdev_ashift = ashift; 2111 vdp->vdev_top = vdp; 2112 2113 for (int i = 0; i < VDEV_UBERBLOCK_COUNT(vdp); i++) { 2114 uint64_t uoff = VDEV_UBERBLOCK_OFFSET(vdp, i); 2115 uberblock_t *ub = (void *)((char *)lbl + uoff); 2116 2117 if (uberblock_verify(ub)) 2118 continue; 2119 (void) snprintf(header, ZDB_MAX_UB_HEADER_SIZE, 2120 "Uberblock[%d]\n", i); 2121 dump_uberblock(ub, header, ""); 2122 } 2123 } 2124 2125 static void 2126 dump_label(const char *dev) 2127 { 2128 int fd; 2129 vdev_label_t label; 2130 char *path, *buf = label.vl_vdev_phys.vp_nvlist; 2131 size_t buflen = sizeof (label.vl_vdev_phys.vp_nvlist); 2132 struct stat64 statbuf; 2133 uint64_t psize, ashift; 2134 int len = strlen(dev) + 1; 2135 2136 if (strncmp(dev, "/dev/dsk/", 9) == 0) { 2137 len++; 2138 path = malloc(len); 2139
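/*
 * Hypothetical example of the path rewrite below: a block-device name
 * such as /dev/dsk/c1t0d0s0 becomes /dev/rdsk/c1t0d0s0, since the label
 * is read from the raw device (the block-device form is rejected
 * further down in this function).
 */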
(void) snprintf(path, len, "%s%s", "/dev/rdsk/", dev + 9); 2140 } else { 2141 path = strdup(dev); 2142 } 2143 2144 if ((fd = open64(path, O_RDONLY)) < 0) { 2145 (void) printf("cannot open '%s': %s\n", path, strerror(errno)); 2146 free(path); 2147 exit(1); 2148 } 2149 2150 if (fstat64(fd, &statbuf) != 0) { 2151 (void) printf("failed to stat '%s': %s\n", path, 2152 strerror(errno)); 2153 free(path); 2154 (void) close(fd); 2155 exit(1); 2156 } 2157 2158 if (S_ISBLK(statbuf.st_mode)) { 2159 (void) printf("cannot use '%s': character device required\n", 2160 path); 2161 free(path); 2162 (void) close(fd); 2163 exit(1); 2164 } 2165 2166 psize = statbuf.st_size; 2167 psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t)); 2168 2169 for (int l = 0; l < VDEV_LABELS; l++) { 2170 nvlist_t *config = NULL; 2171 2172 (void) printf("--------------------------------------------\n"); 2173 (void) printf("LABEL %d\n", l); 2174 (void) printf("--------------------------------------------\n"); 2175 2176 if (pread64(fd, &label, sizeof (label), 2177 vdev_label_offset(psize, l, 0)) != sizeof (label)) { 2178 (void) printf("failed to read label %d\n", l); 2179 continue; 2180 } 2181 2182 if (nvlist_unpack(buf, buflen, &config, 0) != 0) { 2183 (void) printf("failed to unpack label %d\n", l); 2184 ashift = SPA_MINBLOCKSHIFT; 2185 } else { 2186 nvlist_t *vdev_tree = NULL; 2187 2188 dump_nvlist(config, 4); 2189 if ((nvlist_lookup_nvlist(config, 2190 ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0) || 2191 (nvlist_lookup_uint64(vdev_tree, 2192 ZPOOL_CONFIG_ASHIFT, &ashift) != 0)) 2193 ashift = SPA_MINBLOCKSHIFT; 2194 nvlist_free(config); 2195 } 2196 if (dump_opt['u']) 2197 dump_label_uberblocks(&label, ashift); 2198 } 2199 2200 free(path); 2201 (void) close(fd); 2202 } 2203 2204 static uint64_t dataset_feature_count[SPA_FEATURES]; 2205 2206 /*ARGSUSED*/ 2207 static int 2208 dump_one_dir(const char *dsname, void *arg) 2209 { 2210 int error; 2211 objset_t *os; 2212 2213 error = dmu_objset_own(dsname, DMU_OST_ANY, B_TRUE, FTAG, &os); 2214 if (error) { 2215 (void) printf("Could not open %s, error %d\n", dsname, error); 2216 return (0); 2217 } 2218 2219 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) { 2220 if (!dmu_objset_ds(os)->ds_feature_inuse[f]) 2221 continue; 2222 ASSERT(spa_feature_table[f].fi_flags & 2223 ZFEATURE_FLAG_PER_DATASET); 2224 dataset_feature_count[f]++; 2225 } 2226 2227 dump_dir(os); 2228 dmu_objset_disown(os, FTAG); 2229 fuid_table_destroy(); 2230 sa_loaded = B_FALSE; 2231 return (0); 2232 } 2233 2234 /* 2235 * Block statistics. 2236 */ 2237 #define PSIZE_HISTO_SIZE (SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 2) 2238 typedef struct zdb_blkstats { 2239 uint64_t zb_asize; 2240 uint64_t zb_lsize; 2241 uint64_t zb_psize; 2242 uint64_t zb_count; 2243 uint64_t zb_gangs; 2244 uint64_t zb_ditto_samevdev; 2245 uint64_t zb_psize_histogram[PSIZE_HISTO_SIZE]; 2246 } zdb_blkstats_t; 2247 2248 /* 2249 * Extended object types to report deferred frees and dedup auto-ditto blocks. 
2250 */ 2251 #define ZDB_OT_DEFERRED (DMU_OT_NUMTYPES + 0) 2252 #define ZDB_OT_DITTO (DMU_OT_NUMTYPES + 1) 2253 #define ZDB_OT_OTHER (DMU_OT_NUMTYPES + 2) 2254 #define ZDB_OT_TOTAL (DMU_OT_NUMTYPES + 3) 2255 2256 static char *zdb_ot_extname[] = { 2257 "deferred free", 2258 "dedup ditto", 2259 "other", 2260 "Total", 2261 }; 2262 2263 #define ZB_TOTAL DN_MAX_LEVELS 2264 2265 typedef struct zdb_cb { 2266 zdb_blkstats_t zcb_type[ZB_TOTAL + 1][ZDB_OT_TOTAL + 1]; 2267 uint64_t zcb_dedup_asize; 2268 uint64_t zcb_dedup_blocks; 2269 uint64_t zcb_embedded_blocks[NUM_BP_EMBEDDED_TYPES]; 2270 uint64_t zcb_embedded_histogram[NUM_BP_EMBEDDED_TYPES] 2271 [BPE_PAYLOAD_SIZE]; 2272 uint64_t zcb_start; 2273 uint64_t zcb_lastprint; 2274 uint64_t zcb_totalasize; 2275 uint64_t zcb_errors[256]; 2276 int zcb_readfails; 2277 int zcb_haderrors; 2278 spa_t *zcb_spa; 2279 } zdb_cb_t; 2280 2281 static void 2282 zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp, 2283 dmu_object_type_t type) 2284 { 2285 uint64_t refcnt = 0; 2286 2287 ASSERT(type < ZDB_OT_TOTAL); 2288 2289 if (zilog && zil_bp_tree_add(zilog, bp) != 0) 2290 return; 2291 2292 for (int i = 0; i < 4; i++) { 2293 int l = (i < 2) ? BP_GET_LEVEL(bp) : ZB_TOTAL; 2294 int t = (i & 1) ? type : ZDB_OT_TOTAL; 2295 int equal; 2296 zdb_blkstats_t *zb = &zcb->zcb_type[l][t]; 2297 2298 zb->zb_asize += BP_GET_ASIZE(bp); 2299 zb->zb_lsize += BP_GET_LSIZE(bp); 2300 zb->zb_psize += BP_GET_PSIZE(bp); 2301 zb->zb_count++; 2302 2303 /* 2304 * The histogram is only big enough to record blocks up to 2305 * SPA_OLD_MAXBLOCKSIZE; larger blocks go into the last, 2306 * "other", bucket. 2307 */ 2308 int idx = BP_GET_PSIZE(bp) >> SPA_MINBLOCKSHIFT; 2309 idx = MIN(idx, SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 1); 2310 zb->zb_psize_histogram[idx]++; 2311 2312 zb->zb_gangs += BP_COUNT_GANG(bp); 2313 2314 switch (BP_GET_NDVAS(bp)) { 2315 case 2: 2316 if (DVA_GET_VDEV(&bp->blk_dva[0]) == 2317 DVA_GET_VDEV(&bp->blk_dva[1])) 2318 zb->zb_ditto_samevdev++; 2319 break; 2320 case 3: 2321 equal = (DVA_GET_VDEV(&bp->blk_dva[0]) == 2322 DVA_GET_VDEV(&bp->blk_dva[1])) + 2323 (DVA_GET_VDEV(&bp->blk_dva[0]) == 2324 DVA_GET_VDEV(&bp->blk_dva[2])) + 2325 (DVA_GET_VDEV(&bp->blk_dva[1]) == 2326 DVA_GET_VDEV(&bp->blk_dva[2])); 2327 if (equal != 0) 2328 zb->zb_ditto_samevdev++; 2329 break; 2330 } 2331 2332 } 2333 2334 if (BP_IS_EMBEDDED(bp)) { 2335 zcb->zcb_embedded_blocks[BPE_GET_ETYPE(bp)]++; 2336 zcb->zcb_embedded_histogram[BPE_GET_ETYPE(bp)] 2337 [BPE_GET_PSIZE(bp)]++; 2338 return; 2339 } 2340 2341 if (dump_opt['L']) 2342 return; 2343 2344 if (BP_GET_DEDUP(bp)) { 2345 ddt_t *ddt; 2346 ddt_entry_t *dde; 2347 2348 ddt = ddt_select(zcb->zcb_spa, bp); 2349 ddt_enter(ddt); 2350 dde = ddt_lookup(ddt, bp, B_FALSE); 2351 2352 if (dde == NULL) { 2353 refcnt = 0; 2354 } else { 2355 ddt_phys_t *ddp = ddt_phys_select(dde, bp); 2356 ddt_phys_decref(ddp); 2357 refcnt = ddp->ddp_refcnt; 2358 if (ddt_phys_total_refcnt(dde) == 0) 2359 ddt_remove(ddt, dde); 2360 } 2361 ddt_exit(ddt); 2362 } 2363 2364 VERIFY3U(zio_wait(zio_claim(NULL, zcb->zcb_spa, 2365 refcnt ? 
0 : spa_first_txg(zcb->zcb_spa), 2366 bp, NULL, NULL, ZIO_FLAG_CANFAIL)), ==, 0); 2367 } 2368 2369 static void 2370 zdb_blkptr_done(zio_t *zio) 2371 { 2372 spa_t *spa = zio->io_spa; 2373 blkptr_t *bp = zio->io_bp; 2374 int ioerr = zio->io_error; 2375 zdb_cb_t *zcb = zio->io_private; 2376 zbookmark_phys_t *zb = &zio->io_bookmark; 2377 2378 zio_data_buf_free(zio->io_data, zio->io_size); 2379 2380 mutex_enter(&spa->spa_scrub_lock); 2381 spa->spa_scrub_inflight--; 2382 cv_broadcast(&spa->spa_scrub_io_cv); 2383 2384 if (ioerr && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 2385 char blkbuf[BP_SPRINTF_LEN]; 2386 2387 zcb->zcb_haderrors = 1; 2388 zcb->zcb_errors[ioerr]++; 2389 2390 if (dump_opt['b'] >= 2) 2391 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 2392 else 2393 blkbuf[0] = '\0'; 2394 2395 (void) printf("zdb_blkptr_cb: " 2396 "Got error %d reading " 2397 "<%llu, %llu, %lld, %llx> %s -- skipping\n", 2398 ioerr, 2399 (u_longlong_t)zb->zb_objset, 2400 (u_longlong_t)zb->zb_object, 2401 (u_longlong_t)zb->zb_level, 2402 (u_longlong_t)zb->zb_blkid, 2403 blkbuf); 2404 } 2405 mutex_exit(&spa->spa_scrub_lock); 2406 } 2407 2408 static int 2409 zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 2410 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 2411 { 2412 zdb_cb_t *zcb = arg; 2413 dmu_object_type_t type; 2414 boolean_t is_metadata; 2415 2416 if (bp == NULL) 2417 return (0); 2418 2419 if (dump_opt['b'] >= 5 && bp->blk_birth > 0) { 2420 char blkbuf[BP_SPRINTF_LEN]; 2421 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 2422 (void) printf("objset %llu object %llu " 2423 "level %lld offset 0x%llx %s\n", 2424 (u_longlong_t)zb->zb_objset, 2425 (u_longlong_t)zb->zb_object, 2426 (longlong_t)zb->zb_level, 2427 (u_longlong_t)blkid2offset(dnp, bp, zb), 2428 blkbuf); 2429 } 2430 2431 if (BP_IS_HOLE(bp)) 2432 return (0); 2433 2434 type = BP_GET_TYPE(bp); 2435 2436 zdb_count_block(zcb, zilog, bp, 2437 (type & DMU_OT_NEWTYPE) ? ZDB_OT_OTHER : type); 2438 2439 is_metadata = (BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)); 2440 2441 if (!BP_IS_EMBEDDED(bp) && 2442 (dump_opt['c'] > 1 || (dump_opt['c'] && is_metadata))) { 2443 size_t size = BP_GET_PSIZE(bp); 2444 void *data = zio_data_buf_alloc(size); 2445 int flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB | ZIO_FLAG_RAW; 2446 2447 /* If it's an intent log block, failure is expected. 
*/ 2448 if (zb->zb_level == ZB_ZIL_LEVEL) 2449 flags |= ZIO_FLAG_SPECULATIVE; 2450 2451 mutex_enter(&spa->spa_scrub_lock); 2452 while (spa->spa_scrub_inflight > max_inflight) 2453 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2454 spa->spa_scrub_inflight++; 2455 mutex_exit(&spa->spa_scrub_lock); 2456 2457 zio_nowait(zio_read(NULL, spa, bp, data, size, 2458 zdb_blkptr_done, zcb, ZIO_PRIORITY_ASYNC_READ, flags, zb)); 2459 } 2460 2461 zcb->zcb_readfails = 0; 2462 2463 /* only call gethrtime() every 100 blocks */ 2464 static int iters; 2465 if (++iters > 100) 2466 iters = 0; 2467 else 2468 return (0); 2469 2470 if (dump_opt['b'] < 5 && gethrtime() > zcb->zcb_lastprint + NANOSEC) { 2471 uint64_t now = gethrtime(); 2472 char buf[10]; 2473 uint64_t bytes = zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL].zb_asize; 2474 int kb_per_sec = 2475 1 + bytes / (1 + ((now - zcb->zcb_start) / 1000 / 1000)); 2476 int sec_remaining = 2477 (zcb->zcb_totalasize - bytes) / 1024 / kb_per_sec; 2478 2479 zfs_nicenum(bytes, buf, sizeof (buf)); 2480 (void) fprintf(stderr, 2481 "\r%5s completed (%4dMB/s) " 2482 "estimated time remaining: %uhr %02umin %02usec ", 2483 buf, kb_per_sec / 1024, 2484 sec_remaining / 60 / 60, 2485 sec_remaining / 60 % 60, 2486 sec_remaining % 60); 2487 2488 zcb->zcb_lastprint = now; 2489 } 2490 2491 return (0); 2492 } 2493 2494 static void 2495 zdb_leak(void *arg, uint64_t start, uint64_t size) 2496 { 2497 vdev_t *vd = arg; 2498 2499 (void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n", 2500 (u_longlong_t)vd->vdev_id, (u_longlong_t)start, (u_longlong_t)size); 2501 } 2502 2503 static metaslab_ops_t zdb_metaslab_ops = { 2504 NULL /* alloc */ 2505 }; 2506 2507 static void 2508 zdb_ddt_leak_init(spa_t *spa, zdb_cb_t *zcb) 2509 { 2510 ddt_bookmark_t ddb = { 0 }; 2511 ddt_entry_t dde; 2512 int error; 2513 2514 while ((error = ddt_walk(spa, &ddb, &dde)) == 0) { 2515 blkptr_t blk; 2516 ddt_phys_t *ddp = dde.dde_phys; 2517 2518 if (ddb.ddb_class == DDT_CLASS_UNIQUE) 2519 return; 2520 2521 ASSERT(ddt_phys_total_refcnt(&dde) > 1); 2522 2523 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 2524 if (ddp->ddp_phys_birth == 0) 2525 continue; 2526 ddt_bp_create(ddb.ddb_checksum, 2527 &dde.dde_key, ddp, &blk); 2528 if (p == DDT_PHYS_DITTO) { 2529 zdb_count_block(zcb, NULL, &blk, ZDB_OT_DITTO); 2530 } else { 2531 zcb->zcb_dedup_asize += 2532 BP_GET_ASIZE(&blk) * (ddp->ddp_refcnt - 1); 2533 zcb->zcb_dedup_blocks++; 2534 } 2535 } 2536 if (!dump_opt['L']) { 2537 ddt_t *ddt = spa->spa_ddt[ddb.ddb_checksum]; 2538 ddt_enter(ddt); 2539 VERIFY(ddt_lookup(ddt, &blk, B_TRUE) != NULL); 2540 ddt_exit(ddt); 2541 } 2542 } 2543 2544 ASSERT(error == ENOENT); 2545 } 2546 2547 static void 2548 zdb_leak_init(spa_t *spa, zdb_cb_t *zcb) 2549 { 2550 zcb->zcb_spa = spa; 2551 2552 if (!dump_opt['L']) { 2553 vdev_t *rvd = spa->spa_root_vdev; 2554 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2555 vdev_t *vd = rvd->vdev_child[c]; 2556 for (uint64_t m = 0; m < vd->vdev_ms_count; m++) { 2557 metaslab_t *msp = vd->vdev_ms[m]; 2558 mutex_enter(&msp->ms_lock); 2559 metaslab_unload(msp); 2560 2561 /* 2562 * For leak detection, we overload the metaslab 2563 * ms_tree to contain allocated segments 2564 * instead of free segments. As a result, 2565 * we can't use the normal metaslab_load/unload 2566 * interfaces. 
2567 */ 2568 if (msp->ms_sm != NULL) { 2569 (void) fprintf(stderr, 2570 "\rloading space map for " 2571 "vdev %llu of %llu, " 2572 "metaslab %llu of %llu ...", 2573 (longlong_t)c, 2574 (longlong_t)rvd->vdev_children, 2575 (longlong_t)m, 2576 (longlong_t)vd->vdev_ms_count); 2577 2578 msp->ms_ops = &zdb_metaslab_ops; 2579 2580 /* 2581 * We don't want to spend the CPU 2582 * manipulating the size-ordered 2583 * tree, so clear the range_tree 2584 * ops. 2585 */ 2586 msp->ms_tree->rt_ops = NULL; 2587 VERIFY0(space_map_load(msp->ms_sm, 2588 msp->ms_tree, SM_ALLOC)); 2589 msp->ms_loaded = B_TRUE; 2590 } 2591 mutex_exit(&msp->ms_lock); 2592 } 2593 } 2594 (void) fprintf(stderr, "\n"); 2595 } 2596 2597 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 2598 2599 zdb_ddt_leak_init(spa, zcb); 2600 2601 spa_config_exit(spa, SCL_CONFIG, FTAG); 2602 } 2603 2604 static void 2605 zdb_leak_fini(spa_t *spa) 2606 { 2607 if (!dump_opt['L']) { 2608 vdev_t *rvd = spa->spa_root_vdev; 2609 for (int c = 0; c < rvd->vdev_children; c++) { 2610 vdev_t *vd = rvd->vdev_child[c]; 2611 for (int m = 0; m < vd->vdev_ms_count; m++) { 2612 metaslab_t *msp = vd->vdev_ms[m]; 2613 mutex_enter(&msp->ms_lock); 2614 2615 /* 2616 * The ms_tree has been overloaded to 2617 * contain allocated segments. Now that we 2618 * finished traversing all blocks, any 2619 * block that remains in the ms_tree 2620 * represents an allocated block that we 2621 * did not claim during the traversal. 2622 * Claimed blocks would have been removed 2623 * from the ms_tree. 2624 */ 2625 range_tree_vacate(msp->ms_tree, zdb_leak, vd); 2626 msp->ms_loaded = B_FALSE; 2627 2628 mutex_exit(&msp->ms_lock); 2629 } 2630 } 2631 } 2632 } 2633 2634 /* ARGSUSED */ 2635 static int 2636 count_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 2637 { 2638 zdb_cb_t *zcb = arg; 2639 2640 if (dump_opt['b'] >= 5) { 2641 char blkbuf[BP_SPRINTF_LEN]; 2642 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 2643 (void) printf("[%s] %s\n", 2644 "deferred free", blkbuf); 2645 } 2646 zdb_count_block(zcb, NULL, bp, ZDB_OT_DEFERRED); 2647 return (0); 2648 } 2649 2650 static int 2651 dump_block_stats(spa_t *spa) 2652 { 2653 zdb_cb_t zcb = { 0 }; 2654 zdb_blkstats_t *zb, *tzb; 2655 uint64_t norm_alloc, norm_space, total_alloc, total_found; 2656 int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | TRAVERSE_HARD; 2657 boolean_t leaks = B_FALSE; 2658 2659 (void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n", 2660 (dump_opt['c'] || !dump_opt['L']) ? "to verify " : "", 2661 (dump_opt['c'] == 1) ? "metadata " : "", 2662 dump_opt['c'] ? "checksums " : "", 2663 (dump_opt['c'] && !dump_opt['L']) ? "and verify " : "", 2664 !dump_opt['L'] ? "nothing leaked " : ""); 2665 2666 /* 2667 * Load all space maps as SM_ALLOC maps, then traverse the pool 2668 * claiming each block we discover. If the pool is perfectly 2669 * consistent, the space maps will be empty when we're done. 2670 * Anything left over is a leak; any block we can't claim (because 2671 * it's not part of any space map) is a double allocation, 2672 * reference to a freed block, or an unclaimed log block. 2673 */ 2674 zdb_leak_init(spa, &zcb); 2675 2676 /* 2677 * If there's a deferred-free bplist, process that first. 
2678 */ 2679 (void) bpobj_iterate_nofree(&spa->spa_deferred_bpobj, 2680 count_block_cb, &zcb, NULL); 2681 if (spa_version(spa) >= SPA_VERSION_DEADLISTS) { 2682 (void) bpobj_iterate_nofree(&spa->spa_dsl_pool->dp_free_bpobj, 2683 count_block_cb, &zcb, NULL); 2684 } 2685 if (spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) { 2686 VERIFY3U(0, ==, bptree_iterate(spa->spa_meta_objset, 2687 spa->spa_dsl_pool->dp_bptree_obj, B_FALSE, count_block_cb, 2688 &zcb, NULL)); 2689 } 2690 2691 if (dump_opt['c'] > 1) 2692 flags |= TRAVERSE_PREFETCH_DATA; 2693 2694 zcb.zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa)); 2695 zcb.zcb_start = zcb.zcb_lastprint = gethrtime(); 2696 zcb.zcb_haderrors |= traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb); 2697 2698 /* 2699 * If we've traversed the data blocks then we need to wait for those 2700 * I/Os to complete. We leverage "The Godfather" zio to wait on 2701 * all async I/Os to complete. 2702 */ 2703 if (dump_opt['c']) { 2704 for (int i = 0; i < max_ncpus; i++) { 2705 (void) zio_wait(spa->spa_async_zio_root[i]); 2706 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 2707 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 2708 ZIO_FLAG_GODFATHER); 2709 } 2710 } 2711 2712 if (zcb.zcb_haderrors) { 2713 (void) printf("\nError counts:\n\n"); 2714 (void) printf("\t%5s %s\n", "errno", "count"); 2715 for (int e = 0; e < 256; e++) { 2716 if (zcb.zcb_errors[e] != 0) { 2717 (void) printf("\t%5d %llu\n", 2718 e, (u_longlong_t)zcb.zcb_errors[e]); 2719 } 2720 } 2721 } 2722 2723 /* 2724 * Report any leaked segments. 2725 */ 2726 zdb_leak_fini(spa); 2727 2728 tzb = &zcb.zcb_type[ZB_TOTAL][ZDB_OT_TOTAL]; 2729 2730 norm_alloc = metaslab_class_get_alloc(spa_normal_class(spa)); 2731 norm_space = metaslab_class_get_space(spa_normal_class(spa)); 2732 2733 total_alloc = norm_alloc + metaslab_class_get_alloc(spa_log_class(spa)); 2734 total_found = tzb->zb_asize - zcb.zcb_dedup_asize; 2735 2736 if (total_found == total_alloc) { 2737 if (!dump_opt['L']) 2738 (void) printf("\n\tNo leaks (block sum matches space" 2739 " maps exactly)\n"); 2740 } else { 2741 (void) printf("block traversal size %llu != alloc %llu " 2742 "(%s %lld)\n", 2743 (u_longlong_t)total_found, 2744 (u_longlong_t)total_alloc, 2745 (dump_opt['L']) ? 
"unreachable" : "leaked", 2746 (longlong_t)(total_alloc - total_found)); 2747 leaks = B_TRUE; 2748 } 2749 2750 if (tzb->zb_count == 0) 2751 return (2); 2752 2753 (void) printf("\n"); 2754 (void) printf("\tbp count: %10llu\n", 2755 (u_longlong_t)tzb->zb_count); 2756 (void) printf("\tganged count: %10llu\n", 2757 (longlong_t)tzb->zb_gangs); 2758 (void) printf("\tbp logical: %10llu avg: %6llu\n", 2759 (u_longlong_t)tzb->zb_lsize, 2760 (u_longlong_t)(tzb->zb_lsize / tzb->zb_count)); 2761 (void) printf("\tbp physical: %10llu avg:" 2762 " %6llu compression: %6.2f\n", 2763 (u_longlong_t)tzb->zb_psize, 2764 (u_longlong_t)(tzb->zb_psize / tzb->zb_count), 2765 (double)tzb->zb_lsize / tzb->zb_psize); 2766 (void) printf("\tbp allocated: %10llu avg:" 2767 " %6llu compression: %6.2f\n", 2768 (u_longlong_t)tzb->zb_asize, 2769 (u_longlong_t)(tzb->zb_asize / tzb->zb_count), 2770 (double)tzb->zb_lsize / tzb->zb_asize); 2771 (void) printf("\tbp deduped: %10llu ref>1:" 2772 " %6llu deduplication: %6.2f\n", 2773 (u_longlong_t)zcb.zcb_dedup_asize, 2774 (u_longlong_t)zcb.zcb_dedup_blocks, 2775 (double)zcb.zcb_dedup_asize / tzb->zb_asize + 1.0); 2776 (void) printf("\tSPA allocated: %10llu used: %5.2f%%\n", 2777 (u_longlong_t)norm_alloc, 100.0 * norm_alloc / norm_space); 2778 2779 for (bp_embedded_type_t i = 0; i < NUM_BP_EMBEDDED_TYPES; i++) { 2780 if (zcb.zcb_embedded_blocks[i] == 0) 2781 continue; 2782 (void) printf("\n"); 2783 (void) printf("\tadditional, non-pointer bps of type %u: " 2784 "%10llu\n", 2785 i, (u_longlong_t)zcb.zcb_embedded_blocks[i]); 2786 2787 if (dump_opt['b'] >= 3) { 2788 (void) printf("\t number of (compressed) bytes: " 2789 "number of bps\n"); 2790 dump_histogram(zcb.zcb_embedded_histogram[i], 2791 sizeof (zcb.zcb_embedded_histogram[i]) / 2792 sizeof (zcb.zcb_embedded_histogram[i][0]), 0); 2793 } 2794 } 2795 2796 if (tzb->zb_ditto_samevdev != 0) { 2797 (void) printf("\tDittoed blocks on same vdev: %llu\n", 2798 (longlong_t)tzb->zb_ditto_samevdev); 2799 } 2800 2801 if (dump_opt['b'] >= 2) { 2802 int l, t, level; 2803 (void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE" 2804 "\t avg\t comp\t%%Total\tType\n"); 2805 2806 for (t = 0; t <= ZDB_OT_TOTAL; t++) { 2807 char csize[32], lsize[32], psize[32], asize[32]; 2808 char avg[32], gang[32]; 2809 char *typename; 2810 2811 if (t < DMU_OT_NUMTYPES) 2812 typename = dmu_ot[t].ot_name; 2813 else 2814 typename = zdb_ot_extname[t - DMU_OT_NUMTYPES]; 2815 2816 if (zcb.zcb_type[ZB_TOTAL][t].zb_asize == 0) { 2817 (void) printf("%6s\t%5s\t%5s\t%5s" 2818 "\t%5s\t%5s\t%6s\t%s\n", 2819 "-", 2820 "-", 2821 "-", 2822 "-", 2823 "-", 2824 "-", 2825 "-", 2826 typename); 2827 continue; 2828 } 2829 2830 for (l = ZB_TOTAL - 1; l >= -1; l--) { 2831 level = (l == -1 ? 
ZB_TOTAL : l); 2832 zb = &zcb.zcb_type[level][t]; 2833 2834 if (zb->zb_asize == 0) 2835 continue; 2836 2837 if (dump_opt['b'] < 3 && level != ZB_TOTAL) 2838 continue; 2839 2840 if (level == 0 && zb->zb_asize == 2841 zcb.zcb_type[ZB_TOTAL][t].zb_asize) 2842 continue; 2843 2844 zdb_nicenum(zb->zb_count, csize); 2845 zdb_nicenum(zb->zb_lsize, lsize); 2846 zdb_nicenum(zb->zb_psize, psize); 2847 zdb_nicenum(zb->zb_asize, asize); 2848 zdb_nicenum(zb->zb_asize / zb->zb_count, avg); 2849 zdb_nicenum(zb->zb_gangs, gang); 2850 2851 (void) printf("%6s\t%5s\t%5s\t%5s\t%5s" 2852 "\t%5.2f\t%6.2f\t", 2853 csize, lsize, psize, asize, avg, 2854 (double)zb->zb_lsize / zb->zb_psize, 2855 100.0 * zb->zb_asize / tzb->zb_asize); 2856 2857 if (level == ZB_TOTAL) 2858 (void) printf("%s\n", typename); 2859 else 2860 (void) printf(" L%d %s\n", 2861 level, typename); 2862 2863 if (dump_opt['b'] >= 3 && zb->zb_gangs > 0) { 2864 (void) printf("\t number of ganged " 2865 "blocks: %s\n", gang); 2866 } 2867 2868 if (dump_opt['b'] >= 4) { 2869 (void) printf("psize " 2870 "(in 512-byte sectors): " 2871 "number of blocks\n"); 2872 dump_histogram(zb->zb_psize_histogram, 2873 PSIZE_HISTO_SIZE, 0); 2874 } 2875 } 2876 } 2877 } 2878 2879 (void) printf("\n"); 2880 2881 if (leaks) 2882 return (2); 2883 2884 if (zcb.zcb_haderrors) 2885 return (3); 2886 2887 return (0); 2888 } 2889 2890 typedef struct zdb_ddt_entry { 2891 ddt_key_t zdde_key; 2892 uint64_t zdde_ref_blocks; 2893 uint64_t zdde_ref_lsize; 2894 uint64_t zdde_ref_psize; 2895 uint64_t zdde_ref_dsize; 2896 avl_node_t zdde_node; 2897 } zdb_ddt_entry_t; 2898 2899 /* ARGSUSED */ 2900 static int 2901 zdb_ddt_add_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 2902 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 2903 { 2904 avl_tree_t *t = arg; 2905 avl_index_t where; 2906 zdb_ddt_entry_t *zdde, zdde_search; 2907 2908 if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 2909 return (0); 2910 2911 if (dump_opt['S'] > 1 && zb->zb_level == ZB_ROOT_LEVEL) { 2912 (void) printf("traversing objset %llu, %llu objects, " 2913 "%lu blocks so far\n", 2914 (u_longlong_t)zb->zb_objset, 2915 (u_longlong_t)BP_GET_FILL(bp), 2916 avl_numnodes(t)); 2917 } 2918 2919 if (BP_IS_HOLE(bp) || BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_OFF || 2920 BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp))) 2921 return (0); 2922 2923 ddt_key_fill(&zdde_search.zdde_key, bp); 2924 2925 zdde = avl_find(t, &zdde_search, &where); 2926 2927 if (zdde == NULL) { 2928 zdde = umem_zalloc(sizeof (*zdde), UMEM_NOFAIL); 2929 zdde->zdde_key = zdde_search.zdde_key; 2930 avl_insert(t, zdde, where); 2931 } 2932 2933 zdde->zdde_ref_blocks += 1; 2934 zdde->zdde_ref_lsize += BP_GET_LSIZE(bp); 2935 zdde->zdde_ref_psize += BP_GET_PSIZE(bp); 2936 zdde->zdde_ref_dsize += bp_get_dsize_sync(spa, bp); 2937 2938 return (0); 2939 } 2940 2941 static void 2942 dump_simulated_ddt(spa_t *spa) 2943 { 2944 avl_tree_t t; 2945 void *cookie = NULL; 2946 zdb_ddt_entry_t *zdde; 2947 ddt_histogram_t ddh_total = { 0 }; 2948 ddt_stat_t dds_total = { 0 }; 2949 2950 avl_create(&t, ddt_entry_compare, 2951 sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node)); 2952 2953 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 2954 2955 (void) traverse_pool(spa, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, 2956 zdb_ddt_add_cb, &t); 2957 2958 spa_config_exit(spa, SCL_CONFIG, FTAG); 2959 2960 while ((zdde = avl_destroy_nodes(&t, &cookie)) != NULL) { 2961 ddt_stat_t dds; 2962 uint64_t refcnt = zdde->zdde_ref_blocks; 2963 
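/*
 * Sketch of the arithmetic below: each simulated DDT entry was
 * referenced 'refcnt' times during traversal, so the dds_* fields model
 * a single deduped copy (the accumulated ref totals divided by refcnt)
 * while the dds_ref_* fields keep the full referenced totals.  The
 * entry is then added to the histogram bucket indexed by
 * highbit64(refcnt) - 1, i.e. floor(log2(refcnt)).
 */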
ASSERT(refcnt != 0); 2964 2965 dds.dds_blocks = zdde->zdde_ref_blocks / refcnt; 2966 dds.dds_lsize = zdde->zdde_ref_lsize / refcnt; 2967 dds.dds_psize = zdde->zdde_ref_psize / refcnt; 2968 dds.dds_dsize = zdde->zdde_ref_dsize / refcnt; 2969 2970 dds.dds_ref_blocks = zdde->zdde_ref_blocks; 2971 dds.dds_ref_lsize = zdde->zdde_ref_lsize; 2972 dds.dds_ref_psize = zdde->zdde_ref_psize; 2973 dds.dds_ref_dsize = zdde->zdde_ref_dsize; 2974 2975 ddt_stat_add(&ddh_total.ddh_stat[highbit64(refcnt) - 1], 2976 &dds, 0); 2977 2978 umem_free(zdde, sizeof (*zdde)); 2979 } 2980 2981 avl_destroy(&t); 2982 2983 ddt_histogram_stat(&dds_total, &ddh_total); 2984 2985 (void) printf("Simulated DDT histogram:\n"); 2986 2987 zpool_dump_ddt(&dds_total, &ddh_total); 2988 2989 dump_dedup_ratio(&dds_total); 2990 } 2991 2992 static void 2993 dump_zpool(spa_t *spa) 2994 { 2995 dsl_pool_t *dp = spa_get_dsl(spa); 2996 int rc = 0; 2997 2998 if (dump_opt['S']) { 2999 dump_simulated_ddt(spa); 3000 return; 3001 } 3002 3003 if (!dump_opt['e'] && dump_opt['C'] > 1) { 3004 (void) printf("\nCached configuration:\n"); 3005 dump_nvlist(spa->spa_config, 8); 3006 } 3007 3008 if (dump_opt['C']) 3009 dump_config(spa); 3010 3011 if (dump_opt['u']) 3012 dump_uberblock(&spa->spa_uberblock, "\nUberblock:\n", "\n"); 3013 3014 if (dump_opt['D']) 3015 dump_all_ddts(spa); 3016 3017 if (dump_opt['d'] > 2 || dump_opt['m']) 3018 dump_metaslabs(spa); 3019 if (dump_opt['M']) 3020 dump_metaslab_groups(spa); 3021 3022 if (dump_opt['d'] || dump_opt['i']) { 3023 dump_dir(dp->dp_meta_objset); 3024 if (dump_opt['d'] >= 3) { 3025 dump_full_bpobj(&spa->spa_deferred_bpobj, 3026 "Deferred frees", 0); 3027 if (spa_version(spa) >= SPA_VERSION_DEADLISTS) { 3028 dump_full_bpobj( 3029 &spa->spa_dsl_pool->dp_free_bpobj, 3030 "Pool snapshot frees", 0); 3031 } 3032 3033 if (spa_feature_is_active(spa, 3034 SPA_FEATURE_ASYNC_DESTROY)) { 3035 dump_bptree(spa->spa_meta_objset, 3036 spa->spa_dsl_pool->dp_bptree_obj, 3037 "Pool dataset frees"); 3038 } 3039 dump_dtl(spa->spa_root_vdev, 0); 3040 } 3041 (void) dmu_objset_find(spa_name(spa), dump_one_dir, 3042 NULL, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN); 3043 3044 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) { 3045 uint64_t refcount; 3046 3047 if (!(spa_feature_table[f].fi_flags & 3048 ZFEATURE_FLAG_PER_DATASET)) { 3049 ASSERT0(dataset_feature_count[f]); 3050 continue; 3051 } 3052 (void) feature_get_refcount(spa, 3053 &spa_feature_table[f], &refcount); 3054 if (dataset_feature_count[f] != refcount) { 3055 (void) printf("%s feature refcount mismatch: " 3056 "%lld datasets != %lld refcount\n", 3057 spa_feature_table[f].fi_uname, 3058 (longlong_t)dataset_feature_count[f], 3059 (longlong_t)refcount); 3060 rc = 2; 3061 } else { 3062 (void) printf("Verified %s feature refcount " 3063 "of %llu is correct\n", 3064 spa_feature_table[f].fi_uname, 3065 (longlong_t)refcount); 3066 } 3067 } 3068 } 3069 if (rc == 0 && (dump_opt['b'] || dump_opt['c'])) 3070 rc = dump_block_stats(spa); 3071 3072 if (rc == 0) 3073 rc = verify_spacemap_refcounts(spa); 3074 3075 if (dump_opt['s']) 3076 show_pool_stats(spa); 3077 3078 if (dump_opt['h']) 3079 dump_history(spa); 3080 3081 if (rc != 0) 3082 exit(rc); 3083 } 3084 3085 #define ZDB_FLAG_CHECKSUM 0x0001 3086 #define ZDB_FLAG_DECOMPRESS 0x0002 3087 #define ZDB_FLAG_BSWAP 0x0004 3088 #define ZDB_FLAG_GBH 0x0008 3089 #define ZDB_FLAG_INDIRECT 0x0010 3090 #define ZDB_FLAG_PHYS 0x0020 3091 #define ZDB_FLAG_RAW 0x0040 3092 #define ZDB_FLAG_PRINT_BLKPTR 0x0080 3093 3094 int flagbits[256]; 3095 3096 static 
void 3097 zdb_print_blkptr(blkptr_t *bp, int flags) 3098 { 3099 char blkbuf[BP_SPRINTF_LEN]; 3100 3101 if (flags & ZDB_FLAG_BSWAP) 3102 byteswap_uint64_array((void *)bp, sizeof (blkptr_t)); 3103 3104 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 3105 (void) printf("%s\n", blkbuf); 3106 } 3107 3108 static void 3109 zdb_dump_indirect(blkptr_t *bp, int nbps, int flags) 3110 { 3111 int i; 3112 3113 for (i = 0; i < nbps; i++) 3114 zdb_print_blkptr(&bp[i], flags); 3115 } 3116 3117 static void 3118 zdb_dump_gbh(void *buf, int flags) 3119 { 3120 zdb_dump_indirect((blkptr_t *)buf, SPA_GBH_NBLKPTRS, flags); 3121 } 3122 3123 static void 3124 zdb_dump_block_raw(void *buf, uint64_t size, int flags) 3125 { 3126 if (flags & ZDB_FLAG_BSWAP) 3127 byteswap_uint64_array(buf, size); 3128 (void) write(1, buf, size); 3129 } 3130 3131 static void 3132 zdb_dump_block(char *label, void *buf, uint64_t size, int flags) 3133 { 3134 uint64_t *d = (uint64_t *)buf; 3135 int nwords = size / sizeof (uint64_t); 3136 int do_bswap = !!(flags & ZDB_FLAG_BSWAP); 3137 int i, j; 3138 char *hdr, *c; 3139 3140 3141 if (do_bswap) 3142 hdr = " 7 6 5 4 3 2 1 0 f e d c b a 9 8"; 3143 else 3144 hdr = " 0 1 2 3 4 5 6 7 8 9 a b c d e f"; 3145 3146 (void) printf("\n%s\n%6s %s 0123456789abcdef\n", label, "", hdr); 3147 3148 for (i = 0; i < nwords; i += 2) { 3149 (void) printf("%06llx: %016llx %016llx ", 3150 (u_longlong_t)(i * sizeof (uint64_t)), 3151 (u_longlong_t)(do_bswap ? BSWAP_64(d[i]) : d[i]), 3152 (u_longlong_t)(do_bswap ? BSWAP_64(d[i + 1]) : d[i + 1])); 3153 3154 c = (char *)&d[i]; 3155 for (j = 0; j < 2 * sizeof (uint64_t); j++) 3156 (void) printf("%c", isprint(c[j]) ? c[j] : '.'); 3157 (void) printf("\n"); 3158 } 3159 } 3160 3161 /* 3162 * There are two acceptable formats: 3163 * leaf_name - For example: c1t0d0 or /tmp/ztest.0a 3164 * child[.child]* - For example: 0.1.1 3165 * 3166 * The second form can be used to specify arbitrary vdevs anywhere 3167 * in the hierarchy. For example, in a pool with a mirror of 3168 * RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1 . 3169 */ 3170 static vdev_t * 3171 zdb_vdev_lookup(vdev_t *vdev, char *path) 3172 { 3173 char *s, *p, *q; 3174 int i; 3175 3176 if (vdev == NULL) 3177 return (NULL); 3178 3179 /* First, assume the x.x.x.x format */ 3180 i = (int)strtoul(path, &s, 10); 3181 if (s == path || (s && *s != '.' && *s != '\0')) 3182 goto name; 3183 if (i < 0 || i >= vdev->vdev_children) 3184 return (NULL); 3185 3186 vdev = vdev->vdev_child[i]; 3187 if (*s == '\0') 3188 return (vdev); 3189 return (zdb_vdev_lookup(vdev, s+1)); 3190 3191 name: 3192 for (i = 0; i < vdev->vdev_children; i++) { 3193 vdev_t *vc = vdev->vdev_child[i]; 3194 3195 if (vc->vdev_path == NULL) { 3196 vc = zdb_vdev_lookup(vc, path); 3197 if (vc == NULL) 3198 continue; 3199 else 3200 return (vc); 3201 } 3202 3203 p = strrchr(vc->vdev_path, '/'); 3204 p = p ? p + 1 : vc->vdev_path; 3205 q = &vc->vdev_path[strlen(vc->vdev_path) - 2]; 3206 3207 if (strcmp(vc->vdev_path, path) == 0) 3208 return (vc); 3209 if (strcmp(p, path) == 0) 3210 return (vc); 3211 if (strcmp(q, "s0") == 0 && strncmp(p, path, q - p) == 0) 3212 return (vc); 3213 } 3214 3215 return (NULL); 3216 } 3217 3218 /* 3219 * Read a block from a pool and print it out.
The syntax of the 3220 * block descriptor is: 3221 * 3222 * pool:vdev_specifier:offset:size[:flags] 3223 * 3224 * pool - The name of the pool you wish to read from 3225 * vdev_specifier - Which vdev (see comment for zdb_vdev_lookup) 3226 * offset - offset, in hex, in bytes 3227 * size - Amount of data to read, in hex, in bytes 3228 * flags - A string of characters specifying options 3229 * b: Decode a blkptr at given offset within block 3230 * *c: Calculate and display checksums 3231 * d: Decompress data before dumping 3232 * e: Byteswap data before dumping 3233 * g: Display data as a gang block header 3234 * i: Display as an indirect block 3235 * p: Do I/O to physical offset 3236 * r: Dump raw data to stdout 3237 * 3238 * * = not yet implemented 3239 */ 3240 static void 3241 zdb_read_block(char *thing, spa_t *spa) 3242 { 3243 blkptr_t blk, *bp = &blk; 3244 dva_t *dva = bp->blk_dva; 3245 int flags = 0; 3246 uint64_t offset = 0, size = 0, psize = 0, lsize = 0, blkptr_offset = 0; 3247 zio_t *zio; 3248 vdev_t *vd; 3249 void *pbuf, *lbuf, *buf; 3250 char *s, *p, *dup, *vdev, *flagstr; 3251 int i, error; 3252 3253 dup = strdup(thing); 3254 s = strtok(dup, ":"); 3255 vdev = s ? s : ""; 3256 s = strtok(NULL, ":"); 3257 offset = strtoull(s ? s : "", NULL, 16); 3258 s = strtok(NULL, ":"); 3259 size = strtoull(s ? s : "", NULL, 16); 3260 s = strtok(NULL, ":"); 3261 flagstr = s ? s : ""; 3262 3263 s = NULL; 3264 if (size == 0) 3265 s = "size must not be zero"; 3266 if (!IS_P2ALIGNED(size, DEV_BSIZE)) 3267 s = "size must be a multiple of sector size"; 3268 if (!IS_P2ALIGNED(offset, DEV_BSIZE)) 3269 s = "offset must be a multiple of sector size"; 3270 if (s) { 3271 (void) printf("Invalid block specifier: %s - %s\n", thing, s); 3272 free(dup); 3273 return; 3274 } 3275 3276 for (s = strtok(flagstr, ":"); s; s = strtok(NULL, ":")) { 3277 for (i = 0; flagstr[i]; i++) { 3278 int bit = flagbits[(uchar_t)flagstr[i]]; 3279 3280 if (bit == 0) { 3281 (void) printf("***Invalid flag: %c\n", 3282 flagstr[i]); 3283 continue; 3284 } 3285 flags |= bit; 3286 3287 /* If it's not something with an argument, keep going */ 3288 if ((bit & (ZDB_FLAG_CHECKSUM | 3289 ZDB_FLAG_PRINT_BLKPTR)) == 0) 3290 continue; 3291 3292 p = &flagstr[i + 1]; 3293 if (bit == ZDB_FLAG_PRINT_BLKPTR) 3294 blkptr_offset = strtoull(p, &p, 16); 3295 if (*p != ':' && *p != '\0') { 3296 (void) printf("***Invalid flag arg: '%s'\n", s); 3297 free(dup); 3298 return; 3299 } 3300 } 3301 } 3302 3303 vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev); 3304 if (vd == NULL) { 3305 (void) printf("***Invalid vdev: %s\n", vdev); 3306 free(dup); 3307 return; 3308 } else { 3309 if (vd->vdev_path) 3310 (void) fprintf(stderr, "Found vdev: %s\n", 3311 vd->vdev_path); 3312 else 3313 (void) fprintf(stderr, "Found vdev type: %s\n", 3314 vd->vdev_ops->vdev_op_type); 3315 } 3316 3317 psize = size; 3318 lsize = size; 3319 3320 pbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 3321 lbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 3322 3323 BP_ZERO(bp); 3324 3325 DVA_SET_VDEV(&dva[0], vd->vdev_id); 3326 DVA_SET_OFFSET(&dva[0], offset); 3327 DVA_SET_GANG(&dva[0], !!(flags & ZDB_FLAG_GBH)); 3328 DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, psize)); 3329 3330 BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL); 3331 3332 BP_SET_LSIZE(bp, lsize); 3333 BP_SET_PSIZE(bp, psize); 3334 BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF); 3335 BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF); 3336 BP_SET_TYPE(bp, DMU_OT_NONE); 3337 BP_SET_LEVEL(bp, 0); 3338 BP_SET_DEDUP(bp, 0); 3339 BP_SET_BYTEORDER(bp, 
ZFS_HOST_BYTEORDER); 3340 3341 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 3342 zio = zio_root(spa, NULL, NULL, 0); 3343 3344 if (vd == vd->vdev_top) { 3345 /* 3346 * Treat this as a normal block read. 3347 */ 3348 zio_nowait(zio_read(zio, spa, bp, pbuf, psize, NULL, NULL, 3349 ZIO_PRIORITY_SYNC_READ, 3350 ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL)); 3351 } else { 3352 /* 3353 * Treat this as a vdev child I/O. 3354 */ 3355 zio_nowait(zio_vdev_child_io(zio, bp, vd, offset, pbuf, psize, 3356 ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ, 3357 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE | 3358 ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY | 3359 ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL, NULL)); 3360 } 3361 3362 error = zio_wait(zio); 3363 spa_config_exit(spa, SCL_STATE, FTAG); 3364 3365 if (error) { 3366 (void) printf("Read of %s failed, error: %d\n", thing, error); 3367 goto out; 3368 } 3369 3370 if (flags & ZDB_FLAG_DECOMPRESS) { 3371 /* 3372 * We don't know how the data was compressed, so just try 3373 * every decompress function at every inflated blocksize. 3374 */ 3375 enum zio_compress c; 3376 void *pbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 3377 void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 3378 3379 bcopy(pbuf, pbuf2, psize); 3380 3381 VERIFY(random_get_pseudo_bytes((uint8_t *)pbuf + psize, 3382 SPA_MAXBLOCKSIZE - psize) == 0); 3383 3384 VERIFY(random_get_pseudo_bytes((uint8_t *)pbuf2 + psize, 3385 SPA_MAXBLOCKSIZE - psize) == 0); 3386 3387 for (lsize = SPA_MAXBLOCKSIZE; lsize > psize; 3388 lsize -= SPA_MINBLOCKSIZE) { 3389 for (c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++) { 3390 if (zio_decompress_data(c, pbuf, lbuf, 3391 psize, lsize) == 0 && 3392 zio_decompress_data(c, pbuf2, lbuf2, 3393 psize, lsize) == 0 && 3394 bcmp(lbuf, lbuf2, lsize) == 0) 3395 break; 3396 } 3397 if (c != ZIO_COMPRESS_FUNCTIONS) 3398 break; 3399 lsize -= SPA_MINBLOCKSIZE; 3400 } 3401 3402 umem_free(pbuf2, SPA_MAXBLOCKSIZE); 3403 umem_free(lbuf2, SPA_MAXBLOCKSIZE); 3404 3405 if (lsize <= psize) { 3406 (void) printf("Decompress of %s failed\n", thing); 3407 goto out; 3408 } 3409 buf = lbuf; 3410 size = lsize; 3411 } else { 3412 buf = pbuf; 3413 size = psize; 3414 } 3415 3416 if (flags & ZDB_FLAG_PRINT_BLKPTR) 3417 zdb_print_blkptr((blkptr_t *)(void *) 3418 ((uintptr_t)buf + (uintptr_t)blkptr_offset), flags); 3419 else if (flags & ZDB_FLAG_RAW) 3420 zdb_dump_block_raw(buf, size, flags); 3421 else if (flags & ZDB_FLAG_INDIRECT) 3422 zdb_dump_indirect((blkptr_t *)buf, size / sizeof (blkptr_t), 3423 flags); 3424 else if (flags & ZDB_FLAG_GBH) 3425 zdb_dump_gbh(buf, flags); 3426 else 3427 zdb_dump_block(thing, buf, size, flags); 3428 3429 out: 3430 umem_free(pbuf, SPA_MAXBLOCKSIZE); 3431 umem_free(lbuf, SPA_MAXBLOCKSIZE); 3432 free(dup); 3433 } 3434 3435 static boolean_t 3436 pool_match(nvlist_t *cfg, char *tgt) 3437 { 3438 uint64_t v, guid = strtoull(tgt, NULL, 0); 3439 char *s; 3440 3441 if (guid != 0) { 3442 if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &v) == 0) 3443 return (v == guid); 3444 } else { 3445 if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &s) == 0) 3446 return (strcmp(s, tgt) == 0); 3447 } 3448 return (B_FALSE); 3449 } 3450 3451 static char * 3452 find_zpool(char **target, nvlist_t **configp, int dirc, char **dirv) 3453 { 3454 nvlist_t *pools; 3455 nvlist_t *match = NULL; 3456 char *name = NULL; 3457 char *sepp = NULL; 3458 char sep; 3459 int count = 0; 3460 importargs_t args = { 0 }; 3461 3462 args.paths = dirc; 3463 args.path = dirv; 3464 args.can_be_active = B_TRUE; 3465 3466 if 
((sepp = strpbrk(*target, "/@")) != NULL) { 3467 sep = *sepp; 3468 *sepp = '\0'; 3469 } 3470 3471 pools = zpool_search_import(g_zfs, &args); 3472 3473 if (pools != NULL) { 3474 nvpair_t *elem = NULL; 3475 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) { 3476 verify(nvpair_value_nvlist(elem, configp) == 0); 3477 if (pool_match(*configp, *target)) { 3478 count++; 3479 if (match != NULL) { 3480 /* print previously found config */ 3481 if (name != NULL) { 3482 (void) printf("%s\n", name); 3483 dump_nvlist(match, 8); 3484 name = NULL; 3485 } 3486 (void) printf("%s\n", 3487 nvpair_name(elem)); 3488 dump_nvlist(*configp, 8); 3489 } else { 3490 match = *configp; 3491 name = nvpair_name(elem); 3492 } 3493 } 3494 } 3495 } 3496 if (count > 1) 3497 (void) fatal("\tMatched %d pools - use pool GUID " 3498 "instead of pool name or \n" 3499 "\tpool name part of a dataset name to select pool", count); 3500 3501 if (sepp) 3502 *sepp = sep; 3503 /* 3504 * If a pool GUID was specified, replace it with the pool name 3505 */ 3506 if (name && (strstr(*target, name) != *target)) { 3507 int sz = 1 + strlen(name) + ((sepp) ? strlen(sepp) : 0); 3508 3509 *target = umem_alloc(sz, UMEM_NOFAIL); 3510 (void) snprintf(*target, sz, "%s%s", name, sepp ? sepp : ""); 3511 } 3512 3513 *configp = name ? match : NULL; 3514 3515 return (name); 3516 } 3517 3518 int 3519 main(int argc, char **argv) 3520 { 3521 int i, c; 3522 struct rlimit rl = { 1024, 1024 }; 3523 spa_t *spa = NULL; 3524 objset_t *os = NULL; 3525 int dump_all = 1; 3526 int verbose = 0; 3527 int error = 0; 3528 char **searchdirs = NULL; 3529 int nsearch = 0; 3530 char *target; 3531 nvlist_t *policy = NULL; 3532 uint64_t max_txg = UINT64_MAX; 3533 int rewind = ZPOOL_NEVER_REWIND; 3534 char *spa_config_path_env; 3535 3536 (void) setrlimit(RLIMIT_NOFILE, &rl); 3537 (void) enable_extended_FILE_stdio(-1, -1); 3538 3539 dprintf_setup(&argc, argv); 3540 3541 /* 3542 * If the SPA_CONFIG_PATH environment variable is set, it overrides 3543 * the default spa_config_path setting. If the -U flag is specified, 3544 * it in turn overrides the environment variable setting.
3545 */ 3546 spa_config_path_env = getenv("SPA_CONFIG_PATH"); 3547 if (spa_config_path_env != NULL) 3548 spa_config_path = spa_config_path_env; 3549 3550 while ((c = getopt(argc, argv, 3551 "bcdhilmMI:suCDRSAFLXx:evp:t:U:P")) != -1) { 3552 switch (c) { 3553 case 'b': 3554 case 'c': 3555 case 'd': 3556 case 'h': 3557 case 'i': 3558 case 'l': 3559 case 'm': 3560 case 's': 3561 case 'u': 3562 case 'C': 3563 case 'D': 3564 case 'M': 3565 case 'R': 3566 case 'S': 3567 dump_opt[c]++; 3568 dump_all = 0; 3569 break; 3570 case 'A': 3571 case 'F': 3572 case 'L': 3573 case 'X': 3574 case 'e': 3575 case 'P': 3576 dump_opt[c]++; 3577 break; 3578 case 'I': 3579 max_inflight = strtoull(optarg, NULL, 0); 3580 if (max_inflight == 0) { 3581 (void) fprintf(stderr, "maximum number " 3582 "of inflight I/Os must be greater " 3583 "than 0\n"); 3584 usage(); 3585 } 3586 break; 3587 case 'p': 3588 if (searchdirs == NULL) { 3589 searchdirs = umem_alloc(sizeof (char *), 3590 UMEM_NOFAIL); 3591 } else { 3592 char **tmp = umem_alloc((nsearch + 1) * 3593 sizeof (char *), UMEM_NOFAIL); 3594 bcopy(searchdirs, tmp, nsearch * 3595 sizeof (char *)); 3596 umem_free(searchdirs, 3597 nsearch * sizeof (char *)); 3598 searchdirs = tmp; 3599 } 3600 searchdirs[nsearch++] = optarg; 3601 break; 3602 case 't': 3603 max_txg = strtoull(optarg, NULL, 0); 3604 if (max_txg < TXG_INITIAL) { 3605 (void) fprintf(stderr, "incorrect txg " 3606 "specified: %s\n", optarg); 3607 usage(); 3608 } 3609 break; 3610 case 'U': 3611 spa_config_path = optarg; 3612 break; 3613 case 'v': 3614 verbose++; 3615 break; 3616 case 'x': 3617 vn_dumpdir = optarg; 3618 break; 3619 default: 3620 usage(); 3621 break; 3622 } 3623 } 3624 3625 if (!dump_opt['e'] && searchdirs != NULL) { 3626 (void) fprintf(stderr, "-p option requires use of -e\n"); 3627 usage(); 3628 } 3629 3630 /* 3631 * ZDB does not typically re-read blocks; therefore limit the ARC 3632 * to 256 MB, which can be used entirely for metadata. 3633 */ 3634 zfs_arc_max = zfs_arc_meta_limit = 256 * 1024 * 1024; 3635 3636 /* 3637 * "zdb -c" uses checksum-verifying scrub i/os which are async reads. 3638 * "zdb -b" uses traversal prefetch which uses async reads. 3639 * For good performance, let several of them be active at once. 3640 */ 3641 zfs_vdev_async_read_max_active = 10; 3642 3643 kernel_init(FREAD); 3644 g_zfs = libzfs_init(); 3645 ASSERT(g_zfs != NULL); 3646 3647 if (dump_all) 3648 verbose = MAX(verbose, 1); 3649 3650 for (c = 0; c < 256; c++) { 3651 if (dump_all && !strchr("elAFLRSXP", c)) 3652 dump_opt[c] = 1; 3653 if (dump_opt[c]) 3654 dump_opt[c] += verbose; 3655 } 3656 3657 aok = (dump_opt['A'] == 1) || (dump_opt['A'] > 2); 3658 zfs_recover = (dump_opt['A'] > 1); 3659 3660 argc -= optind; 3661 argv += optind; 3662 3663 if (argc < 2 && dump_opt['R']) 3664 usage(); 3665 if (argc < 1) { 3666 if (!dump_opt['e'] && dump_opt['C']) { 3667 dump_cachefile(spa_config_path); 3668 return (0); 3669 } 3670 usage(); 3671 } 3672 3673 if (dump_opt['l']) { 3674 dump_label(argv[0]); 3675 return (0); 3676 } 3677 3678 if (dump_opt['X'] || dump_opt['F']) 3679 rewind = ZPOOL_DO_REWIND | 3680 (dump_opt['X'] ? 
ZPOOL_EXTREME_REWIND : 0); 3681 3682 if (nvlist_alloc(&policy, NV_UNIQUE_NAME_TYPE, 0) != 0 || 3683 nvlist_add_uint64(policy, ZPOOL_REWIND_REQUEST_TXG, max_txg) != 0 || 3684 nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, rewind) != 0) 3685 fatal("internal error: %s", strerror(ENOMEM)); 3686 3687 error = 0; 3688 target = argv[0]; 3689 3690 if (dump_opt['e']) { 3691 nvlist_t *cfg = NULL; 3692 char *name = find_zpool(&target, &cfg, nsearch, searchdirs); 3693 3694 error = ENOENT; 3695 if (name) { 3696 if (dump_opt['C'] > 1) { 3697 (void) printf("\nConfiguration for import:\n"); 3698 dump_nvlist(cfg, 8); 3699 } 3700 if (nvlist_add_nvlist(cfg, 3701 ZPOOL_REWIND_POLICY, policy) != 0) { 3702 fatal("can't open '%s': %s", 3703 target, strerror(ENOMEM)); 3704 } 3705 if ((error = spa_import(name, cfg, NULL, 3706 ZFS_IMPORT_MISSING_LOG)) != 0) { 3707 error = spa_import(name, cfg, NULL, 3708 ZFS_IMPORT_VERBATIM); 3709 } 3710 } 3711 } 3712 3713 if (error == 0) { 3714 if (strpbrk(target, "/@") == NULL || dump_opt['R']) { 3715 error = spa_open_rewind(target, &spa, FTAG, policy, 3716 NULL); 3717 if (error) { 3718 /* 3719 * If we're missing the log device then 3720 * try opening the pool after clearing the 3721 * log state. 3722 */ 3723 mutex_enter(&spa_namespace_lock); 3724 if ((spa = spa_lookup(target)) != NULL && 3725 spa->spa_log_state == SPA_LOG_MISSING) { 3726 spa->spa_log_state = SPA_LOG_CLEAR; 3727 error = 0; 3728 } 3729 mutex_exit(&spa_namespace_lock); 3730 3731 if (!error) { 3732 error = spa_open_rewind(target, &spa, 3733 FTAG, policy, NULL); 3734 } 3735 } 3736 } else { 3737 error = dmu_objset_own(target, DMU_OST_ANY, 3738 B_TRUE, FTAG, &os); 3739 } 3740 } 3741 nvlist_free(policy); 3742 3743 if (error) 3744 fatal("can't open '%s': %s", target, strerror(error)); 3745 3746 argv++; 3747 argc--; 3748 if (!dump_opt['R']) { 3749 if (argc > 0) { 3750 zopt_objects = argc; 3751 zopt_object = calloc(zopt_objects, sizeof (uint64_t)); 3752 for (i = 0; i < zopt_objects; i++) { 3753 errno = 0; 3754 zopt_object[i] = strtoull(argv[i], NULL, 0); 3755 if (zopt_object[i] == 0 && errno != 0) 3756 fatal("bad number %s: %s", 3757 argv[i], strerror(errno)); 3758 } 3759 } 3760 if (os != NULL) { 3761 dump_dir(os); 3762 } else if (zopt_objects > 0 && !dump_opt['m']) { 3763 dump_dir(spa->spa_meta_objset); 3764 } else { 3765 dump_zpool(spa); 3766 } 3767 } else { 3768 flagbits['b'] = ZDB_FLAG_PRINT_BLKPTR; 3769 flagbits['c'] = ZDB_FLAG_CHECKSUM; 3770 flagbits['d'] = ZDB_FLAG_DECOMPRESS; 3771 flagbits['e'] = ZDB_FLAG_BSWAP; 3772 flagbits['g'] = ZDB_FLAG_GBH; 3773 flagbits['i'] = ZDB_FLAG_INDIRECT; 3774 flagbits['p'] = ZDB_FLAG_PHYS; 3775 flagbits['r'] = ZDB_FLAG_RAW; 3776 3777 for (i = 0; i < argc; i++) 3778 zdb_read_block(argv[i], spa); 3779 } 3780 3781 (os != NULL) ? dmu_objset_disown(os, FTAG) : spa_close(spa, FTAG); 3782 3783 fuid_table_destroy(); 3784 sa_loaded = B_FALSE; 3785 3786 libzfs_fini(g_zfs); 3787 kernel_fini(); 3788 3789 return (0); 3790 } 3791
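/*
 * Hypothetical usage sketch for the -R path handled above (the pool
 * name, vdev specifier, offset and size are made-up values; offset and
 * size are given in hex, flag characters as listed at zdb_read_block()):
 *
 *	zdb -R tank 0:400000:200:r	dump 0x200 raw bytes from vdev 0
 *	zdb -R tank 0.1:400000:200:d	same region, decompressed first
 */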