/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright (c) 2017, 2018 Lawrence Livermore National Security, LLC.
 * Copyright 2017 RackTop Systems.
 */

#include <stdio.h>
#include <unistd.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <ctype.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_sa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_pool.h>
#include <sys/dbuf.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/dmu_traverse.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <zfs_fletcher.h>
#include <sys/zfs_fuid.h>
#include <sys/arc.h>
#include <sys/arc_impl.h>
#include <sys/ddt.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/blkptr.h>
#include <sys/dsl_scan.h>
#include <sys/dsl_crypt.h>
#include <zfs_comutil.h>
#include <libcmdutils.h>
#undef verify
#include <libzfs.h>

#include <libnvpair.h>
#include <libzutil.h>

#include "zdb.h"

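/*
 * Map compression, checksum, and object-type indices to display names,
 * falling back to "UNKNOWN" for out-of-range values.  ZDB_OT_TYPE
 * collapses the user-defined DMU_OTN_* types onto their generic
 * DMU_OT_* equivalents so they can be used to index fixed-size arrays.
 */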
#define	ZDB_COMPRESS_NAME(idx) ((idx) < ZIO_COMPRESS_FUNCTIONS ?	\
	zio_compress_table[(idx)].ci_name : "UNKNOWN")
#define	ZDB_CHECKSUM_NAME(idx) ((idx) < ZIO_CHECKSUM_FUNCTIONS ?	\
	zio_checksum_table[(idx)].ci_name : "UNKNOWN")
#define	ZDB_OT_NAME(idx) ((idx) < DMU_OT_NUMTYPES ?	\
	dmu_ot[(idx)].ot_name : DMU_OT_IS_VALID(idx) ?	\
	dmu_ot_byteswap[DMU_OT_BYTESWAP(idx)].ob_name : "UNKNOWN")
#define	ZDB_OT_TYPE(idx) ((idx) < DMU_OT_NUMTYPES ? (idx) :	\
	(idx) == DMU_OTN_ZAP_DATA || (idx) == DMU_OTN_ZAP_METADATA ?	\
	DMU_OT_ZAP_OTHER :	\
	(idx) == DMU_OTN_UINT64_DATA || (idx) == DMU_OTN_UINT64_METADATA ?	\
	DMU_OT_UINT64_OTHER : DMU_OT_NUMTYPES)

extern int reference_tracking_enable;
extern boolean_t zfs_recover;
extern uint64_t zfs_arc_max, zfs_arc_meta_limit;
extern int zfs_vdev_async_read_max_active;
extern int aok;
extern boolean_t spa_load_verify_dryrun;
extern int zfs_btree_verify_intensity;

static const char cmdname[] = "zdb";
uint8_t dump_opt[256];

typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size);

uint64_t *zopt_object = NULL;
static unsigned zopt_objects = 0;
uint64_t max_inflight = 1000;
static int leaked_objects = 0;

static void snprintf_blkptr_compact(char *, size_t, const blkptr_t *);
static void mos_obj_refd(uint64_t);

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */
const char *
_umem_debug_init(void)
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}

static void
usage(void)
{
	(void) fprintf(stderr,
	    "Usage:\t%s [-AbcdDFGhikLMPsvX] [-e [-V] [-p <path> ...]] "
	    "[-I <inflight I/Os>]\n"
	    "\t\t[-o <var>=<value>]... [-t <txg>] [-U <cache>] [-x <dumpdir>]\n"
	    "\t\t[<poolname> [<object> ...]]\n"
	    "\t%s [-AdiPv] [-e [-V] [-p <path> ...]] [-U <cache>] <dataset> "
	    "[<object> ...]\n"
	    "\t%s -C [-A] [-U <cache>]\n"
	    "\t%s -l [-Aqu] <device>\n"
	    "\t%s -m [-AFLPX] [-e [-V] [-p <path> ...]] [-t <txg>] "
	    "[-U <cache>]\n\t\t<poolname> [<vdev> [<metaslab> ...]]\n"
	    "\t%s -O <dataset> <path>\n"
	    "\t%s -R [-A] [-e [-V] [-p <path> ...]] [-U <cache>]\n"
	    "\t\t<poolname> <vdev>:<offset>:<size>[:<flags>]\n"
	    "\t%s -E [-A] word0:word1:...:word15\n"
	    "\t%s -S [-AP] [-e [-V] [-p <path> ...]] [-U <cache>] "
	    "<poolname>\n\n",
	    cmdname, cmdname, cmdname, cmdname, cmdname, cmdname, cmdname,
	    cmdname, cmdname);

	(void) fprintf(stderr, "    Dataset name must include at least one "
	    "separator character '/' or '@'\n");
	(void) fprintf(stderr, "    If dataset name is specified, only that "
	    "dataset is dumped\n");
	(void) fprintf(stderr, "    If object numbers are specified, only "
	    "those objects are dumped\n\n");
	(void) fprintf(stderr, "    Options to control amount of output:\n");
	(void) fprintf(stderr, "        -b block statistics\n");
	(void) fprintf(stderr, "        -c checksum all metadata (twice for "
	    "all data) blocks\n");
	(void) fprintf(stderr, "        -C config (or cachefile if alone)\n");
	(void) fprintf(stderr, "        -d dataset(s)\n");
	(void) fprintf(stderr, "        -D dedup statistics\n");
	(void) fprintf(stderr, "        -E decode and display block from an "
	    "embedded block pointer\n");
	(void) fprintf(stderr, "        -h pool history\n");
	(void) fprintf(stderr, "        -i intent logs\n");
	(void) fprintf(stderr, "        -l read label contents\n");
	(void) fprintf(stderr, "        -k examine the checkpointed state "
	    "of the pool\n");
	(void) fprintf(stderr, "        -L disable leak tracking (do not "
	    "load spacemaps)\n");
	(void) fprintf(stderr, "        -m metaslabs\n");
	(void) fprintf(stderr, "        -M metaslab groups\n");
	(void) fprintf(stderr, "        -O perform object lookups by path\n");
	(void) fprintf(stderr, "        -R read and display block from a "
	    "device\n");
	(void) fprintf(stderr, "        -s report stats on zdb's I/O\n");
	(void) fprintf(stderr, "        -S simulate dedup to measure effect\n");
	(void) fprintf(stderr, "        -v verbose (applies to all "
	    "others)\n\n");
	(void) fprintf(stderr, "    Below options are intended for use "
	    "with other options:\n");
	(void) fprintf(stderr, "        -A ignore assertions (-A), enable "
	    "panic recovery (-AA) or both (-AAA)\n");
	(void) fprintf(stderr, "        -e pool is exported/destroyed/"
	    "has altroot/not in a cachefile\n");
	(void) fprintf(stderr, "        -F attempt automatic rewind within "
	    "safe range of transaction groups\n");
	(void) fprintf(stderr, "        -G dump zfs_dbgmsg buffer before "
	    "exiting\n");
	(void) fprintf(stderr, "        -I <number of inflight I/Os> -- "
	    "specify the maximum number of "
	    "checksumming I/Os [default is 1000]\n");
	(void) fprintf(stderr, "        -o <variable>=<value> set global "
	    "variable to an unsigned 32-bit integer value\n");
	(void) fprintf(stderr, "        -p <path> -- use one or more with "
	    "-e to specify path to vdev dir\n");
	(void) fprintf(stderr, "        -P print numbers in parseable form\n");
	(void) fprintf(stderr, "        -q don't print label contents\n");
	(void) fprintf(stderr, "        -t <txg> -- highest txg to use when "
	    "searching for uberblocks\n");
	(void) fprintf(stderr, "        -u uberblock\n");
	(void) fprintf(stderr, "        -U <cachefile_path> -- use alternate "
	    "cachefile\n");
	(void) fprintf(stderr, "        -V do verbatim import\n");
	(void) fprintf(stderr, "        -x <dumpdir> -- "
	    "dump all read blocks into specified directory\n");
	(void) fprintf(stderr, "        -X attempt extreme rewind (does not "
	    "work with dataset)\n\n");
	(void) fprintf(stderr, "Specify an option more than once (e.g. -bb) "
	    "to make only that option verbose\n");
	(void) fprintf(stderr, "Default is to dump everything non-verbosely\n");
	exit(1);
}

static void
dump_debug_buffer(void)
{
	if (dump_opt['G']) {
		(void) printf("\n");
		zfs_dbgmsg_print("zdb");
	}
}

/*
 * Called for usage errors that are discovered after a call to spa_open(),
 * dmu_bonus_hold(), or pool_match().  abort() is called for other errors.
 */

static void
fatal(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	(void) fprintf(stderr, "%s: ", cmdname);
	(void) vfprintf(stderr, fmt, ap);
	va_end(ap);
	(void) fprintf(stderr, "\n");

	dump_debug_buffer();

	exit(1);
}

/* ARGSUSED */
static void
dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size)
{
	nvlist_t *nv;
	size_t nvsize = *(uint64_t *)data;
	char *packed = umem_alloc(nvsize, UMEM_NOFAIL);

	VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH));

	VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0);

	umem_free(packed, nvsize);

	dump_nvlist(nv, 8);

	nvlist_free(nv);
}

/* ARGSUSED */
static void
dump_history_offsets(objset_t *os, uint64_t object, void *data, size_t size)
{
	spa_history_phys_t *shp = data;

	if (shp == NULL)
		return;

	(void) printf("\t\tpool_create_len = %llu\n",
	    (u_longlong_t)shp->sh_pool_create_len);
	(void) printf("\t\tphys_max_off = %llu\n",
	    (u_longlong_t)shp->sh_phys_max_off);
	(void) printf("\t\tbof = %llu\n",
	    (u_longlong_t)shp->sh_bof);
	(void) printf("\t\teof = %llu\n",
	    (u_longlong_t)shp->sh_eof);
	(void) printf("\t\trecords_lost = %llu\n",
	    (u_longlong_t)shp->sh_records_lost);
}

static void
zdb_nicenum(uint64_t num, char *buf, size_t buflen)
{
	if (dump_opt['P'])
		(void) snprintf(buf, buflen, "%llu", (longlong_t)num);
	else
		nicenum(num, buf, buflen);
}

static const char histo_stars[] = "****************************************";
static const uint64_t histo_width = sizeof (histo_stars) - 1;

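/*
 * Print one row per bucket between the lowest and highest non-empty
 * buckets, scaling the bar of stars so that the largest count spans the
 * full histo_width.  "offset" shifts the printed bucket labels (e.g. so
 * space map histograms can label buckets in units of 2^(sm_shift + n)).
 */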
static void
dump_histogram(const uint64_t *histo, int size, int offset)
{
	int i;
	int minidx = size - 1;
	int maxidx = 0;
	uint64_t max = 0;

	for (i = 0; i < size; i++) {
		if (histo[i] > max)
			max = histo[i];
		if (histo[i] > 0 && i > maxidx)
			maxidx = i;
		if (histo[i] > 0 && i < minidx)
			minidx = i;
	}

	if (max < histo_width)
		max = histo_width;

	for (i = minidx; i <= maxidx; i++) {
		(void) printf("\t\t\t%3u: %6llu %s\n",
		    i + offset, (u_longlong_t)histo[i],
		    &histo_stars[(max - histo[i]) * histo_width / max]);
	}
}

static void
dump_zap_stats(objset_t *os, uint64_t object)
{
	int error;
	zap_stats_t zs;

	error = zap_get_stats(os, object, &zs);
	if (error)
		return;

	if (zs.zs_ptrtbl_len == 0) {
		ASSERT(zs.zs_num_blocks == 1);
		(void) printf("\tmicrozap: %llu bytes, %llu entries\n",
		    (u_longlong_t)zs.zs_blocksize,
		    (u_longlong_t)zs.zs_num_entries);
		return;
	}

	(void) printf("\tFat ZAP stats:\n");

	(void) printf("\t\tPointer table:\n");
	(void) printf("\t\t\t%llu elements\n",
	    (u_longlong_t)zs.zs_ptrtbl_len);
	(void) printf("\t\t\tzt_blk: %llu\n",
	    (u_longlong_t)zs.zs_ptrtbl_zt_blk);
	(void) printf("\t\t\tzt_numblks: %llu\n",
	    (u_longlong_t)zs.zs_ptrtbl_zt_numblks);
	(void) printf("\t\t\tzt_shift: %llu\n",
	    (u_longlong_t)zs.zs_ptrtbl_zt_shift);
	(void) printf("\t\t\tzt_blks_copied: %llu\n",
	    (u_longlong_t)zs.zs_ptrtbl_blks_copied);
	(void) printf("\t\t\tzt_nextblk: %llu\n",
	    (u_longlong_t)zs.zs_ptrtbl_nextblk);

	(void) printf("\t\tZAP entries: %llu\n",
	    (u_longlong_t)zs.zs_num_entries);
	(void) printf("\t\tLeaf blocks: %llu\n",
	    (u_longlong_t)zs.zs_num_leafs);
	(void) printf("\t\tTotal blocks: %llu\n",
	    (u_longlong_t)zs.zs_num_blocks);
	(void) printf("\t\tzap_block_type: 0x%llx\n",
	    (u_longlong_t)zs.zs_block_type);
	(void) printf("\t\tzap_magic: 0x%llx\n",
	    (u_longlong_t)zs.zs_magic);
	(void) printf("\t\tzap_salt: 0x%llx\n",
	    (u_longlong_t)zs.zs_salt);

	(void) printf("\t\tLeafs with 2^n pointers:\n");
	dump_histogram(zs.zs_leafs_with_2n_pointers, ZAP_HISTOGRAM_SIZE, 0);

	(void) printf("\t\tBlocks with n*5 entries:\n");
	dump_histogram(zs.zs_blocks_with_n5_entries, ZAP_HISTOGRAM_SIZE, 0);

	(void) printf("\t\tBlocks n/10 full:\n");
	dump_histogram(zs.zs_blocks_n_tenths_full, ZAP_HISTOGRAM_SIZE, 0);

	(void) printf("\t\tEntries with n chunks:\n");
	dump_histogram(zs.zs_entries_using_n_chunks, ZAP_HISTOGRAM_SIZE, 0);

	(void) printf("\t\tBuckets with n entries:\n");
	dump_histogram(zs.zs_buckets_with_n_entries, ZAP_HISTOGRAM_SIZE, 0);
}

/*ARGSUSED*/
static void
dump_none(objset_t *os, uint64_t object, void *data, size_t size)
{
}

/*ARGSUSED*/
static void
dump_unknown(objset_t *os, uint64_t object, void *data, size_t size)
{
	(void) printf("\tUNKNOWN OBJECT TYPE\n");
}

/*ARGSUSED*/
static void
dump_uint8(objset_t *os, uint64_t object, void *data, size_t size)
{
}

/*ARGSUSED*/
static void
dump_uint64(objset_t *os, uint64_t object, void *data, size_t size)
{
}

/*ARGSUSED*/
static void
dump_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
	zap_cursor_t zc;
	zap_attribute_t attr;
	void *prop;
	unsigned i;

	dump_zap_stats(os, object);
	(void) printf("\n");

	for (zap_cursor_init(&zc, os, object);
	    zap_cursor_retrieve(&zc, &attr) == 0;
	    zap_cursor_advance(&zc)) {
		(void) printf("\t\t%s = ", attr.za_name);
		if (attr.za_num_integers == 0) {
			(void) printf("\n");
			continue;
		}
		prop = umem_zalloc(attr.za_num_integers *
		    attr.za_integer_length, UMEM_NOFAIL);
		(void) zap_lookup(os, object, attr.za_name,
		    attr.za_integer_length, attr.za_num_integers, prop);
		if (attr.za_integer_length == 1) {
			if (strcmp(attr.za_name,
			    DSL_CRYPTO_KEY_MASTER_KEY) == 0 ||
			    strcmp(attr.za_name,
			    DSL_CRYPTO_KEY_HMAC_KEY) == 0 ||
			    strcmp(attr.za_name, DSL_CRYPTO_KEY_IV) == 0 ||
			    strcmp(attr.za_name, DSL_CRYPTO_KEY_MAC) == 0 ||
			    strcmp(attr.za_name, DMU_POOL_CHECKSUM_SALT) == 0) {
				uint8_t *u8 = prop;

				for (i = 0; i < attr.za_num_integers; i++) {
					(void) printf("%02x", u8[i]);
				}
			} else {
				(void) printf("%s", (char *)prop);
			}
		} else {
			for (i = 0; i < attr.za_num_integers; i++) {
				switch (attr.za_integer_length) {
				case 2:
					(void) printf("%u ",
					    ((uint16_t *)prop)[i]);
					break;
				case 4:
					(void) printf("%u ",
					    ((uint32_t *)prop)[i]);
					break;
				case 8:
					(void) printf("%lld ",
					    (u_longlong_t)((int64_t *)prop)[i]);
					break;
				}
			}
		}
		(void) printf("\n");
		umem_free(prop, attr.za_num_integers * attr.za_integer_length);
	}
	zap_cursor_fini(&zc);
}

static void
dump_bpobj(objset_t *os, uint64_t object, void *data, size_t size)
{
	bpobj_phys_t *bpop = data;
	char bytes[32], comp[32], uncomp[32];

	/* make sure the output won't get truncated */
	CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);

	if (bpop == NULL)
		return;

	zdb_nicenum(bpop->bpo_bytes, bytes, sizeof (bytes));
	zdb_nicenum(bpop->bpo_comp, comp, sizeof (comp));
	zdb_nicenum(bpop->bpo_uncomp, uncomp, sizeof (uncomp));

	(void) printf("\t\tnum_blkptrs = %llu\n",
	    (u_longlong_t)bpop->bpo_num_blkptrs);
	(void) printf("\t\tbytes = %s\n", bytes);
	if (size >= BPOBJ_SIZE_V1) {
		(void) printf("\t\tcomp = %s\n", comp);
		(void) printf("\t\tuncomp = %s\n", uncomp);
	}
	if (size >= sizeof (*bpop)) {
		(void) printf("\t\tsubobjs = %llu\n",
		    (u_longlong_t)bpop->bpo_subobjs);
		(void) printf("\t\tnum_subobjs = %llu\n",
		    (u_longlong_t)bpop->bpo_num_subobjs);
	}

	if (dump_opt['d'] < 5)
		return;

	for (uint64_t i = 0; i < bpop->bpo_num_blkptrs; i++) {
		char blkbuf[BP_SPRINTF_LEN];
		blkptr_t bp;

		int err = dmu_read(os, object,
		    i * sizeof (bp), sizeof (bp), &bp, 0);
		if (err != 0) {
			(void) printf("got error %u from dmu_read\n", err);
			break;
		}
		snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), &bp);
		(void) printf("\t%s\n", blkbuf);
	}
}

/* ARGSUSED */
static void
dump_bpobj_subobjs(objset_t *os, uint64_t object, void *data, size_t size)
{
	dmu_object_info_t doi;

	VERIFY0(dmu_object_info(os, object, &doi));
	uint64_t *subobjs = kmem_alloc(doi.doi_max_offset, KM_SLEEP);

	int err = dmu_read(os, object, 0, doi.doi_max_offset, subobjs, 0);
	if (err != 0) {
		(void) printf("got error %u from dmu_read\n", err);
		kmem_free(subobjs, doi.doi_max_offset);
		return;
	}

	int64_t last_nonzero = -1;
	for (uint64_t i = 0; i < doi.doi_max_offset / 8; i++) {
		if (subobjs[i] != 0)
			last_nonzero = i;
	}

	for (int64_t i = 0; i <= last_nonzero; i++) {
		(void) printf("\t%llu\n", (longlong_t)subobjs[i]);
	}
	kmem_free(subobjs, doi.doi_max_offset);
}

/*ARGSUSED*/
static void
dump_ddt_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
	dump_zap_stats(os, object);
	/* contents are printed elsewhere, properly decoded */
}

/*ARGSUSED*/
static void
dump_sa_attrs(objset_t *os, uint64_t object, void *data, size_t size)
{
	zap_cursor_t zc;
	zap_attribute_t attr;

	dump_zap_stats(os, object);
	(void) printf("\n");

	for (zap_cursor_init(&zc, os, object);
	    zap_cursor_retrieve(&zc, &attr) == 0;
	    zap_cursor_advance(&zc)) {
		(void) printf("\t\t%s = ", attr.za_name);
		if (attr.za_num_integers == 0) {
			(void) printf("\n");
			continue;
		}
		(void) printf(" %llx : [%d:%d:%d]\n",
		    (u_longlong_t)attr.za_first_integer,
		    (int)ATTR_LENGTH(attr.za_first_integer),
		    (int)ATTR_BSWAP(attr.za_first_integer),
		    (int)ATTR_NUM(attr.za_first_integer));
	}
	zap_cursor_fini(&zc);
}

/*ARGSUSED*/
static void
dump_sa_layouts(objset_t *os, uint64_t object, void *data, size_t size)
{
	zap_cursor_t zc;
	zap_attribute_t attr;
	uint16_t *layout_attrs;
	unsigned i;

	dump_zap_stats(os, object);
	(void) printf("\n");

	for (zap_cursor_init(&zc, os, object);
	    zap_cursor_retrieve(&zc, &attr) == 0;
	    zap_cursor_advance(&zc)) {
		(void) printf("\t\t%s = [", attr.za_name);
		if (attr.za_num_integers == 0) {
			(void) printf("\n");
			continue;
		}

		VERIFY(attr.za_integer_length == 2);
		layout_attrs = umem_zalloc(attr.za_num_integers *
		    attr.za_integer_length, UMEM_NOFAIL);

		VERIFY(zap_lookup(os, object, attr.za_name,
		    attr.za_integer_length,
		    attr.za_num_integers, layout_attrs) == 0);

		for (i = 0; i != attr.za_num_integers; i++)
			(void) printf(" %d ", (int)layout_attrs[i]);
		(void) printf("]\n");
		umem_free(layout_attrs,
		    attr.za_num_integers * attr.za_integer_length);
	}
	zap_cursor_fini(&zc);
}

/*ARGSUSED*/
static void
dump_zpldir(objset_t *os, uint64_t object, void *data, size_t size)
{
	zap_cursor_t zc;
	zap_attribute_t attr;
	const char *typenames[] = {
		/* 0 */ "not specified",
		/* 1 */ "FIFO",
		/* 2 */ "Character Device",
		/* 3 */ "3 (invalid)",
		/* 4 */ "Directory",
		/* 5 */ "5 (invalid)",
		/* 6 */ "Block Device",
		/* 7 */ "7 (invalid)",
		/* 8 */ "Regular File",
		/* 9 */ "9 (invalid)",
		/* 10 */ "Symbolic Link",
		/* 11 */ "11 (invalid)",
		/* 12 */ "Socket",
		/* 13 */ "Door",
		/* 14 */ "Event Port",
		/* 15 */ "15 (invalid)",
	};

	dump_zap_stats(os, object);
	(void) printf("\n");

	for (zap_cursor_init(&zc, os, object);
	    zap_cursor_retrieve(&zc, &attr) == 0;
	    zap_cursor_advance(&zc)) {
		(void) printf("\t\t%s = %lld (type: %s)\n",
		    attr.za_name, ZFS_DIRENT_OBJ(attr.za_first_integer),
		    typenames[ZFS_DIRENT_TYPE(attr.za_first_integer)]);
	}
	zap_cursor_fini(&zc);
}

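/*
 * The get_*_refcount() helpers below count how many space maps of each
 * flavor (DTL, metaslab, obsolete, checkpoint, log) are in the new
 * histogram-bearing on-disk format.  verify_spacemap_refcounts()
 * compares their sum against the spacemap_histogram feature refcount.
 */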
static int
get_dtl_refcount(vdev_t *vd)
{
	int refcount = 0;

	if (vd->vdev_ops->vdev_op_leaf) {
		space_map_t *sm = vd->vdev_dtl_sm;

		if (sm != NULL &&
		    sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
			return (1);
		return (0);
	}

	for (unsigned c = 0; c < vd->vdev_children; c++)
		refcount += get_dtl_refcount(vd->vdev_child[c]);
	return (refcount);
}

static int
get_metaslab_refcount(vdev_t *vd)
{
	int refcount = 0;

	if (vd->vdev_top == vd) {
		for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
			space_map_t *sm = vd->vdev_ms[m]->ms_sm;

			if (sm != NULL &&
			    sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
				refcount++;
		}
	}
	for (unsigned c = 0; c < vd->vdev_children; c++)
		refcount += get_metaslab_refcount(vd->vdev_child[c]);

	return (refcount);
}

static int
get_obsolete_refcount(vdev_t *vd)
{
	int refcount = 0;

	uint64_t obsolete_sm_obj = vdev_obsolete_sm_object(vd);
	if (vd->vdev_top == vd && obsolete_sm_obj != 0) {
		dmu_object_info_t doi;
		VERIFY0(dmu_object_info(vd->vdev_spa->spa_meta_objset,
		    obsolete_sm_obj, &doi));
		if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
			refcount++;
		}
	} else {
		ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
		ASSERT3U(obsolete_sm_obj, ==, 0);
	}
	for (unsigned c = 0; c < vd->vdev_children; c++) {
		refcount += get_obsolete_refcount(vd->vdev_child[c]);
	}

	return (refcount);
}

static int
get_prev_obsolete_spacemap_refcount(spa_t *spa)
{
	uint64_t prev_obj =
	    spa->spa_condensing_indirect_phys.scip_prev_obsolete_sm_object;
	if (prev_obj != 0) {
		dmu_object_info_t doi;
		VERIFY0(dmu_object_info(spa->spa_meta_objset, prev_obj, &doi));
		if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
			return (1);
		}
	}
	return (0);
}

static int
get_checkpoint_refcount(vdev_t *vd)
{
	int refcount = 0;

	if (vd->vdev_top == vd && vd->vdev_top_zap != 0 &&
	    zap_contains(spa_meta_objset(vd->vdev_spa),
	    vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) == 0)
		refcount++;

	for (uint64_t c = 0; c < vd->vdev_children; c++)
		refcount += get_checkpoint_refcount(vd->vdev_child[c]);

	return (refcount);
}

static int
get_log_spacemap_refcount(spa_t *spa)
{
	return (avl_numnodes(&spa->spa_sm_logs_by_txg));
}

static int
verify_spacemap_refcounts(spa_t *spa)
{
	uint64_t expected_refcount = 0;
	uint64_t actual_refcount;

	(void) feature_get_refcount(spa,
	    &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM],
	    &expected_refcount);
	actual_refcount = get_dtl_refcount(spa->spa_root_vdev);
	actual_refcount += get_metaslab_refcount(spa->spa_root_vdev);
	actual_refcount += get_obsolete_refcount(spa->spa_root_vdev);
	actual_refcount += get_prev_obsolete_spacemap_refcount(spa);
	actual_refcount += get_checkpoint_refcount(spa->spa_root_vdev);
	actual_refcount += get_log_spacemap_refcount(spa);

	if (expected_refcount != actual_refcount) {
		(void) printf("space map refcount mismatch: expected %lld != "
		    "actual %lld\n",
		    (longlong_t)expected_refcount,
		    (longlong_t)actual_refcount);
		return (2);
	}
	return (0);
}

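/*
 * A space map is a stream of entries: one-word debug entries (carrying
 * an action, txg, and sync pass), one-word ALLOC/FREE entries, and,
 * with the two-word format, double-word entries that also carry a vdev
 * id.  Decode and print each entry, and cross-check the running
 * allocated total against the space map summary at the end.
 */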
static void
dump_spacemap(objset_t *os, space_map_t *sm)
{
	char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID",
	    "INVALID", "INVALID", "INVALID", "INVALID" };

	if (sm == NULL)
		return;

	(void) printf("space map object %llu:\n",
	    (longlong_t)sm->sm_object);
	(void) printf("  smp_length = 0x%llx\n",
	    (longlong_t)sm->sm_phys->smp_length);
	(void) printf("  smp_alloc = 0x%llx\n",
	    (longlong_t)sm->sm_phys->smp_alloc);

	if (dump_opt['d'] < 6 && dump_opt['m'] < 4)
		return;

	/*
	 * Print out the freelist entries in both encoded and decoded form.
	 */
	uint8_t mapshift = sm->sm_shift;
	int64_t alloc = 0;
	uint64_t word, entry_id = 0;
	for (uint64_t offset = 0; offset < space_map_length(sm);
	    offset += sizeof (word)) {

		VERIFY0(dmu_read(os, space_map_object(sm), offset,
		    sizeof (word), &word, DMU_READ_PREFETCH));

		if (sm_entry_is_debug(word)) {
			(void) printf("\t [%6llu] %s: txg %llu pass %llu\n",
			    (u_longlong_t)entry_id,
			    ddata[SM_DEBUG_ACTION_DECODE(word)],
			    (u_longlong_t)SM_DEBUG_TXG_DECODE(word),
			    (u_longlong_t)SM_DEBUG_SYNCPASS_DECODE(word));
			entry_id++;
			continue;
		}

		uint8_t words;
		char entry_type;
		uint64_t entry_off, entry_run, entry_vdev = SM_NO_VDEVID;

		if (sm_entry_is_single_word(word)) {
			entry_type = (SM_TYPE_DECODE(word) == SM_ALLOC) ?
			    'A' : 'F';
			entry_off = (SM_OFFSET_DECODE(word) << mapshift) +
			    sm->sm_start;
			entry_run = SM_RUN_DECODE(word) << mapshift;
			words = 1;
		} else {
			/* it is a two-word entry so we read another word */
			ASSERT(sm_entry_is_double_word(word));

			uint64_t extra_word;
			offset += sizeof (extra_word);
			VERIFY0(dmu_read(os, space_map_object(sm), offset,
			    sizeof (extra_word), &extra_word,
			    DMU_READ_PREFETCH));

			ASSERT3U(offset, <=, space_map_length(sm));

			entry_run = SM2_RUN_DECODE(word) << mapshift;
			entry_vdev = SM2_VDEV_DECODE(word);
			entry_type = (SM2_TYPE_DECODE(extra_word) == SM_ALLOC) ?
			    'A' : 'F';
			entry_off = (SM2_OFFSET_DECODE(extra_word) <<
			    mapshift) + sm->sm_start;
			words = 2;
		}

		(void) printf("\t [%6llu] %c range:"
		    " %010llx-%010llx size: %06llx vdev: %06llu words: %u\n",
		    (u_longlong_t)entry_id,
		    entry_type, (u_longlong_t)entry_off,
		    (u_longlong_t)(entry_off + entry_run),
		    (u_longlong_t)entry_run,
		    (u_longlong_t)entry_vdev, words);

		if (entry_type == 'A')
			alloc += entry_run;
		else
			alloc -= entry_run;
		entry_id++;
	}
	if (alloc != space_map_allocated(sm)) {
		(void) printf("space_map_object alloc (%lld) INCONSISTENT "
		    "with space map summary (%lld)\n",
		    (longlong_t)space_map_allocated(sm), (longlong_t)alloc);
	}
}

static void
dump_metaslab_stats(metaslab_t *msp)
{
	char maxbuf[32];
	range_tree_t *rt = msp->ms_allocatable;
	zfs_btree_t *t = &msp->ms_allocatable_by_size;
	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;

	/* make sure nicenum has enough space */
	CTASSERT(sizeof (maxbuf) >= NN_NUMBUF_SZ);

	zdb_nicenum(metaslab_largest_allocatable(msp), maxbuf, sizeof (maxbuf));

	(void) printf("\t %25s %10lu %7s %6s %4s %4d%%\n",
	    "segments", zfs_btree_numnodes(t), "maxsize", maxbuf,
	    "freepct", free_pct);
	(void) printf("\tIn-memory histogram:\n");
	dump_histogram(rt->rt_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}

static void
dump_metaslab(metaslab_t *msp)
{
	vdev_t *vd = msp->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	space_map_t *sm = msp->ms_sm;
	char freebuf[32];

	zdb_nicenum(msp->ms_size - space_map_allocated(sm), freebuf,
	    sizeof (freebuf));

	(void) printf(
	    "\tmetaslab %6llu offset %12llx spacemap %6llu free %5s\n",
	    (u_longlong_t)msp->ms_id, (u_longlong_t)msp->ms_start,
	    (u_longlong_t)space_map_object(sm), freebuf);

	if (dump_opt['m'] > 2 && !dump_opt['L']) {
		mutex_enter(&msp->ms_lock);
		VERIFY0(metaslab_load(msp));
		range_tree_stat_verify(msp->ms_allocatable);
		dump_metaslab_stats(msp);
		metaslab_unload(msp);
		mutex_exit(&msp->ms_lock);
	}

	if (dump_opt['m'] > 1 && sm != NULL &&
	    spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		/*
		 * The space map histogram represents free space in chunks
		 * of sm_shift (i.e. bucket 0 refers to 2^sm_shift).
		 */
		(void) printf("\tOn-disk histogram:\t\tfragmentation %llu\n",
		    (u_longlong_t)msp->ms_fragmentation);
		dump_histogram(sm->sm_phys->smp_histogram,
		    SPACE_MAP_HISTOGRAM_SIZE, sm->sm_shift);
	}

	ASSERT(msp->ms_size == (1ULL << vd->vdev_ms_shift));
	dump_spacemap(spa->spa_meta_objset, msp->ms_sm);

	if (spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
		(void) printf("\tFlush data:\n\tunflushed txg=%llu\n\n",
		    (u_longlong_t)metaslab_unflushed_txg(msp));
	}
}

static void
print_vdev_metaslab_header(vdev_t *vd)
{
	vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
	const char *bias_str = "";

	if (alloc_bias == VDEV_BIAS_LOG || vd->vdev_islog) {
		bias_str = VDEV_ALLOC_BIAS_LOG;
	} else if (alloc_bias == VDEV_BIAS_SPECIAL) {
		bias_str = VDEV_ALLOC_BIAS_SPECIAL;
	} else if (alloc_bias == VDEV_BIAS_DEDUP) {
		bias_str = VDEV_ALLOC_BIAS_DEDUP;
	}

	uint64_t ms_flush_data_obj = 0;
	if (vd->vdev_top_zap != 0) {
		int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
		    vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
		    sizeof (uint64_t), 1, &ms_flush_data_obj);
		if (error != ENOENT) {
			ASSERT0(error);
		}
	}

	(void) printf("\tvdev %10llu %s",
	    (u_longlong_t)vd->vdev_id, bias_str);

	if (ms_flush_data_obj != 0) {
		(void) printf(" ms_unflushed_phys object %llu",
		    (u_longlong_t)ms_flush_data_obj);
	}

	(void) printf("\n\t%-10s%5llu %-19s %-15s %-12s\n",
	    "metaslabs", (u_longlong_t)vd->vdev_ms_count,
	    "offset", "spacemap", "free");
	(void) printf("\t%15s %19s %15s %12s\n",
	    "---------------", "-------------------",
	    "---------------", "------------");
}

static void
dump_metaslab_groups(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	metaslab_class_t *mc = spa_normal_class(spa);
	uint64_t fragmentation;

	metaslab_class_histogram_verify(mc);

	for (unsigned c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (mg == NULL || mg->mg_class != mc)
			continue;

		metaslab_group_histogram_verify(mg);
		mg->mg_fragmentation = metaslab_group_fragmentation(mg);

		(void) printf("\tvdev %10llu\t\tmetaslabs%5llu\t\t"
		    "fragmentation",
		    (u_longlong_t)tvd->vdev_id,
		    (u_longlong_t)tvd->vdev_ms_count);
		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
			(void) printf("%3s\n", "-");
		} else {
			(void) printf("%3llu%%\n",
			    (u_longlong_t)mg->mg_fragmentation);
		}
		dump_histogram(mg->mg_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
	}

	(void) printf("\tpool %s\tfragmentation", spa_name(spa));
	fragmentation = metaslab_class_fragmentation(mc);
	if (fragmentation == ZFS_FRAG_INVALID)
		(void) printf("\t%3s\n", "-");
	else
		(void) printf("\t%3llu%%\n", (u_longlong_t)fragmentation);
	dump_histogram(mc->mc_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}

static void
print_vdev_indirect(vdev_t *vd)
{
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	vdev_indirect_births_t *vib = vd->vdev_indirect_births;

	if (vim == NULL) {
		ASSERT3P(vib, ==, NULL);
		return;
	}

	ASSERT3U(vdev_indirect_mapping_object(vim), ==,
	    vic->vic_mapping_object);
	ASSERT3U(vdev_indirect_births_object(vib), ==,
	    vic->vic_births_object);

	(void) printf("indirect births obj %llu:\n",
	    (longlong_t)vic->vic_births_object);
	(void) printf("    vib_count = %llu\n",
	    (longlong_t)vdev_indirect_births_count(vib));
	for (uint64_t i = 0; i < vdev_indirect_births_count(vib); i++) {
		vdev_indirect_birth_entry_phys_t *cur_vibe =
		    &vib->vib_entries[i];
		(void) printf("\toffset %llx -> txg %llu\n",
		    (longlong_t)cur_vibe->vibe_offset,
		    (longlong_t)cur_vibe->vibe_phys_birth_txg);
	}
	(void) printf("\n");

	(void) printf("indirect mapping obj %llu:\n",
	    (longlong_t)vic->vic_mapping_object);
	(void) printf("    vim_max_offset = 0x%llx\n",
	    (longlong_t)vdev_indirect_mapping_max_offset(vim));
	(void) printf("    vim_bytes_mapped = 0x%llx\n",
	    (longlong_t)vdev_indirect_mapping_bytes_mapped(vim));
	(void) printf("    vim_count = %llu\n",
	    (longlong_t)vdev_indirect_mapping_num_entries(vim));

	if (dump_opt['d'] <= 5 && dump_opt['m'] <= 3)
		return;

	uint32_t *counts = vdev_indirect_mapping_load_obsolete_counts(vim);

	for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
		vdev_indirect_mapping_entry_phys_t *vimep =
		    &vim->vim_entries[i];
		(void) printf("\t<%llx:%llx:%llx> -> "
		    "<%llx:%llx:%llx> (%x obsolete)\n",
		    (longlong_t)vd->vdev_id,
		    (longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
		    (longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
		    (longlong_t)DVA_GET_VDEV(&vimep->vimep_dst),
		    (longlong_t)DVA_GET_OFFSET(&vimep->vimep_dst),
		    (longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
		    counts[i]);
	}
	(void) printf("\n");

	uint64_t obsolete_sm_object = vdev_obsolete_sm_object(vd);
	if (obsolete_sm_object != 0) {
		objset_t *mos = vd->vdev_spa->spa_meta_objset;
		(void) printf("obsolete space map object %llu:\n",
		    (u_longlong_t)obsolete_sm_object);
		ASSERT(vd->vdev_obsolete_sm != NULL);
		ASSERT3U(space_map_object(vd->vdev_obsolete_sm), ==,
		    obsolete_sm_object);
		dump_spacemap(mos, vd->vdev_obsolete_sm);
		(void) printf("\n");
	}
}

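/*
 * Without -d, the object arguments select what to dump: the first is a
 * top-level vdev id, and any further arguments name individual
 * metaslabs within that vdev.
 */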
static void
dump_metaslabs(spa_t *spa)
{
	vdev_t *vd, *rvd = spa->spa_root_vdev;
	uint64_t m, c = 0, children = rvd->vdev_children;

	(void) printf("\nMetaslabs:\n");

	if (!dump_opt['d'] && zopt_objects > 0) {
		c = zopt_object[0];

		if (c >= children)
			(void) fatal("bad vdev id: %llu", (u_longlong_t)c);

		if (zopt_objects > 1) {
			vd = rvd->vdev_child[c];
			print_vdev_metaslab_header(vd);

			for (m = 1; m < zopt_objects; m++) {
				if (zopt_object[m] < vd->vdev_ms_count)
					dump_metaslab(
					    vd->vdev_ms[zopt_object[m]]);
				else
					(void) fprintf(stderr, "bad metaslab "
					    "number %llu\n",
					    (u_longlong_t)zopt_object[m]);
			}
			(void) printf("\n");
			return;
		}
		children = c + 1;
	}
	for (; c < children; c++) {
		vd = rvd->vdev_child[c];
		print_vdev_metaslab_header(vd);

		print_vdev_indirect(vd);

		for (m = 0; m < vd->vdev_ms_count; m++)
			dump_metaslab(vd->vdev_ms[m]);
		(void) printf("\n");
	}
}

static void
dump_log_spacemaps(spa_t *spa)
{
	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
		return;

	(void) printf("\nLog Space Maps in Pool:\n");
	for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
	    sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
		space_map_t *sm = NULL;
		VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
		    sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));

		(void) printf("Log Spacemap object %llu txg %llu\n",
		    (u_longlong_t)sls->sls_sm_obj, (u_longlong_t)sls->sls_txg);
		dump_spacemap(spa->spa_meta_objset, sm);
		space_map_close(sm);
	}
	(void) printf("\n");
}

static void
dump_dde(const ddt_t *ddt, const ddt_entry_t *dde, uint64_t index)
{
	const ddt_phys_t *ddp = dde->dde_phys;
	const ddt_key_t *ddk = &dde->dde_key;
	const char *types[4] = { "ditto", "single", "double", "triple" };
	char blkbuf[BP_SPRINTF_LEN];
	blkptr_t blk;

	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (ddp->ddp_phys_birth == 0)
			continue;
		ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
		snprintf_blkptr(blkbuf, sizeof (blkbuf), &blk);
		(void) printf("index %llx refcnt %llu %s %s\n",
		    (u_longlong_t)index, (u_longlong_t)ddp->ddp_refcnt,
		    types[p], blkbuf);
	}
}

static void
dump_dedup_ratio(const ddt_stat_t *dds)
{
	double rL, rP, rD, D, dedup, compress, copies;

	if (dds->dds_blocks == 0)
		return;

	rL = (double)dds->dds_ref_lsize;
	rP = (double)dds->dds_ref_psize;
	rD = (double)dds->dds_ref_dsize;
	D = (double)dds->dds_dsize;

	dedup = rD / D;
	compress = rL / rP;
	copies = rD / rP;

	(void) printf("dedup = %.2f, compress = %.2f, copies = %.2f, "
	    "dedup * compress / copies = %.2f\n\n",
	    dedup, compress, copies, dedup * compress / copies);
}

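/*
 * Print one DDT object: the entry count and per-entry on-disk/in-core
 * sizes, then (at higher -D levels) its histogram and, finally, every
 * entry it contains.
 */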
static void
dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
{
	char name[DDT_NAMELEN];
	ddt_entry_t dde;
	uint64_t walk = 0;
	dmu_object_info_t doi;
	uint64_t count, dspace, mspace;
	int error;

	error = ddt_object_info(ddt, type, class, &doi);

	if (error == ENOENT)
		return;
	ASSERT(error == 0);

	if ((count = ddt_object_count(ddt, type, class)) == 0)
		return;

	dspace = doi.doi_physical_blocks_512 << 9;
	mspace = doi.doi_fill_count * doi.doi_data_block_size;

	ddt_object_name(ddt, type, class, name);

	(void) printf("%s: %llu entries, size %llu on disk, %llu in core\n",
	    name,
	    (u_longlong_t)count,
	    (u_longlong_t)(dspace / count),
	    (u_longlong_t)(mspace / count));

	if (dump_opt['D'] < 3)
		return;

	zpool_dump_ddt(NULL, &ddt->ddt_histogram[type][class]);

	if (dump_opt['D'] < 4)
		return;

	if (dump_opt['D'] < 5 && class == DDT_CLASS_UNIQUE)
		return;

	(void) printf("%s contents:\n\n", name);

	while ((error = ddt_object_walk(ddt, type, class, &walk, &dde)) == 0)
		dump_dde(ddt, &dde, walk);

	ASSERT3U(error, ==, ENOENT);

	(void) printf("\n");
}

static void
dump_all_ddts(spa_t *spa)
{
	ddt_histogram_t ddh_total;
	ddt_stat_t dds_total;

	bzero(&ddh_total, sizeof (ddh_total));
	bzero(&dds_total, sizeof (dds_total));

	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
		ddt_t *ddt = spa->spa_ddt[c];
		for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
			for (enum ddt_class class = 0; class < DDT_CLASSES;
			    class++) {
				dump_ddt(ddt, type, class);
			}
		}
	}

	ddt_get_dedup_stats(spa, &dds_total);

	if (dds_total.dds_blocks == 0) {
		(void) printf("All DDTs are empty\n");
		return;
	}

	(void) printf("\n");

	if (dump_opt['D'] > 1) {
		(void) printf("DDT histogram (aggregated over all DDTs):\n");
		ddt_get_dedup_histogram(spa, &ddh_total);
		zpool_dump_ddt(&dds_total, &ddh_total);
	}

	dump_dedup_ratio(&dds_total);
}

static void
dump_dtl_seg(void *arg, uint64_t start, uint64_t size)
{
	char *prefix = arg;

	(void) printf("%s [%llu,%llu) length %llu\n",
	    prefix,
	    (u_longlong_t)start,
	    (u_longlong_t)(start + size),
	    (u_longlong_t)(size));
}

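/*
 * Recursively print the dirty time logs (missing, partial, scrub,
 * outage) for a vdev and its children, noting whether each vdev's DTL
 * is required for pool integrity.
 */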
static void
dump_dtl(vdev_t *vd, int indent)
{
	spa_t *spa = vd->vdev_spa;
	boolean_t required;
	const char *name[DTL_TYPES] = { "missing", "partial", "scrub",
	    "outage" };
	char prefix[256];

	spa_vdev_state_enter(spa, SCL_NONE);
	required = vdev_dtl_required(vd);
	(void) spa_vdev_state_exit(spa, NULL, 0);

	if (indent == 0)
		(void) printf("\nDirty time logs:\n\n");

	(void) printf("\t%*s%s [%s]\n", indent, "",
	    vd->vdev_path ? vd->vdev_path :
	    vd->vdev_parent ? vd->vdev_ops->vdev_op_type : spa_name(spa),
	    required ? "DTL-required" : "DTL-expendable");

	for (int t = 0; t < DTL_TYPES; t++) {
		range_tree_t *rt = vd->vdev_dtl[t];
		if (range_tree_space(rt) == 0)
			continue;
		(void) snprintf(prefix, sizeof (prefix), "\t%*s%s",
		    indent + 2, "", name[t]);
		range_tree_walk(rt, dump_dtl_seg, prefix);
		if (dump_opt['d'] > 5 && vd->vdev_children == 0)
			dump_spacemap(spa->spa_meta_objset, vd->vdev_dtl_sm);
	}

	for (unsigned c = 0; c < vd->vdev_children; c++)
		dump_dtl(vd->vdev_child[c], indent + 4);
}

static void
dump_history(spa_t *spa)
{
	nvlist_t **events = NULL;
	uint64_t resid, len, off = 0;
	uint_t num = 0;
	int error;
	char tbuf[30];

	char *buf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
	do {
		len = SPA_MAXBLOCKSIZE;

		if ((error = spa_history_get(spa, &off, &len, buf)) != 0) {
			(void) fprintf(stderr, "Unable to read history: "
			    "error %d\n", error);
			umem_free(buf, SPA_MAXBLOCKSIZE);
			return;
		}

		if (zpool_history_unpack(buf, len, &resid, &events, &num) != 0)
			break;

		off -= resid;
	} while (len != 0);
	umem_free(buf, SPA_MAXBLOCKSIZE);

	(void) printf("\nHistory:\n");
	for (unsigned i = 0; i < num; i++) {
		boolean_t printed = B_FALSE;

		if (nvlist_exists(events[i], ZPOOL_HIST_TIME)) {
			time_t tsec;
			struct tm t;

			tsec = fnvlist_lookup_uint64(events[i],
			    ZPOOL_HIST_TIME);
			(void) localtime_r(&tsec, &t);
			(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
		} else {
			tbuf[0] = '\0';
		}

		if (nvlist_exists(events[i], ZPOOL_HIST_CMD)) {
			(void) printf("%s %s\n", tbuf,
			    fnvlist_lookup_string(events[i], ZPOOL_HIST_CMD));
		} else if (nvlist_exists(events[i], ZPOOL_HIST_INT_EVENT)) {
			uint64_t ievent;

			ievent = fnvlist_lookup_uint64(events[i],
			    ZPOOL_HIST_INT_EVENT);
			if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS)
				goto next;

			(void) printf(" %s [internal %s txg:%ju] %s\n",
			    tbuf,
			    zfs_history_event_names[ievent],
			    fnvlist_lookup_uint64(events[i],
			    ZPOOL_HIST_TXG),
			    fnvlist_lookup_string(events[i],
			    ZPOOL_HIST_INT_STR));
		} else if (nvlist_exists(events[i], ZPOOL_HIST_INT_NAME)) {
			(void) printf("%s [txg:%ju] %s", tbuf,
			    fnvlist_lookup_uint64(events[i],
			    ZPOOL_HIST_TXG),
			    fnvlist_lookup_string(events[i],
			    ZPOOL_HIST_INT_NAME));

			if (nvlist_exists(events[i], ZPOOL_HIST_DSNAME)) {
				(void) printf(" %s (%llu)",
				    fnvlist_lookup_string(events[i],
				    ZPOOL_HIST_DSNAME),
				    (u_longlong_t)fnvlist_lookup_uint64(
				    events[i],
				    ZPOOL_HIST_DSID));
			}

			(void) printf(" %s\n", fnvlist_lookup_string(events[i],
			    ZPOOL_HIST_INT_STR));
		} else if (nvlist_exists(events[i], ZPOOL_HIST_IOCTL)) {
			(void) printf("%s ioctl %s\n", tbuf,
			    fnvlist_lookup_string(events[i],
			    ZPOOL_HIST_IOCTL));

			if (nvlist_exists(events[i], ZPOOL_HIST_INPUT_NVL)) {
				(void) printf("    input:\n");
				dump_nvlist(fnvlist_lookup_nvlist(events[i],
				    ZPOOL_HIST_INPUT_NVL), 8);
			}
			if (nvlist_exists(events[i], ZPOOL_HIST_OUTPUT_NVL)) {
				(void) printf("    output:\n");
				dump_nvlist(fnvlist_lookup_nvlist(events[i],
				    ZPOOL_HIST_OUTPUT_NVL), 8);
			}
			if (nvlist_exists(events[i], ZPOOL_HIST_ERRNO)) {
				(void) printf("    errno: %lld\n",
				    (longlong_t)fnvlist_lookup_int64(events[i],
				    ZPOOL_HIST_ERRNO));
			}
		} else {
			goto next;
		}

		printed = B_TRUE;
next:
		if (dump_opt['h'] > 1) {
			if (!printed)
				(void) printf("unrecognized record:\n");
			dump_nvlist(events[i], 2);
		}
	}
}

/*ARGSUSED*/
static void
dump_dnode(objset_t *os, uint64_t object, void *data, size_t size)
{
}

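/*
 * Translate a block id at a given indirection level into a byte offset
 * within the object: each level multiplies the span by the number of
 * block pointers per indirect block (2^(indblkshift - blkptrshift)).
 */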
static uint64_t
blkid2offset(const dnode_phys_t *dnp, const blkptr_t *bp,
    const zbookmark_phys_t *zb)
{
	if (dnp == NULL) {
		ASSERT(zb->zb_level < 0);
		if (zb->zb_object == 0)
			return (zb->zb_blkid);
		return (zb->zb_blkid * BP_GET_LSIZE(bp));
	}

	ASSERT(zb->zb_level >= 0);

	return ((zb->zb_blkid <<
	    (zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) *
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
}

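/*
 * Compact one-line rendering of a block pointer: each DVA as
 * vdev:offset:asize, then logical/physical sizes, fill count, and birth
 * txgs.  Embedded and hole block pointers get abbreviated forms.
 */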
static void
snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	unsigned int ndvas = dump_opt['d'] > 5 ? BP_GET_NDVAS(bp) : 1;

	if (dump_opt['b'] >= 6) {
		snprintf_blkptr(blkbuf, buflen, bp);
		return;
	}

	if (BP_IS_EMBEDDED(bp)) {
		(void) sprintf(blkbuf,
		    "EMBEDDED et=%u %llxL/%llxP B=%llu",
		    (int)BPE_GET_ETYPE(bp),
		    (u_longlong_t)BPE_GET_LSIZE(bp),
		    (u_longlong_t)BPE_GET_PSIZE(bp),
		    (u_longlong_t)bp->blk_birth);
		return;
	}

	blkbuf[0] = '\0';
	for (unsigned int i = 0; i < ndvas; i++)
		(void) snprintf(blkbuf + strlen(blkbuf),
		    buflen - strlen(blkbuf), "%llu:%llx:%llx ",
		    (u_longlong_t)DVA_GET_VDEV(&dva[i]),
		    (u_longlong_t)DVA_GET_OFFSET(&dva[i]),
		    (u_longlong_t)DVA_GET_ASIZE(&dva[i]));

	if (BP_IS_HOLE(bp)) {
		(void) snprintf(blkbuf + strlen(blkbuf),
		    buflen - strlen(blkbuf),
		    "%llxL B=%llu",
		    (u_longlong_t)BP_GET_LSIZE(bp),
		    (u_longlong_t)bp->blk_birth);
	} else {
		(void) snprintf(blkbuf + strlen(blkbuf),
		    buflen - strlen(blkbuf),
		    "%llxL/%llxP F=%llu B=%llu/%llu",
		    (u_longlong_t)BP_GET_LSIZE(bp),
		    (u_longlong_t)BP_GET_PSIZE(bp),
		    (u_longlong_t)BP_GET_FILL(bp),
		    (u_longlong_t)bp->blk_birth,
		    (u_longlong_t)BP_PHYSICAL_BIRTH(bp));
	}
}

static void
print_indirect(blkptr_t *bp, const zbookmark_phys_t *zb,
    const dnode_phys_t *dnp)
{
	char blkbuf[BP_SPRINTF_LEN];
	int l;

	if (!BP_IS_EMBEDDED(bp)) {
		ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type);
		ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level);
	}

	(void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb));

	ASSERT(zb->zb_level >= 0);

	for (l = dnp->dn_nlevels - 1; l >= -1; l--) {
		if (l == zb->zb_level) {
			(void) printf("L%llx", (u_longlong_t)zb->zb_level);
		} else {
			(void) printf(" ");
		}
	}

	snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp);
	(void) printf("%s\n", blkbuf);
}

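/*
 * Depth-first walk of an object's block tree: print this block pointer,
 * and for non-hole indirect blocks read the block through the ARC and
 * recurse into each child block pointer, verifying that the children's
 * fill counts sum to the parent's.
 */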
1564 static int
visit_indirect(spa_t * spa,const dnode_phys_t * dnp,blkptr_t * bp,const zbookmark_phys_t * zb)1565 visit_indirect(spa_t *spa, const dnode_phys_t *dnp,
1566 blkptr_t *bp, const zbookmark_phys_t *zb)
1567 {
1568 int err = 0;
1569
1570 if (bp->blk_birth == 0)
1571 return (0);
1572
1573 print_indirect(bp, zb, dnp);
1574
1575 if (BP_GET_LEVEL(bp) > 0 && !BP_IS_HOLE(bp)) {
1576 arc_flags_t flags = ARC_FLAG_WAIT;
1577 int i;
1578 blkptr_t *cbp;
1579 int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
1580 arc_buf_t *buf;
1581 uint64_t fill = 0;
1582
1583 err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
1584 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
1585 if (err)
1586 return (err);
1587 ASSERT(buf->b_data);
1588
1589 /* recursively visit blocks below this */
1590 cbp = buf->b_data;
1591 for (i = 0; i < epb; i++, cbp++) {
1592 zbookmark_phys_t czb;
1593
1594 SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
1595 zb->zb_level - 1,
1596 zb->zb_blkid * epb + i);
1597 err = visit_indirect(spa, dnp, cbp, &czb);
1598 if (err)
1599 break;
1600 fill += BP_GET_FILL(cbp);
1601 }
1602 if (!err)
1603 ASSERT3U(fill, ==, BP_GET_FILL(bp));
1604 arc_buf_destroy(buf, &buf);
1605 }
1606
1607 return (err);
1608 }
1609
1610 /*ARGSUSED*/
1611 static void
dump_indirect(dnode_t * dn)1612 dump_indirect(dnode_t *dn)
1613 {
1614 dnode_phys_t *dnp = dn->dn_phys;
1615 int j;
1616 zbookmark_phys_t czb;
1617
1618 (void) printf("Indirect blocks:\n");
1619
1620 SET_BOOKMARK(&czb, dmu_objset_id(dn->dn_objset),
1621 dn->dn_object, dnp->dn_nlevels - 1, 0);
1622 for (j = 0; j < dnp->dn_nblkptr; j++) {
1623 czb.zb_blkid = j;
1624 (void) visit_indirect(dmu_objset_spa(dn->dn_objset), dnp,
1625 &dnp->dn_blkptr[j], &czb);
1626 }
1627
1628 (void) printf("\n");
1629 }
1630
1631 /*ARGSUSED*/
1632 static void
dump_dsl_dir(objset_t * os,uint64_t object,void * data,size_t size)1633 dump_dsl_dir(objset_t *os, uint64_t object, void *data, size_t size)
1634 {
1635 dsl_dir_phys_t *dd = data;
1636 time_t crtime;
1637 char nice[32];
1638
1639 /* make sure nicenum has enough space */
1640 CTASSERT(sizeof (nice) >= NN_NUMBUF_SZ);
1641
1642 if (dd == NULL)
1643 return;
1644
1645 ASSERT3U(size, >=, sizeof (dsl_dir_phys_t));
1646
1647 crtime = dd->dd_creation_time;
1648 (void) printf("\t\tcreation_time = %s", ctime(&crtime));
1649 (void) printf("\t\thead_dataset_obj = %llu\n",
1650 (u_longlong_t)dd->dd_head_dataset_obj);
1651 (void) printf("\t\tparent_dir_obj = %llu\n",
1652 (u_longlong_t)dd->dd_parent_obj);
1653 (void) printf("\t\torigin_obj = %llu\n",
1654 (u_longlong_t)dd->dd_origin_obj);
1655 (void) printf("\t\tchild_dir_zapobj = %llu\n",
1656 (u_longlong_t)dd->dd_child_dir_zapobj);
1657 zdb_nicenum(dd->dd_used_bytes, nice, sizeof (nice));
1658 (void) printf("\t\tused_bytes = %s\n", nice);
1659 zdb_nicenum(dd->dd_compressed_bytes, nice, sizeof (nice));
1660 (void) printf("\t\tcompressed_bytes = %s\n", nice);
1661 zdb_nicenum(dd->dd_uncompressed_bytes, nice, sizeof (nice));
1662 (void) printf("\t\tuncompressed_bytes = %s\n", nice);
1663 zdb_nicenum(dd->dd_quota, nice, sizeof (nice));
1664 (void) printf("\t\tquota = %s\n", nice);
1665 zdb_nicenum(dd->dd_reserved, nice, sizeof (nice));
1666 (void) printf("\t\treserved = %s\n", nice);
1667 (void) printf("\t\tprops_zapobj = %llu\n",
1668 (u_longlong_t)dd->dd_props_zapobj);
1669 (void) printf("\t\tdeleg_zapobj = %llu\n",
1670 (u_longlong_t)dd->dd_deleg_zapobj);
1671 (void) printf("\t\tflags = %llx\n",
1672 (u_longlong_t)dd->dd_flags);
1673
1674 #define DO(which) \
1675 zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice, \
1676 sizeof (nice)); \
1677 (void) printf("\t\tused_breakdown[" #which "] = %s\n", nice)
1678 DO(HEAD);
1679 DO(SNAP);
1680 DO(CHILD);
1681 DO(CHILD_RSRV);
1682 DO(REFRSRV);
1683 #undef DO
1684 (void) printf("\t\tclones = %llu\n",
1685 (u_longlong_t)dd->dd_clones);
1686 }
1687
1688 /*ARGSUSED*/
1689 static void
dump_dsl_dataset(objset_t * os,uint64_t object,void * data,size_t size)1690 dump_dsl_dataset(objset_t *os, uint64_t object, void *data, size_t size)
1691 {
1692 dsl_dataset_phys_t *ds = data;
1693 time_t crtime;
1694 char used[32], compressed[32], uncompressed[32], unique[32];
1695 char blkbuf[BP_SPRINTF_LEN];
1696
1697 /* make sure nicenum has enough space */
1698 CTASSERT(sizeof (used) >= NN_NUMBUF_SZ);
1699 CTASSERT(sizeof (compressed) >= NN_NUMBUF_SZ);
1700 CTASSERT(sizeof (uncompressed) >= NN_NUMBUF_SZ);
1701 CTASSERT(sizeof (unique) >= NN_NUMBUF_SZ);
1702
1703 if (ds == NULL)
1704 return;
1705
1706 ASSERT(size == sizeof (*ds));
1707 crtime = ds->ds_creation_time;
1708 zdb_nicenum(ds->ds_referenced_bytes, used, sizeof (used));
1709 zdb_nicenum(ds->ds_compressed_bytes, compressed, sizeof (compressed));
1710 zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed,
1711 sizeof (uncompressed));
1712 zdb_nicenum(ds->ds_unique_bytes, unique, sizeof (unique));
1713 snprintf_blkptr(blkbuf, sizeof (blkbuf), &ds->ds_bp);
1714
1715 (void) printf("\t\tdir_obj = %llu\n",
1716 (u_longlong_t)ds->ds_dir_obj);
1717 (void) printf("\t\tprev_snap_obj = %llu\n",
1718 (u_longlong_t)ds->ds_prev_snap_obj);
1719 (void) printf("\t\tprev_snap_txg = %llu\n",
1720 (u_longlong_t)ds->ds_prev_snap_txg);
1721 (void) printf("\t\tnext_snap_obj = %llu\n",
1722 (u_longlong_t)ds->ds_next_snap_obj);
1723 (void) printf("\t\tsnapnames_zapobj = %llu\n",
1724 (u_longlong_t)ds->ds_snapnames_zapobj);
1725 (void) printf("\t\tnum_children = %llu\n",
1726 (u_longlong_t)ds->ds_num_children);
1727 (void) printf("\t\tuserrefs_obj = %llu\n",
1728 (u_longlong_t)ds->ds_userrefs_obj);
1729 (void) printf("\t\tcreation_time = %s", ctime(&crtime));
1730 (void) printf("\t\tcreation_txg = %llu\n",
1731 (u_longlong_t)ds->ds_creation_txg);
1732 (void) printf("\t\tdeadlist_obj = %llu\n",
1733 (u_longlong_t)ds->ds_deadlist_obj);
1734 (void) printf("\t\tused_bytes = %s\n", used);
1735 (void) printf("\t\tcompressed_bytes = %s\n", compressed);
1736 (void) printf("\t\tuncompressed_bytes = %s\n", uncompressed);
1737 (void) printf("\t\tunique = %s\n", unique);
1738 (void) printf("\t\tfsid_guid = %llu\n",
1739 (u_longlong_t)ds->ds_fsid_guid);
1740 (void) printf("\t\tguid = %llu\n",
1741 (u_longlong_t)ds->ds_guid);
1742 (void) printf("\t\tflags = %llx\n",
1743 (u_longlong_t)ds->ds_flags);
1744 (void) printf("\t\tnext_clones_obj = %llu\n",
1745 (u_longlong_t)ds->ds_next_clones_obj);
1746 (void) printf("\t\tprops_obj = %llu\n",
1747 (u_longlong_t)ds->ds_props_obj);
1748 (void) printf("\t\tbp = %s\n", blkbuf);
1749 }
1750
1751 /* ARGSUSED */
1752 static int
dump_bptree_cb(void * arg,const blkptr_t * bp,dmu_tx_t * tx)1753 dump_bptree_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
1754 {
1755 char blkbuf[BP_SPRINTF_LEN];
1756
1757 if (bp->blk_birth != 0) {
1758 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
1759 (void) printf("\t%s\n", blkbuf);
1760 }
1761 return (0);
1762 }
1763
1764 static void
dump_bptree(objset_t * os,uint64_t obj,const char * name)1765 dump_bptree(objset_t *os, uint64_t obj, const char *name)
1766 {
1767 char bytes[32];
1768 bptree_phys_t *bt;
1769 dmu_buf_t *db;
1770
1771 /* make sure nicenum has enough space */
1772 CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
1773
1774 if (dump_opt['d'] < 3)
1775 return;
1776
1777 VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
1778 bt = db->db_data;
1779 zdb_nicenum(bt->bt_bytes, bytes, sizeof (bytes));
1780 (void) printf("\n %s: %llu datasets, %s\n",
1781 name, (unsigned long long)(bt->bt_end - bt->bt_begin), bytes);
1782 dmu_buf_rele(db, FTAG);
1783
1784 if (dump_opt['d'] < 5)
1785 return;
1786
1787 (void) printf("\n");
1788
1789 (void) bptree_iterate(os, obj, B_FALSE, dump_bptree_cb, NULL, NULL);
1790 }
1791
1792 /* ARGSUSED */
1793 static int
1794 dump_bpobj_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
1795 {
1796 char blkbuf[BP_SPRINTF_LEN];
1797
1798 ASSERT(bp->blk_birth != 0);
1799 snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp);
1800 (void) printf("\t%s\n", blkbuf);
1801 return (0);
1802 }
1803
1804 static void
1805 dump_full_bpobj(bpobj_t *bpo, const char *name, int indent)
1806 {
1807 char bytes[32];
1808 char comp[32];
1809 char uncomp[32];
1810
1811 /* make sure nicenum has enough space */
1812 CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
1813 CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
1814 CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);
1815
1816 if (dump_opt['d'] < 3)
1817 return;
1818
1819 zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes, sizeof (bytes));
1820 if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
1821 zdb_nicenum(bpo->bpo_phys->bpo_comp, comp, sizeof (comp));
1822 zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp, sizeof (uncomp));
1823 (void) printf(" %*s: object %llu, %llu local blkptrs, "
1824 "%llu subobjs in object %llu, %s (%s/%s comp)\n",
1825 indent * 8, name,
1826 (u_longlong_t)bpo->bpo_object,
1827 (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
1828 (u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
1829 (u_longlong_t)bpo->bpo_phys->bpo_subobjs,
1830 bytes, comp, uncomp);
1831
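/*
 * Each subobj is itself a bpobj; open it and dump it recursively,
 * one indent level deeper.
 */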
1832 for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
1833 uint64_t subobj;
1834 bpobj_t subbpo;
1835 int error;
1836 VERIFY0(dmu_read(bpo->bpo_os,
1837 bpo->bpo_phys->bpo_subobjs,
1838 i * sizeof (subobj), sizeof (subobj), &subobj, 0));
1839 error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
1840 if (error != 0) {
1841 (void) printf("ERROR %u while trying to open "
1842 "subobj id %llu\n",
1843 error, (u_longlong_t)subobj);
1844 continue;
1845 }
1846 dump_full_bpobj(&subbpo, "subobj", indent + 1);
1847 bpobj_close(&subbpo);
1848 }
1849 } else {
1850 (void) printf(" %*s: object %llu, %llu blkptrs, %s\n",
1851 indent * 8, name,
1852 (u_longlong_t)bpo->bpo_object,
1853 (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
1854 bytes);
1855 }
1856
1857 if (dump_opt['d'] < 5)
1858 return;
1859
1860
1861 if (indent == 0) {
1862 (void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL);
1863 (void) printf("\n");
1864 }
1865 }
1866
1867 static void
1868 bpobj_count_refd(bpobj_t *bpo)
1869 {
1870 mos_obj_refd(bpo->bpo_object);
1871
1872 if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
1873 mos_obj_refd(bpo->bpo_phys->bpo_subobjs);
1874 for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
1875 uint64_t subobj;
1876 bpobj_t subbpo;
1877 int error;
1878 VERIFY0(dmu_read(bpo->bpo_os,
1879 bpo->bpo_phys->bpo_subobjs,
1880 i * sizeof (subobj), sizeof (subobj), &subobj, 0));
1881 error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
1882 if (error != 0) {
1883 (void) printf("ERROR %u while trying to open "
1884 "subobj id %llu\n",
1885 error, (u_longlong_t)subobj);
1886 continue;
1887 }
1888 bpobj_count_refd(&subbpo);
1889 bpobj_close(&subbpo);
1890 }
1891 }
1892 }
1893
1894 static void
1895 dump_deadlist(dsl_deadlist_t *dl)
1896 {
1897 dsl_deadlist_entry_t *dle;
1898 uint64_t unused;
1899 char bytes[32];
1900 char comp[32];
1901 char uncomp[32];
1902 uint64_t empty_bpobj =
1903 dmu_objset_spa(dl->dl_os)->spa_dsl_pool->dp_empty_bpobj;
1904
1905 /* force the tree to be loaded */
1906 dsl_deadlist_space_range(dl, 0, UINT64_MAX, &unused, &unused, &unused);
1907
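/*
 * Old-format deadlists are a single bpobj; new-format deadlists are
 * an AVL tree of (mintxg, bpobj) entries. Entries that reference the
 * pool's shared dp_empty_bpobj are skipped below so that the shared
 * object is not counted as referenced more than once.
 */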
1908 if (dl->dl_oldfmt) {
1909 if (dl->dl_bpobj.bpo_object != empty_bpobj)
1910 bpobj_count_refd(&dl->dl_bpobj);
1911 } else {
1912 mos_obj_refd(dl->dl_object);
1913 for (dle = avl_first(&dl->dl_tree); dle;
1914 dle = AVL_NEXT(&dl->dl_tree, dle)) {
1915 if (dle->dle_bpobj.bpo_object != empty_bpobj)
1916 bpobj_count_refd(&dle->dle_bpobj);
1917 }
1918 }
1919
1920 /* make sure nicenum has enough space */
1921 CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
1922 CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
1923 CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);
1924
1925 if (dump_opt['d'] < 3)
1926 return;
1927
1928 if (dl->dl_oldfmt) {
1929 dump_full_bpobj(&dl->dl_bpobj, "old-format deadlist", 0);
1930 return;
1931 }
1932
1933 zdb_nicenum(dl->dl_phys->dl_used, bytes, sizeof (bytes));
1934 zdb_nicenum(dl->dl_phys->dl_comp, comp, sizeof (comp));
1935 zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp, sizeof (uncomp));
1936 (void) printf("\n Deadlist: %s (%s/%s comp)\n",
1937 bytes, comp, uncomp);
1938
1939 if (dump_opt['d'] < 4)
1940 return;
1941
1942 (void) printf("\n");
1943
1944 for (dle = avl_first(&dl->dl_tree); dle;
1945 dle = AVL_NEXT(&dl->dl_tree, dle)) {
1946 if (dump_opt['d'] >= 5) {
1947 char buf[128];
1948 (void) snprintf(buf, sizeof (buf),
1949 "mintxg %llu -> obj %llu",
1950 (longlong_t)dle->dle_mintxg,
1951 (longlong_t)dle->dle_bpobj.bpo_object);
1952
1953 dump_full_bpobj(&dle->dle_bpobj, buf, 0);
1954 } else {
1955 (void) printf("mintxg %llu -> obj %llu\n",
1956 (longlong_t)dle->dle_mintxg,
1957 (longlong_t)dle->dle_bpobj.bpo_object);
1958 }
1959 }
1960 }
1961
1962 static avl_tree_t idx_tree;
1963 static avl_tree_t domain_tree;
1964 static boolean_t fuid_table_loaded;
1965 static objset_t *sa_os = NULL;
1966 static sa_attr_type_t *sa_attr_table = NULL;
1967
1968 static int
1969 open_objset(const char *path, dmu_objset_type_t type, void *tag, objset_t **osp)
1970 {
1971 int err;
1972 uint64_t sa_attrs = 0;
1973 uint64_t version = 0;
1974
1975 VERIFY3P(sa_os, ==, NULL);
1976 err = dmu_objset_own(path, type, B_TRUE, B_FALSE, tag, osp);
1977 if (err != 0) {
1978 (void) fprintf(stderr, "failed to own dataset '%s': %s\n", path,
1979 strerror(err));
1980 return (err);
1981 }
1982
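/*
 * For unencrypted ZPL datasets, also set up the system-attribute
 * (SA) table so that dump_znode() can decode per-file attributes.
 */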
1983 if (dmu_objset_type(*osp) == DMU_OST_ZFS && !(*osp)->os_encrypted) {
1984 (void) zap_lookup(*osp, MASTER_NODE_OBJ, ZPL_VERSION_STR,
1985 8, 1, &version);
1986 if (version >= ZPL_VERSION_SA) {
1987 (void) zap_lookup(*osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS,
1988 8, 1, &sa_attrs);
1989 }
1990 err = sa_setup(*osp, sa_attrs, zfs_attr_table, ZPL_END,
1991 &sa_attr_table);
1992 if (err != 0) {
1993 (void) fprintf(stderr, "sa_setup failed: %s\n",
1994 strerror(err));
1995 dmu_objset_disown(*osp, B_FALSE, tag);
1996 *osp = NULL;
1997 }
1998 }
1999 sa_os = *osp;
2000
2001 return (err);
2002 }
2003
2004 static void
2005 close_objset(objset_t *os, void *tag)
2006 {
2007 VERIFY3P(os, ==, sa_os);
2008 if (os->os_sa != NULL)
2009 sa_tear_down(os);
2010 dmu_objset_disown(os, B_FALSE, tag);
2011 sa_attr_table = NULL;
2012 sa_os = NULL;
2013 }
2014
2015 static void
2016 fuid_table_destroy()
2017 {
2018 if (fuid_table_loaded) {
2019 zfs_fuid_table_destroy(&idx_tree, &domain_tree);
2020 fuid_table_loaded = B_FALSE;
2021 }
2022 }
2023
2024 /*
2025 * Print uid or gid information.
2026 * For a normal POSIX id, just the id is printed in decimal format.
2027 * For CIFS files with a FUID, the fuid is printed in hex followed by
2028 * the domain-rid string.
2029 */
2030 static void
2031 print_idstr(uint64_t id, const char *id_type)
2032 {
2033 if (FUID_INDEX(id)) {
2034 char *domain;
2035
2036 domain = zfs_fuid_idx_domain(&idx_tree, FUID_INDEX(id));
2037 (void) printf("\t%s %llx [%s-%d]\n", id_type,
2038 (u_longlong_t)id, domain, (int)FUID_RID(id));
2039 } else {
2040 (void) printf("\t%s %llu\n", id_type, (u_longlong_t)id);
2041 }
2042
2043 }
2044
2045 static void
2046 dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid)
2047 {
2048 uint32_t uid_idx, gid_idx;
2049
2050 uid_idx = FUID_INDEX(uid);
2051 gid_idx = FUID_INDEX(gid);
2052
2053 /* Load domain table, if not already loaded */
2054 if (!fuid_table_loaded && (uid_idx || gid_idx)) {
2055 uint64_t fuid_obj;
2056
2057 /* first find the fuid object. It lives in the master node */
2058 VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES,
2059 8, 1, &fuid_obj) == 0);
2060 zfs_fuid_avl_tree_create(&idx_tree, &domain_tree);
2061 (void) zfs_fuid_table_load(os, fuid_obj,
2062 &idx_tree, &domain_tree);
2063 fuid_table_loaded = B_TRUE;
2064 }
2065
2066 print_idstr(uid, "uid");
2067 print_idstr(gid, "gid");
2068 }
2069
2070 /*ARGSUSED*/
2071 static void
2072 dump_znode(objset_t *os, uint64_t object, void *data, size_t size)
2073 {
2074 char path[MAXPATHLEN * 2]; /* allow for xattr and failure prefix */
2075 sa_handle_t *hdl;
2076 uint64_t xattr, rdev, gen;
2077 uint64_t uid, gid, mode, fsize, parent, links;
2078 uint64_t pflags;
2079 uint64_t acctm[2], modtm[2], chgtm[2], crtm[2];
2080 time_t z_crtime, z_atime, z_mtime, z_ctime;
2081 sa_bulk_attr_t bulk[12];
2082 int idx = 0;
2083 int error;
2084
2085 VERIFY3P(os, ==, sa_os);
2086 if (sa_handle_get(os, object, NULL, SA_HDL_PRIVATE, &hdl)) {
2087 (void) printf("Failed to get handle for SA znode\n");
2088 return;
2089 }
2090
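/* Fetch all of the znode's SA attributes in one bulk lookup. */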
2091 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_UID], NULL, &uid, 8);
2092 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GID], NULL, &gid, 8);
2093 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_LINKS], NULL,
2094 &links, 8);
2095 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GEN], NULL, &gen, 8);
2096 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MODE], NULL,
2097 &mode, 8);
2098 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_PARENT],
2099 NULL, &parent, 8);
2100 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_SIZE], NULL,
2101 &fsize, 8);
2102 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_ATIME], NULL,
2103 acctm, 16);
2104 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MTIME], NULL,
2105 modtm, 16);
2106 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CRTIME], NULL,
2107 crtm, 16);
2108 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CTIME], NULL,
2109 chgtm, 16);
2110 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_FLAGS], NULL,
2111 &pflags, 8);
2112
2113 if (sa_bulk_lookup(hdl, bulk, idx)) {
2114 (void) sa_handle_destroy(hdl);
2115 return;
2116 }
2117
2118 z_crtime = (time_t)crtm[0];
2119 z_atime = (time_t)acctm[0];
2120 z_mtime = (time_t)modtm[0];
2121 z_ctime = (time_t)chgtm[0];
2122
2123 if (dump_opt['d'] > 4) {
2124 error = zfs_obj_to_path(os, object, path, sizeof (path));
2125 if (error == ESTALE) {
2126 (void) snprintf(path, sizeof (path), "on delete queue");
2127 } else if (error != 0) {
2128 leaked_objects++;
2129 (void) snprintf(path, sizeof (path),
2130 "path not found, possibly leaked");
2131 }
2132 (void) printf("\tpath %s\n", path);
2133 }
2134 dump_uidgid(os, uid, gid);
2135 (void) printf("\tatime %s", ctime(&z_atime));
2136 (void) printf("\tmtime %s", ctime(&z_mtime));
2137 (void) printf("\tctime %s", ctime(&z_ctime));
2138 (void) printf("\tcrtime %s", ctime(&z_crtime));
2139 (void) printf("\tgen %llu\n", (u_longlong_t)gen);
2140 (void) printf("\tmode %llo\n", (u_longlong_t)mode);
2141 (void) printf("\tsize %llu\n", (u_longlong_t)fsize);
2142 (void) printf("\tparent %llu\n", (u_longlong_t)parent);
2143 (void) printf("\tlinks %llu\n", (u_longlong_t)links);
2144 (void) printf("\tpflags %llx\n", (u_longlong_t)pflags);
2145 if (dmu_objset_projectquota_enabled(os) && (pflags & ZFS_PROJID)) {
2146 uint64_t projid;
2147
2148 if (sa_lookup(hdl, sa_attr_table[ZPL_PROJID], &projid,
2149 sizeof (uint64_t)) == 0)
2150 (void) printf("\tprojid %llu\n", (u_longlong_t)projid);
2151 }
2152 if (sa_lookup(hdl, sa_attr_table[ZPL_XATTR], &xattr,
2153 sizeof (uint64_t)) == 0)
2154 (void) printf("\txattr %llu\n", (u_longlong_t)xattr);
2155 if (sa_lookup(hdl, sa_attr_table[ZPL_RDEV], &rdev,
2156 sizeof (uint64_t)) == 0)
2157 (void) printf("\trdev 0x%016llx\n", (u_longlong_t)rdev);
2158 sa_handle_destroy(hdl);
2159 }
2160
2161 /*ARGSUSED*/
2162 static void
2163 dump_acl(objset_t *os, uint64_t object, void *data, size_t size)
2164 {
2165 }
2166
2167 /*ARGSUSED*/
2168 static void
2169 dump_dmu_objset(objset_t *os, uint64_t object, void *data, size_t size)
2170 {
2171 }
2172
2173
2174 static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = {
2175 dump_none, /* unallocated */
2176 dump_zap, /* object directory */
2177 dump_uint64, /* object array */
2178 dump_none, /* packed nvlist */
2179 dump_packed_nvlist, /* packed nvlist size */
2180 dump_none, /* bpobj */
2181 dump_bpobj, /* bpobj header */
2182 dump_none, /* SPA space map header */
2183 dump_none, /* SPA space map */
2184 dump_none, /* ZIL intent log */
2185 dump_dnode, /* DMU dnode */
2186 dump_dmu_objset, /* DMU objset */
2187 dump_dsl_dir, /* DSL directory */
2188 dump_zap, /* DSL directory child map */
2189 dump_zap, /* DSL dataset snap map */
2190 dump_zap, /* DSL props */
2191 dump_dsl_dataset, /* DSL dataset */
2192 dump_znode, /* ZFS znode */
2193 dump_acl, /* ZFS V0 ACL */
2194 dump_uint8, /* ZFS plain file */
2195 dump_zpldir, /* ZFS directory */
2196 dump_zap, /* ZFS master node */
2197 dump_zap, /* ZFS delete queue */
2198 dump_uint8, /* zvol object */
2199 dump_zap, /* zvol prop */
2200 dump_uint8, /* other uint8[] */
2201 dump_uint64, /* other uint64[] */
2202 dump_zap, /* other ZAP */
2203 dump_zap, /* persistent error log */
2204 dump_uint8, /* SPA history */
2205 dump_history_offsets, /* SPA history offsets */
2206 dump_zap, /* Pool properties */
2207 dump_zap, /* DSL permissions */
2208 dump_acl, /* ZFS ACL */
2209 dump_uint8, /* ZFS SYSACL */
2210 dump_none, /* FUID nvlist */
2211 dump_packed_nvlist, /* FUID nvlist size */
2212 dump_zap, /* DSL dataset next clones */
2213 dump_zap, /* DSL scrub queue */
2214 dump_zap, /* ZFS user/group/project used */
2215 dump_zap, /* ZFS user/group/project quota */
2216 dump_zap, /* snapshot refcount tags */
2217 dump_ddt_zap, /* DDT ZAP object */
2218 dump_zap, /* DDT statistics */
2219 dump_znode, /* SA object */
2220 dump_zap, /* SA Master Node */
2221 dump_sa_attrs, /* SA attribute registration */
2222 dump_sa_layouts, /* SA attribute layouts */
2223 dump_zap, /* DSL scrub translations */
2224 dump_none, /* fake dedup BP */
2225 dump_zap, /* deadlist */
2226 dump_none, /* deadlist hdr */
2227 dump_zap, /* dsl clones */
2228 dump_bpobj_subobjs, /* bpobj subobjs */
2229 dump_unknown, /* Unknown type, must be last */
2230 };
2231
2232 static void
2233 dump_object(objset_t *os, uint64_t object, int verbosity, int *print_header,
2234 uint64_t *dnode_slots_used)
2235 {
2236 dmu_buf_t *db = NULL;
2237 dmu_object_info_t doi;
2238 dnode_t *dn;
2239 boolean_t dnode_held = B_FALSE;
2240 void *bonus = NULL;
2241 size_t bsize = 0;
2242 char iblk[32], dblk[32], lsize[32], asize[32], fill[32], dnsize[32];
2243 char bonus_size[32];
2244 char aux[50];
2245 int error;
2246
2247 /* make sure nicenum has enough space */
2248 CTASSERT(sizeof (iblk) >= NN_NUMBUF_SZ);
2249 CTASSERT(sizeof (dblk) >= NN_NUMBUF_SZ);
2250 CTASSERT(sizeof (lsize) >= NN_NUMBUF_SZ);
2251 CTASSERT(sizeof (asize) >= NN_NUMBUF_SZ);
2252 CTASSERT(sizeof (bonus_size) >= NN_NUMBUF_SZ);
2253
2254 if (*print_header) {
2255 (void) printf("\n%10s %3s %5s %5s %5s %6s %5s %6s %s\n",
2256 "Object", "lvl", "iblk", "dblk", "dsize", "dnsize",
2257 "lsize", "%full", "type");
2258 *print_header = 0;
2259 }
2260
2261 if (object == 0) {
2262 dn = DMU_META_DNODE(os);
2263 dmu_object_info_from_dnode(dn, &doi);
2264 } else {
2265 /*
2266 * Encrypted datasets will have sensitive bonus buffers
2267 * encrypted. Therefore we cannot hold the bonus buffer and
2268 * must hold the dnode itself instead.
2269 */
2270 error = dmu_object_info(os, object, &doi);
2271 if (error)
2272 fatal("dmu_object_info() failed, errno %u", error);
2273
2274 if (os->os_encrypted &&
2275 DMU_OT_IS_ENCRYPTED(doi.doi_bonus_type)) {
2276 error = dnode_hold(os, object, FTAG, &dn);
2277 if (error)
2278 fatal("dnode_hold() failed, errno %u", error);
2279 dnode_held = B_TRUE;
2280 } else {
2281 error = dmu_bonus_hold(os, object, FTAG, &db);
2282 if (error)
2283 fatal("dmu_bonus_hold(%llu) failed, errno %u",
2284 object, error);
2285 bonus = db->db_data;
2286 bsize = db->db_size;
2287 dn = DB_DNODE((dmu_buf_impl_t *)db);
2288 }
2289 }
2290
2291 if (dnode_slots_used != NULL)
2292 *dnode_slots_used = doi.doi_dnodesize / DNODE_MIN_SIZE;
2293
2294 zdb_nicenum(doi.doi_metadata_block_size, iblk, sizeof (iblk));
2295 zdb_nicenum(doi.doi_data_block_size, dblk, sizeof (dblk));
2296 zdb_nicenum(doi.doi_max_offset, lsize, sizeof (lsize));
2297 zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize, sizeof (asize));
2298 zdb_nicenum(doi.doi_bonus_size, bonus_size, sizeof (bonus_size));
2299 zdb_nicenum(doi.doi_dnodesize, dnsize, sizeof (dnsize));
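/*
 * %full is the fraction of the object's address space that is
 * allocated. For the meta-dnode (object 0), doi_fill_count counts
 * dnodes rather than blocks, so scale it by DNODES_PER_BLOCK.
 */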
2300 (void) sprintf(fill, "%6.2f", 100.0 * doi.doi_fill_count *
2301 doi.doi_data_block_size / (object == 0 ? DNODES_PER_BLOCK : 1) /
2302 doi.doi_max_offset);
2303
2304 aux[0] = '\0';
2305
2306 if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) {
2307 (void) snprintf(aux + strlen(aux), sizeof (aux), " (K=%s)",
2308 ZDB_CHECKSUM_NAME(doi.doi_checksum));
2309 }
2310
2311 if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) {
2312 (void) snprintf(aux + strlen(aux), sizeof (aux), " (Z=%s)",
2313 ZDB_COMPRESS_NAME(doi.doi_compress));
2314 }
2315
2316 (void) printf("%10" PRIu64
2317 " %3u %5s %5s %5s %5s %5s %6s %s%s\n",
2318 object, doi.doi_indirection, iblk, dblk,
2319 asize, dnsize, lsize, fill, ZDB_OT_NAME(doi.doi_type), aux);
2320
2321 if (doi.doi_bonus_type != DMU_OT_NONE && verbosity > 3) {
2322 (void) printf("%10s %3s %5s %5s %5s %5s %5s %6s %s\n",
2323 "", "", "", "", "", "", bonus_size, "bonus",
2324 ZDB_OT_NAME(doi.doi_bonus_type));
2325 }
2326
2327 if (verbosity >= 4) {
2328 (void) printf("\tdnode flags: %s%s%s%s\n",
2329 (dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) ?
2330 "USED_BYTES " : "",
2331 (dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) ?
2332 "USERUSED_ACCOUNTED " : "",
2333 (dn->dn_phys->dn_flags & DNODE_FLAG_USEROBJUSED_ACCOUNTED) ?
2334 "USEROBJUSED_ACCOUNTED " : "",
2335 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ?
2336 "SPILL_BLKPTR" : "");
2337 (void) printf("\tdnode maxblkid: %llu\n",
2338 (longlong_t)dn->dn_phys->dn_maxblkid);
2339
2340 if (!dnode_held) {
2341 object_viewer[ZDB_OT_TYPE(doi.doi_bonus_type)](os,
2342 object, bonus, bsize);
2343 } else {
2344 (void) printf("\t\t(bonus encrypted)\n");
2345 }
2346
2347 if (!os->os_encrypted || !DMU_OT_IS_ENCRYPTED(doi.doi_type)) {
2348 object_viewer[ZDB_OT_TYPE(doi.doi_type)](os, object,
2349 NULL, 0);
2350 } else {
2351 (void) printf("\t\t(object encrypted)\n");
2352 }
2353
2354 *print_header = 1;
2355 }
2356
2357 if (verbosity >= 5)
2358 dump_indirect(dn);
2359
2360 if (verbosity >= 5) {
2361 /*
2362 * Report the list of segments that comprise the object.
2363 */
2364 uint64_t start = 0;
2365 uint64_t end;
2366 uint64_t blkfill = 1;
2367 int minlvl = 1;
2368
2369 if (dn->dn_type == DMU_OT_DNODE) {
2370 minlvl = 0;
2371 blkfill = DNODES_PER_BLOCK;
2372 }
2373
2374 for (;;) {
2375 char segsize[32];
2376 /* make sure nicenum has enough space */
2377 CTASSERT(sizeof (segsize) >= NN_NUMBUF_SZ);
2378 error = dnode_next_offset(dn,
2379 0, &start, minlvl, blkfill, 0);
2380 if (error)
2381 break;
2382 end = start;
2383 error = dnode_next_offset(dn,
2384 DNODE_FIND_HOLE, &end, minlvl, blkfill, 0);
2385 zdb_nicenum(end - start, segsize, sizeof (segsize));
2386 (void) printf("\t\tsegment [%016llx, %016llx)"
2387 " size %5s\n", (u_longlong_t)start,
2388 (u_longlong_t)end, segsize);
2389 if (error)
2390 break;
2391 start = end;
2392 }
2393 }
2394
2395 if (db != NULL)
2396 dmu_buf_rele(db, FTAG);
2397 if (dnode_held)
2398 dnode_rele(dn, FTAG);
2399 }
2400
2401 static void
2402 count_dir_mos_objects(dsl_dir_t *dd)
2403 {
2404 mos_obj_refd(dd->dd_object);
2405 mos_obj_refd(dsl_dir_phys(dd)->dd_child_dir_zapobj);
2406 mos_obj_refd(dsl_dir_phys(dd)->dd_deleg_zapobj);
2407 mos_obj_refd(dsl_dir_phys(dd)->dd_props_zapobj);
2408 mos_obj_refd(dsl_dir_phys(dd)->dd_clones);
2409 }
2410
2411 static void
2412 count_ds_mos_objects(dsl_dataset_t *ds)
2413 {
2414 mos_obj_refd(ds->ds_object);
2415 mos_obj_refd(dsl_dataset_phys(ds)->ds_next_clones_obj);
2416 mos_obj_refd(dsl_dataset_phys(ds)->ds_props_obj);
2417 mos_obj_refd(dsl_dataset_phys(ds)->ds_userrefs_obj);
2418 mos_obj_refd(dsl_dataset_phys(ds)->ds_snapnames_zapobj);
2419
2420 if (!dsl_dataset_is_snapshot(ds)) {
2421 count_dir_mos_objects(ds->ds_dir);
2422 }
2423 }
2424
2425 static const char *objset_types[DMU_OST_NUMTYPES] = {
2426 "NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" };
2427
2428 static void
2429 dump_dir(objset_t *os)
2430 {
2431 dmu_objset_stats_t dds;
2432 uint64_t object, object_count;
2433 uint64_t refdbytes, usedobjs, scratch;
2434 char numbuf[32];
2435 char blkbuf[BP_SPRINTF_LEN + 20];
2436 char osname[ZFS_MAX_DATASET_NAME_LEN];
2437 const char *type = "UNKNOWN";
2438 int verbosity = dump_opt['d'];
2439 int print_header = 1;
2440 unsigned i;
2441 int error;
2442 uint64_t total_slots_used = 0;
2443 uint64_t max_slot_used = 0;
2444 uint64_t dnode_slots;
2445
2446 /* make sure nicenum has enough space */
2447 CTASSERT(sizeof (numbuf) >= NN_NUMBUF_SZ);
2448
2449 dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
2450 dmu_objset_fast_stat(os, &dds);
2451 dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
2452
2453 if (dds.dds_type < DMU_OST_NUMTYPES)
2454 type = objset_types[dds.dds_type];
2455
2456 if (dds.dds_type == DMU_OST_META) {
2457 dds.dds_creation_txg = TXG_INITIAL;
2458 usedobjs = BP_GET_FILL(os->os_rootbp);
2459 refdbytes = dsl_dir_phys(os->os_spa->spa_dsl_pool->dp_mos_dir)->
2460 dd_used_bytes;
2461 } else {
2462 dmu_objset_space(os, &refdbytes, &scratch, &usedobjs, &scratch);
2463 }
2464
2465 ASSERT3U(usedobjs, ==, BP_GET_FILL(os->os_rootbp));
2466
2467 zdb_nicenum(refdbytes, numbuf, sizeof (numbuf));
2468
2469 if (verbosity >= 4) {
2470 (void) snprintf(blkbuf, sizeof (blkbuf), ", rootbp ");
2471 (void) snprintf_blkptr(blkbuf + strlen(blkbuf),
2472 sizeof (blkbuf) - strlen(blkbuf), os->os_rootbp);
2473 } else {
2474 blkbuf[0] = '\0';
2475 }
2476
2477 dmu_objset_name(os, osname);
2478
2479 (void) printf("Dataset %s [%s], ID %llu, cr_txg %llu, "
2480 "%s, %llu objects%s%s\n",
2481 osname, type, (u_longlong_t)dmu_objset_id(os),
2482 (u_longlong_t)dds.dds_creation_txg,
2483 numbuf, (u_longlong_t)usedobjs, blkbuf,
2484 (dds.dds_inconsistent) ? " (inconsistent)" : "");
2485
2486 if (zopt_objects != 0) {
2487 for (i = 0; i < zopt_objects; i++)
2488 dump_object(os, zopt_object[i], verbosity,
2489 &print_header, NULL);
2490 (void) printf("\n");
2491 return;
2492 }
2493
2494 if (dump_opt['i'] != 0 || verbosity >= 2)
2495 dump_intent_log(dmu_objset_zil(os));
2496
2497 if (dmu_objset_ds(os) != NULL) {
2498 dsl_dataset_t *ds = dmu_objset_ds(os);
2499 dump_deadlist(&ds->ds_deadlist);
2500
2501 if (dsl_dataset_remap_deadlist_exists(ds)) {
2502 (void) printf("ds_remap_deadlist:\n");
2503 dump_deadlist(&ds->ds_remap_deadlist);
2504 }
2505 count_ds_mos_objects(ds);
2506 }
2507
2508 if (verbosity < 2)
2509 return;
2510
2511 if (BP_IS_HOLE(os->os_rootbp))
2512 return;
2513
2514 dump_object(os, 0, verbosity, &print_header, NULL);
2515 object_count = 0;
2516 if (DMU_USERUSED_DNODE(os) != NULL &&
2517 DMU_USERUSED_DNODE(os)->dn_type != 0) {
2518 dump_object(os, DMU_USERUSED_OBJECT, verbosity, &print_header,
2519 NULL);
2520 dump_object(os, DMU_GROUPUSED_OBJECT, verbosity, &print_header,
2521 NULL);
2522 }
2523
2524 if (DMU_PROJECTUSED_DNODE(os) != NULL &&
2525 DMU_PROJECTUSED_DNODE(os)->dn_type != 0)
2526 dump_object(os, DMU_PROJECTUSED_OBJECT, verbosity,
2527 &print_header, NULL);
2528
2529 object = 0;
2530 while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
2531 dump_object(os, object, verbosity, &print_header, &dnode_slots);
2532 object_count++;
2533 total_slots_used += dnode_slots;
2534 max_slot_used = object + dnode_slots - 1;
2535 }
2536
2537 (void) printf("\n");
2538
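/*
 * Summarize dnode slot usage: "Percent empty" is the share of slots
 * in the used range [0, max_slot_used] that no dnode occupies.
 */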
2539 (void) printf(" Dnode slots:\n");
2540 (void) printf("\tTotal used: %10llu\n",
2541 (u_longlong_t)total_slots_used);
2542 (void) printf("\tMax used: %10llu\n",
2543 (u_longlong_t)max_slot_used);
2544 (void) printf("\tPercent empty: %10lf\n",
2545 (double)(max_slot_used - total_slots_used)*100 /
2546 (double)max_slot_used);
2547
2548 (void) printf("\n");
2549
2550 if (error != ESRCH) {
2551 (void) fprintf(stderr, "dmu_object_next() = %d\n", error);
2552 abort();
2553 }
2554 if (leaked_objects != 0) {
2555 (void) printf("%d potentially leaked objects detected\n",
2556 leaked_objects);
2557 leaked_objects = 0;
2558 }
2559
2560 ASSERT3U(object_count, ==, usedobjs);
2561 }
2562
2563 static void
2564 dump_uberblock(uberblock_t *ub, const char *header, const char *footer)
2565 {
2566 time_t timestamp = ub->ub_timestamp;
2567
2568 (void) printf("%s", header ? header : "");
2569 (void) printf("\tmagic = %016llx\n", (u_longlong_t)ub->ub_magic);
2570 (void) printf("\tversion = %llu\n", (u_longlong_t)ub->ub_version);
2571 (void) printf("\ttxg = %llu\n", (u_longlong_t)ub->ub_txg);
2572 (void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum);
2573 (void) printf("\ttimestamp = %llu UTC = %s",
2574 (u_longlong_t)ub->ub_timestamp, asctime(localtime(&timestamp)));
2575
2576 (void) printf("\tmmp_magic = %016llx\n",
2577 (u_longlong_t)ub->ub_mmp_magic);
2578 if (MMP_VALID(ub)) {
2579 (void) printf("\tmmp_delay = %0llu\n",
2580 (u_longlong_t)ub->ub_mmp_delay);
2581 if (MMP_SEQ_VALID(ub))
2582 (void) printf("\tmmp_seq = %u\n",
2583 (unsigned int) MMP_SEQ(ub));
2584 if (MMP_FAIL_INT_VALID(ub))
2585 (void) printf("\tmmp_fail = %u\n",
2586 (unsigned int) MMP_FAIL_INT(ub));
2587 if (MMP_INTERVAL_VALID(ub))
2588 (void) printf("\tmmp_write = %u\n",
2589 (unsigned int) MMP_INTERVAL(ub));
2590 /* After MMP_* to make summarize_uberblock_mmp cleaner */
2591 (void) printf("\tmmp_valid = %x\n",
2592 (unsigned int) ub->ub_mmp_config & 0xFF);
2593 }
2594
2595 if (dump_opt['u'] >= 4) {
2596 char blkbuf[BP_SPRINTF_LEN];
2597 snprintf_blkptr(blkbuf, sizeof (blkbuf), &ub->ub_rootbp);
2598 (void) printf("\trootbp = %s\n", blkbuf);
2599 }
2600 (void) printf("\tcheckpoint_txg = %llu\n",
2601 (u_longlong_t)ub->ub_checkpoint_txg);
2602 (void) printf("%s", footer ? footer : "");
2603 }
2604
2605 static void
2606 dump_config(spa_t *spa)
2607 {
2608 dmu_buf_t *db;
2609 size_t nvsize = 0;
2610 int error = 0;
2611
2612
2613 error = dmu_bonus_hold(spa->spa_meta_objset,
2614 spa->spa_config_object, FTAG, &db);
2615
2616 if (error == 0) {
2617 nvsize = *(uint64_t *)db->db_data;
2618 dmu_buf_rele(db, FTAG);
2619
2620 (void) printf("\nMOS Configuration:\n");
2621 dump_packed_nvlist(spa->spa_meta_objset,
2622 spa->spa_config_object, (void *)&nvsize, 1);
2623 } else {
2624 (void) fprintf(stderr, "dmu_bonus_hold(%llu) failed, errno %d",
2625 (u_longlong_t)spa->spa_config_object, error);
2626 }
2627 }
2628
2629 static void
2630 dump_cachefile(const char *cachefile)
2631 {
2632 int fd;
2633 struct stat64 statbuf;
2634 char *buf;
2635 nvlist_t *config;
2636
2637 if ((fd = open64(cachefile, O_RDONLY)) < 0) {
2638 (void) printf("cannot open '%s': %s\n", cachefile,
2639 strerror(errno));
2640 exit(1);
2641 }
2642
2643 if (fstat64(fd, &statbuf) != 0) {
2644 (void) printf("failed to stat '%s': %s\n", cachefile,
2645 strerror(errno));
2646 exit(1);
2647 }
2648
2649 if ((buf = malloc(statbuf.st_size)) == NULL) {
2650 (void) fprintf(stderr, "failed to allocate %llu bytes\n",
2651 (u_longlong_t)statbuf.st_size);
2652 exit(1);
2653 }
2654
2655 if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
2656 (void) fprintf(stderr, "failed to read %llu bytes\n",
2657 (u_longlong_t)statbuf.st_size);
2658 exit(1);
2659 }
2660
2661 (void) close(fd);
2662
2663 if (nvlist_unpack(buf, statbuf.st_size, &config, 0) != 0) {
2664 (void) fprintf(stderr, "failed to unpack nvlist\n");
2665 exit(1);
2666 }
2667
2668 free(buf);
2669
2670 dump_nvlist(config, 0);
2671
2672 nvlist_free(config);
2673 }
2674
2675 static void
2676 print_l2arc_header(void)
2677 {
2678 (void) printf("------------------------------------\n");
2679 (void) printf("L2ARC device header\n");
2680 (void) printf("------------------------------------\n");
2681 }
2682
2683 static void
2684 print_l2arc_log_blocks(void)
2685 {
2686 (void) printf("------------------------------------\n");
2687 (void) printf("L2ARC device log blocks\n");
2688 (void) printf("------------------------------------\n");
2689 }
2690
2691 static void
2692 dump_l2arc_log_entries(uint64_t log_entries,
2693 l2arc_log_ent_phys_t *le, uint64_t i)
2694 {
2695 for (uint64_t j = 0; j < log_entries; j++) {
2696 dva_t dva = le[j].le_dva;
2697 (void) printf("lb[%4llu]\tle[%4d]\tDVA asize: %llu, "
2698 "vdev: %llu, offset: %llu\n",
2699 (u_longlong_t)i, j + 1,
2700 (u_longlong_t)DVA_GET_ASIZE(&dva),
2701 (u_longlong_t)DVA_GET_VDEV(&dva),
2702 (u_longlong_t)DVA_GET_OFFSET(&dva));
2703 (void) printf("|\t\t\t\tbirth: %llu\n",
2704 (u_longlong_t)le[j].le_birth);
2705 (void) printf("|\t\t\t\tlsize: %llu\n",
2706 (u_longlong_t)L2BLK_GET_LSIZE((&le[j])->le_prop));
2707 (void) printf("|\t\t\t\tpsize: %llu\n",
2708 (u_longlong_t)L2BLK_GET_PSIZE((&le[j])->le_prop));
2709 (void) printf("|\t\t\t\tcompr: %llu\n",
2710 (u_longlong_t)L2BLK_GET_COMPRESS((&le[j])->le_prop));
2711 (void) printf("|\t\t\t\ttype: %llu\n",
2712 (u_longlong_t)L2BLK_GET_TYPE((&le[j])->le_prop));
2713 (void) printf("|\t\t\t\tprotected: %llu\n",
2714 (u_longlong_t)L2BLK_GET_PROTECTED((&le[j])->le_prop));
2715 (void) printf("|\t\t\t\tprefetch: %llu\n",
2716 (u_longlong_t)L2BLK_GET_PREFETCH((&le[j])->le_prop));
2717 (void) printf("|\t\t\t\taddress: %llu\n",
2718 (u_longlong_t)le[j].le_daddr);
2719 (void) printf("|\t\t\t\tARC state: %llu\n",
2720 (u_longlong_t)L2BLK_GET_STATE((&le[j])->le_prop));
2721 (void) printf("|\n");
2722 }
2723 (void) printf("\n");
2724 }
2725
2726 static void
2727 dump_l2arc_log_blkptr(l2arc_log_blkptr_t lbps)
2728 {
2729 (void) printf("|\t\tdaddr: %llu\n", (u_longlong_t)lbps.lbp_daddr);
2730 (void) printf("|\t\tpayload_asize: %llu\n",
2731 (u_longlong_t)lbps.lbp_payload_asize);
2732 (void) printf("|\t\tpayload_start: %llu\n",
2733 (u_longlong_t)lbps.lbp_payload_start);
2734 (void) printf("|\t\tlsize: %llu\n",
2735 (u_longlong_t)L2BLK_GET_LSIZE((&lbps)->lbp_prop));
2736 (void) printf("|\t\tasize: %llu\n",
2737 (u_longlong_t)L2BLK_GET_PSIZE((&lbps)->lbp_prop));
2738 (void) printf("|\t\tcompralgo: %llu\n",
2739 (u_longlong_t)L2BLK_GET_COMPRESS((&lbps)->lbp_prop));
2740 (void) printf("|\t\tcksumalgo: %llu\n",
2741 (u_longlong_t)L2BLK_GET_CHECKSUM((&lbps)->lbp_prop));
2742 (void) printf("|\n\n");
2743 }
2744
2745 static void
2746 dump_l2arc_log_blocks(int fd, l2arc_dev_hdr_phys_t l2dhdr,
2747 l2arc_dev_hdr_phys_t *rebuild)
2748 {
2749 l2arc_log_blk_phys_t this_lb;
2750 uint64_t asize;
2751 l2arc_log_blkptr_t lbps[2];
2752 abd_t *abd;
2753 zio_cksum_t cksum;
2754 int failed = 0;
2755 l2arc_dev_t dev;
2756
2757 if (!dump_opt['q'])
2758 print_l2arc_log_blocks();
2759 bcopy((&l2dhdr)->dh_start_lbps, lbps, sizeof (lbps));
2760
2761 dev.l2ad_evict = l2dhdr.dh_evict;
2762 dev.l2ad_start = l2dhdr.dh_start;
2763 dev.l2ad_end = l2dhdr.dh_end;
2764
2765 if (l2dhdr.dh_start_lbps[0].lbp_daddr == 0) {
2766 /* no log blocks to read */
2767 if (!dump_opt['q']) {
2768 (void) printf("No log blocks to read\n");
2769 (void) printf("\n");
2770 }
2771 return;
2772 } else {
2773 dev.l2ad_hand = lbps[0].lbp_daddr +
2774 L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
2775 }
2776
2777 dev.l2ad_first = !!(l2dhdr.dh_flags & L2ARC_DEV_HDR_EVICT_FIRST);
2778
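/*
 * Walk the chain of log blocks backwards from the most recently
 * written one, following each block's lb_prev_lbp pointer, until a
 * pointer fails validation or the chain crosses into evicted space.
 */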
2779 for (;;) {
2780 if (!l2arc_log_blkptr_valid(&dev, &lbps[0]))
2781 break;
2782
2783 /* L2BLK_GET_PSIZE returns aligned size for log blocks */
2784 asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
2785 if (pread64(fd, &this_lb, asize, lbps[0].lbp_daddr) !=
2786 (ssize_t)asize) {
2787 if (!dump_opt['q']) {
2788 (void) printf("Error while reading next log "
2789 "block\n\n");
2790 }
2791 break;
2792 }
2793
2794 fletcher_4_native(&this_lb, asize, NULL, &cksum);
2795 if (!ZIO_CHECKSUM_EQUAL(cksum, lbps[0].lbp_cksum)) {
2796 failed++;
2797 if (!dump_opt['q']) {
2798 (void) printf("Invalid cksum\n");
2799 dump_l2arc_log_blkptr(lbps[0]);
2800 }
2801 break;
2802 }
2803
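/* Log blocks may be stored compressed; decompress into this_lb. */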
2804 switch (L2BLK_GET_COMPRESS((&lbps[0])->lbp_prop)) {
2805 case ZIO_COMPRESS_OFF:
2806 break;
2807 case ZIO_COMPRESS_LZ4:
2808 abd = abd_alloc_for_io(asize, B_TRUE);
2809 abd_copy_from_buf_off(abd, &this_lb, 0, asize);
2810 zio_decompress_data(L2BLK_GET_COMPRESS(
2811 (&lbps[0])->lbp_prop), abd, &this_lb,
2812 asize, sizeof (this_lb));
2813 abd_free(abd);
2814 break;
2815 default:
2816 break;
2817 }
2818
2819 if (this_lb.lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC))
2820 byteswap_uint64_array(&this_lb, sizeof (this_lb));
2821 if (this_lb.lb_magic != L2ARC_LOG_BLK_MAGIC) {
2822 if (!dump_opt['q'])
2823 (void) printf("Invalid log block magic\n\n");
2824 break;
2825 }
2826
2827 rebuild->dh_lb_count++;
2828 rebuild->dh_lb_asize += asize;
2829 if (dump_opt['l'] > 1 && !dump_opt['q']) {
2830 (void) printf("lb[%4llu]\tmagic: %llu\n",
2831 (u_longlong_t)rebuild->dh_lb_count,
2832 (u_longlong_t)this_lb.lb_magic);
2833 dump_l2arc_log_blkptr(lbps[0]);
2834 }
2835
2836 if (dump_opt['l'] > 2 && !dump_opt['q'])
2837 dump_l2arc_log_entries(l2dhdr.dh_log_entries,
2838 this_lb.lb_entries,
2839 rebuild->dh_lb_count);
2840
2841 if (l2arc_range_check_overlap(lbps[1].lbp_payload_start,
2842 lbps[0].lbp_payload_start, dev.l2ad_evict) &&
2843 !dev.l2ad_first)
2844 break;
2845
2846 lbps[0] = lbps[1];
2847 lbps[1] = this_lb.lb_prev_lbp;
2848 }
2849
2850 if (!dump_opt['q']) {
2851 (void) printf("log_blk_count:\t %llu with valid cksum\n",
2852 (u_longlong_t)rebuild->dh_lb_count);
2853 (void) printf("\t\t %d with invalid cksum\n", failed);
2854 (void) printf("log_blk_asize:\t %llu\n\n",
2855 (u_longlong_t)rebuild->dh_lb_asize);
2856 }
2857 }
2858
2859 static int
2860 dump_l2arc_header(int fd)
2861 {
2862 l2arc_dev_hdr_phys_t l2dhdr, rebuild;
2863 int error = B_FALSE;
2864
2865 bzero(&l2dhdr, sizeof (l2dhdr));
2866 bzero(&rebuild, sizeof (rebuild));
2867
2868 if (pread64(fd, &l2dhdr, sizeof (l2dhdr),
2869 VDEV_LABEL_START_SIZE) != sizeof (l2dhdr)) {
2870 error = B_TRUE;
2871 } else {
2872 if (l2dhdr.dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC))
2873 byteswap_uint64_array(&l2dhdr, sizeof (l2dhdr));
2874
2875 if (l2dhdr.dh_magic != L2ARC_DEV_HDR_MAGIC)
2876 error = B_TRUE;
2877 }
2878
2879 if (error) {
2880 (void) printf("L2ARC device header not found\n\n");
2881 /* Do not return an error here for backward compatibility */
2882 return (0);
2883 } else if (!dump_opt['q']) {
2884 print_l2arc_header();
2885
2886 (void) printf(" magic: %llu\n",
2887 (u_longlong_t)l2dhdr.dh_magic);
2888 (void) printf(" version: %llu\n",
2889 (u_longlong_t)l2dhdr.dh_version);
2890 (void) printf(" pool_guid: %llu\n",
2891 (u_longlong_t)l2dhdr.dh_spa_guid);
2892 (void) printf(" flags: %llu\n",
2893 (u_longlong_t)l2dhdr.dh_flags);
2894 (void) printf(" start_lbps[0]: %llu\n",
2895 (u_longlong_t)
2896 l2dhdr.dh_start_lbps[0].lbp_daddr);
2897 (void) printf(" start_lbps[1]: %llu\n",
2898 (u_longlong_t)
2899 l2dhdr.dh_start_lbps[1].lbp_daddr);
2900 (void) printf(" log_blk_ent: %llu\n",
2901 (u_longlong_t)l2dhdr.dh_log_entries);
2902 (void) printf(" start: %llu\n",
2903 (u_longlong_t)l2dhdr.dh_start);
2904 (void) printf(" end: %llu\n",
2905 (u_longlong_t)l2dhdr.dh_end);
2906 (void) printf(" evict: %llu\n",
2907 (u_longlong_t)l2dhdr.dh_evict);
2908 (void) printf(" lb_asize_refcount: %llu\n",
2909 (u_longlong_t)l2dhdr.dh_lb_asize);
2910 (void) printf(" lb_count_refcount: %llu\n\n",
2911 (u_longlong_t)l2dhdr.dh_lb_count);
2912 }
2913
2914 dump_l2arc_log_blocks(fd, l2dhdr, &rebuild);
2915 /*
2916 * The total aligned size of log blocks and the number of log blocks
2917 * reported in the device header may be less than what zdb's
2918 * dump_l2arc_log_blocks(), which emulates l2arc_rebuild(), reports.
2919 * This happens because dump_l2arc_log_blocks() lacks the memory
2920 * pressure valve that l2arc_rebuild() has. Thus, if we are on a system
2921 * with low memory, l2arc_rebuild will exit prematurely and dh_lb_asize
2922 * and dh_lb_count will be lower to begin with than what exists on the
2923 * device. This is normal and zdb should not exit with an error. The
2924 * opposite case should never happen though, the values reported in the
2925 * header should never be higher than what dump_l2arc_log_blocks() and
2926 * l2arc_rebuild() report. If this happens there is a leak in the
2927 * accounting of log blocks.
2928 */
2929 if (l2dhdr.dh_lb_asize > rebuild.dh_lb_asize ||
2930 l2dhdr.dh_lb_count > rebuild.dh_lb_count)
2931 return (1);
2932
2933 return (0);
2934 }
2935
2936 static char curpath[PATH_MAX];
2937
2938 /*
2939 * Iterate through the path components, recursively passing the
2940 * current component's object and the remaining path down until we
2941 * find the object for the final component.
2942 */
2943 static int
2944 dump_path_impl(objset_t *os, uint64_t obj, char *name)
2945 {
2946 int err;
2947 int header = 1;
2948 uint64_t child_obj;
2949 char *s;
2950 dmu_buf_t *db;
2951 dmu_object_info_t doi;
2952
2953 if ((s = strchr(name, '/')) != NULL)
2954 *s = '\0';
2955 err = zap_lookup(os, obj, name, 8, 1, &child_obj);
2956
2957 (void) strlcat(curpath, name, sizeof (curpath));
2958
2959 if (err != 0) {
2960 (void) fprintf(stderr, "failed to lookup %s: %s\n",
2961 curpath, strerror(err));
2962 return (err);
2963 }
2964
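/* Directory entries embed type bits; strip them to get the object. */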
2965 child_obj = ZFS_DIRENT_OBJ(child_obj);
2966 err = sa_buf_hold(os, child_obj, FTAG, &db);
2967 if (err != 0) {
2968 (void) fprintf(stderr,
2969 "failed to get SA dbuf for obj %llu: %s\n",
2970 (u_longlong_t)child_obj, strerror(err));
2971 return (EINVAL);
2972 }
2973 dmu_object_info_from_db(db, &doi);
2974 sa_buf_rele(db, FTAG);
2975
2976 if (doi.doi_bonus_type != DMU_OT_SA &&
2977 doi.doi_bonus_type != DMU_OT_ZNODE) {
2978 (void) fprintf(stderr, "invalid bonus type %d for obj %llu\n",
2979 doi.doi_bonus_type, (u_longlong_t)child_obj);
2980 return (EINVAL);
2981 }
2982
2983 if (dump_opt['v'] > 6) {
2984 (void) printf("obj=%llu %s type=%d bonustype=%d\n",
2985 (u_longlong_t)child_obj, curpath, doi.doi_type,
2986 doi.doi_bonus_type);
2987 }
2988
2989 (void) strlcat(curpath, "/", sizeof (curpath));
2990
2991 switch (doi.doi_type) {
2992 case DMU_OT_DIRECTORY_CONTENTS:
2993 if (s != NULL && *(s + 1) != '\0')
2994 return (dump_path_impl(os, child_obj, s + 1));
2995 /*FALLTHROUGH*/
2996 case DMU_OT_PLAIN_FILE_CONTENTS:
2997 dump_object(os, child_obj, dump_opt['v'], &header, NULL);
2998 return (0);
2999 default:
3000 (void) fprintf(stderr, "object %llu has non-file/directory "
3001 "type %d\n", (u_longlong_t)obj, doi.doi_type);
3002 break;
3003 }
3004
3005 return (EINVAL);
3006 }
3007
3008 /*
3009 * Dump the blocks for the object specified by path inside the dataset.
3010 */
3011 static int
3012 dump_path(char *ds, char *path)
3013 {
3014 int err;
3015 objset_t *os;
3016 uint64_t root_obj;
3017
3018 err = open_objset(ds, DMU_OST_ZFS, FTAG, &os);
3019 if (err != 0)
3020 return (err);
3021
3022 err = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1, &root_obj);
3023 if (err != 0) {
3024 (void) fprintf(stderr, "can't lookup root znode: %s\n",
3025 strerror(err));
3026 dmu_objset_disown(os, B_FALSE, FTAG);
3027 return (EINVAL);
3028 }
3029
3030 (void) snprintf(curpath, sizeof (curpath), "dataset=%s path=/", ds);
3031
3032 err = dump_path_impl(os, root_obj, path);
3033
3034 close_objset(os, FTAG);
3035 return (err);
3036 }
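/*
 * In current zdb this is typically reached via the -O option, e.g.
 * (with a hypothetical dataset name):
 *
 *     zdb -O tank/fs some/dir/file
 */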
3037
3038 typedef struct cksum_record {
3039 zio_cksum_t cksum;
3040 boolean_t labels[VDEV_LABELS];
3041 avl_node_t link;
3042 } cksum_record_t;
3043
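/*
 * The four copies of a vdev label are usually identical, so configs
 * and uberblocks are deduplicated by checksum: each unique copy is
 * printed once, tagged with the set of labels it appears on.
 */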
3044 static int
3045 cksum_record_compare(const void *x1, const void *x2)
3046 {
3047 const cksum_record_t *l = (cksum_record_t *)x1;
3048 const cksum_record_t *r = (cksum_record_t *)x2;
3049 int arraysize = ARRAY_SIZE(l->cksum.zc_word);
3050 int difference;
3051
3052 for (int i = 0; i < arraysize; i++) {
3053 difference = AVL_CMP(l->cksum.zc_word[i], r->cksum.zc_word[i]);
3054 if (difference)
3055 break;
3056 }
3057
3058 return (difference);
3059 }
3060
3061 static cksum_record_t *
3062 cksum_record_alloc(zio_cksum_t *cksum, int l)
3063 {
3064 cksum_record_t *rec;
3065
3066 rec = umem_zalloc(sizeof (*rec), UMEM_NOFAIL);
3067 rec->cksum = *cksum;
3068 rec->labels[l] = B_TRUE;
3069
3070 return (rec);
3071 }
3072
3073 static cksum_record_t *
3074 cksum_record_lookup(avl_tree_t *tree, zio_cksum_t *cksum)
3075 {
3076 cksum_record_t lookup = { .cksum = *cksum };
3077 avl_index_t where;
3078
3079 return (avl_find(tree, &lookup, &where));
3080 }
3081
3082 static cksum_record_t *
3083 cksum_record_insert(avl_tree_t *tree, zio_cksum_t *cksum, int l)
3084 {
3085 cksum_record_t *rec;
3086
3087 rec = cksum_record_lookup(tree, cksum);
3088 if (rec) {
3089 rec->labels[l] = B_TRUE;
3090 } else {
3091 rec = cksum_record_alloc(cksum, l);
3092 avl_add(tree, rec);
3093 }
3094
3095 return (rec);
3096 }
3097
3098 static int
3099 first_label(cksum_record_t *rec)
3100 {
3101 for (int i = 0; i < VDEV_LABELS; i++)
3102 if (rec->labels[i])
3103 return (i);
3104
3105 return (-1);
3106 }
3107
3108 static void
3109 print_label_numbers(char *prefix, cksum_record_t *rec)
3110 {
3111 printf("%s", prefix);
3112 for (int i = 0; i < VDEV_LABELS; i++)
3113 if (rec->labels[i] == B_TRUE)
3114 printf("%d ", i);
3115 printf("\n");
3116 }
3117
3118 #define MAX_UBERBLOCK_COUNT (VDEV_UBERBLOCK_RING >> UBERBLOCK_SHIFT)
3119
3120 typedef struct zdb_label {
3121 vdev_label_t label;
3122 nvlist_t *config_nv;
3123 cksum_record_t *config;
3124 cksum_record_t *uberblocks[MAX_UBERBLOCK_COUNT];
3125 boolean_t header_printed;
3126 boolean_t read_failed;
3127 } zdb_label_t;
3128
3129 static void
3130 print_label_header(zdb_label_t *label, int l)
3131 {
3132
3133 if (dump_opt['q'])
3134 return;
3135
3136 if (label->header_printed == B_TRUE)
3137 return;
3138
3139 (void) printf("------------------------------------\n");
3140 (void) printf("LABEL %d\n", l);
3141 (void) printf("------------------------------------\n");
3142
3143 label->header_printed = B_TRUE;
3144 }
3145
3146 static void
3147 dump_config_from_label(zdb_label_t *label, size_t buflen, int l)
3148 {
3149 if (dump_opt['q'])
3150 return;
3151
3152 if ((dump_opt['l'] < 3) && (first_label(label->config) != l))
3153 return;
3154
3155 print_label_header(label, l);
3156 dump_nvlist(label->config_nv, 4);
3157 print_label_numbers(" labels = ", label->config);
3158 }
3159
3160 #define ZDB_MAX_UB_HEADER_SIZE 32
3161
3162 static void
3163 dump_label_uberblocks(zdb_label_t *label, uint64_t ashift, int label_num)
3164 {
3165 vdev_t vd;
3166 char header[ZDB_MAX_UB_HEADER_SIZE];
3167
3168 vd.vdev_ashift = ashift;
3169 vd.vdev_top = &vd;
3170
3171 for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
3172 uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
3173 uberblock_t *ub = (void *)((char *)&label->label + uoff);
3174 cksum_record_t *rec = label->uberblocks[i];
3175
3176 if (rec == NULL) {
3177 if (dump_opt['u'] >= 2) {
3178 print_label_header(label, label_num);
3179 (void) printf(" Uberblock[%d] invalid\n", i);
3180 }
3181 continue;
3182 }
3183
3184 if ((dump_opt['u'] < 3) && (first_label(rec) != label_num))
3185 continue;
3186
3187 print_label_header(label, label_num);
3188 (void) snprintf(header, ZDB_MAX_UB_HEADER_SIZE,
3189 " Uberblock[%d]\n", i);
3190 dump_uberblock(ub, header, "");
3191 print_label_numbers(" labels = ", rec);
3192 }
3193 }
3194
3195 static int
3196 dump_label(const char *dev)
3197 {
3198 char path[MAXPATHLEN];
3199 zdb_label_t labels[VDEV_LABELS];
3200 uint64_t psize, ashift, l2cache;
3201 struct stat64 statbuf;
3202 boolean_t config_found = B_FALSE;
3203 boolean_t error = B_FALSE;
3204 boolean_t read_l2arc_header = B_FALSE;
3205 avl_tree_t config_tree;
3206 avl_tree_t uberblock_tree;
3207 void *node, *cookie;
3208 int fd;
3209
3210 bzero(labels, sizeof (labels));
3211
3212 (void) strlcpy(path, dev, sizeof (path));
3213 if (dev[0] == '/') {
3214 if (strncmp(dev, ZFS_DISK_ROOTD,
3215 strlen(ZFS_DISK_ROOTD)) == 0) {
3216 (void) snprintf(path, sizeof (path), "%s%s",
3217 ZFS_RDISK_ROOTD, dev + strlen(ZFS_DISK_ROOTD));
3218 }
3219 } else if (stat64(path, &statbuf) != 0) {
3220 char *s;
3221
3222 (void) snprintf(path, sizeof (path), "%s%s", ZFS_RDISK_ROOTD,
3223 dev);
3224 if (((s = strrchr(dev, 's')) == NULL &&
3225 (s = strchr(dev, 'p')) == NULL) ||
3226 !isdigit(*(s + 1)))
3227 (void) strlcat(path, "s0", sizeof (path));
3228 }
3229
3230 if ((fd = open64(path, O_RDONLY)) < 0) {
3231 (void) fprintf(stderr, "cannot open '%s': %s\n", path,
3232 strerror(errno));
3233 exit(1);
3234 }
3235
3236 if (fstat64(fd, &statbuf) != 0) {
3237 (void) fprintf(stderr, "failed to stat '%s': %s\n", path,
3238 strerror(errno));
3239 (void) close(fd);
3240 exit(1);
3241 }
3242
3243 if (S_ISBLK(statbuf.st_mode)) {
3244 (void) fprintf(stderr,
3245 "cannot use '%s': character device required\n", path);
3246 (void) close(fd);
3247 exit(1);
3248 }
3249
3250 avl_create(&config_tree, cksum_record_compare,
3251 sizeof (cksum_record_t), offsetof(cksum_record_t, link));
3252 avl_create(&uberblock_tree, cksum_record_compare,
3253 sizeof (cksum_record_t), offsetof(cksum_record_t, link));
3254
3255 psize = statbuf.st_size;
3256 psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t));
3257 ashift = SPA_MINBLOCKSHIFT;
3258
3259 /*
3260 * 1. Read the label from disk
3261 * 2. Unpack the configuration and insert in config tree.
3262 * 3. Traverse all uberblocks and insert in uberblock tree.
3263 */
3264 for (int l = 0; l < VDEV_LABELS; l++) {
3265 zdb_label_t *label = &labels[l];
3266 char *buf = label->label.vl_vdev_phys.vp_nvlist;
3267 size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
3268 nvlist_t *config;
3269 cksum_record_t *rec;
3270 zio_cksum_t cksum;
3271 vdev_t vd;
3272
3273 if (pread64(fd, &label->label, sizeof (label->label),
3274 vdev_label_offset(psize, l, 0)) != sizeof (label->label)) {
3275 if (!dump_opt['q'])
3276 (void) printf("failed to read label %d\n", l);
3277 label->read_failed = B_TRUE;
3278 error = B_TRUE;
3279 continue;
3280 }
3281
3282 label->read_failed = B_FALSE;
3283
3284 if (nvlist_unpack(buf, buflen, &config, 0) == 0) {
3285 nvlist_t *vdev_tree = NULL;
3286 size_t size;
3287
3288 if ((nvlist_lookup_nvlist(config,
3289 ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0) ||
3290 (nvlist_lookup_uint64(vdev_tree,
3291 ZPOOL_CONFIG_ASHIFT, &ashift) != 0))
3292 ashift = SPA_MINBLOCKSHIFT;
3293
3294 /* If the device is a cache device clear the header. */
3295 if (!read_l2arc_header) {
3296 if (nvlist_lookup_uint64(config,
3297 ZPOOL_CONFIG_POOL_STATE, &l2cache) == 0 &&
3298 l2cache == POOL_STATE_L2CACHE) {
3299 read_l2arc_header = B_TRUE;
3300 }
3301 }
3302
3303 if (nvlist_size(config, &size, NV_ENCODE_XDR) != 0)
3304 size = buflen;
3305
3306 fletcher_4_native(buf, size, NULL, &cksum);
3307 rec = cksum_record_insert(&config_tree, &cksum, l);
3308
3309 label->config = rec;
3310 label->config_nv = config;
3311 config_found = B_TRUE;
3312 } else {
3313 error = B_TRUE;
3314 }
3315
3316 vd.vdev_ashift = ashift;
3317 vd.vdev_top = &vd;
3318
3319 for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
3320 uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
3321 uberblock_t *ub = (void *)((char *)label + uoff);
3322
3323 if (uberblock_verify(ub))
3324 continue;
3325
3326 fletcher_4_native(ub, sizeof (*ub), NULL, &cksum);
3327 rec = cksum_record_insert(&uberblock_tree, &cksum, l);
3328
3329 label->uberblocks[i] = rec;
3330 }
3331 }
3332
3333 /*
3334 * Dump the label and uberblocks.
3335 */
3336 for (int l = 0; l < VDEV_LABELS; l++) {
3337 zdb_label_t *label = &labels[l];
3338 size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
3339
3340 if (label->read_failed == B_TRUE)
3341 continue;
3342
3343 if (label->config_nv) {
3344 dump_config_from_label(label, buflen, l);
3345 } else {
3346 if (!dump_opt['q'])
3347 (void) printf("failed to unpack label %d\n", l);
3348 }
3349
3350 if (dump_opt['u'])
3351 dump_label_uberblocks(label, ashift, l);
3352
3353 nvlist_free(label->config_nv);
3354 }
3355
3356 /*
3357 * Dump the L2ARC header, if existent.
3358 */
3359 if (read_l2arc_header)
3360 error |= dump_l2arc_header(fd);
3361
3362 cookie = NULL;
3363 while ((node = avl_destroy_nodes(&config_tree, &cookie)) != NULL)
3364 umem_free(node, sizeof (cksum_record_t));
3365
3366 cookie = NULL;
3367 while ((node = avl_destroy_nodes(&uberblock_tree, &cookie)) != NULL)
3368 umem_free(node, sizeof (cksum_record_t));
3369
3370 avl_destroy(&config_tree);
3371 avl_destroy(&uberblock_tree);
3372
3373 (void) close(fd);
3374
3375 return (config_found == B_FALSE ? 2 :
3376 (error == B_TRUE ? 1 : 0));
3377 }
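/*
 * dump_label() backs "zdb -l <device>" (with -u adding uberblocks and
 * -q suppressing output). It returns 2 if no valid config was found,
 * 1 if any error was detected, and 0 otherwise.
 */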
3378
3379 static uint64_t dataset_feature_count[SPA_FEATURES];
3380 static uint64_t remap_deadlist_count = 0;
3381
3382 static int
3383 dump_one_dir(const char *dsname, void *arg __unused)
3384 {
3385 int error;
3386 objset_t *os;
3387
3388 error = open_objset(dsname, DMU_OST_ANY, FTAG, &os);
3389 if (error != 0)
3390 return (0);
3391
3392 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
3393 if (!dmu_objset_ds(os)->ds_feature_inuse[f])
3394 continue;
3395 ASSERT(spa_feature_table[f].fi_flags &
3396 ZFEATURE_FLAG_PER_DATASET);
3397 dataset_feature_count[f]++;
3398 }
3399
3400 if (dsl_dataset_remap_deadlist_exists(dmu_objset_ds(os))) {
3401 remap_deadlist_count++;
3402 }
3403
3404 dump_dir(os);
3405 close_objset(os, FTAG);
3406 fuid_table_destroy();
3407 return (0);
3408 }
3409
3410 /*
3411 * Block statistics.
3412 */
3413 #define PSIZE_HISTO_SIZE (SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 2)
3414 typedef struct zdb_blkstats {
3415 uint64_t zb_asize;
3416 uint64_t zb_lsize;
3417 uint64_t zb_psize;
3418 uint64_t zb_count;
3419 uint64_t zb_gangs;
3420 uint64_t zb_ditto_samevdev;
3421 uint64_t zb_ditto_same_ms;
3422 uint64_t zb_psize_histogram[PSIZE_HISTO_SIZE];
3423 } zdb_blkstats_t;
3424
3425 /*
3426 * Extended object types to report deferred frees and dedup auto-ditto blocks.
3427 */
3428 #define ZDB_OT_DEFERRED (DMU_OT_NUMTYPES + 0)
3429 #define ZDB_OT_DITTO (DMU_OT_NUMTYPES + 1)
3430 #define ZDB_OT_OTHER (DMU_OT_NUMTYPES + 2)
3431 #define ZDB_OT_TOTAL (DMU_OT_NUMTYPES + 3)
3432
3433 static const char *zdb_ot_extname[] = {
3434 "deferred free",
3435 "dedup ditto",
3436 "other",
3437 "Total",
3438 };
3439
3440 #define ZB_TOTAL DN_MAX_LEVELS
3441
3442 typedef struct zdb_cb {
3443 zdb_blkstats_t zcb_type[ZB_TOTAL + 1][ZDB_OT_TOTAL + 1];
3444 uint64_t zcb_removing_size;
3445 uint64_t zcb_checkpoint_size;
3446 uint64_t zcb_dedup_asize;
3447 uint64_t zcb_dedup_blocks;
3448 uint64_t zcb_embedded_blocks[NUM_BP_EMBEDDED_TYPES];
3449 uint64_t zcb_embedded_histogram[NUM_BP_EMBEDDED_TYPES]
3450 [BPE_PAYLOAD_SIZE];
3451 uint64_t zcb_start;
3452 hrtime_t zcb_lastprint;
3453 uint64_t zcb_totalasize;
3454 uint64_t zcb_errors[256];
3455 int zcb_readfails;
3456 int zcb_haderrors;
3457 spa_t *zcb_spa;
3458 uint32_t **zcb_vd_obsolete_counts;
3459 } zdb_cb_t;
3460
3461 /* test if two DVA offsets from same vdev are within the same metaslab */
3462 static boolean_t
3463 same_metaslab(spa_t *spa, uint64_t vdev, uint64_t off1, uint64_t off2)
3464 {
3465 vdev_t *vd = vdev_lookup_top(spa, vdev);
3466 uint64_t ms_shift = vd->vdev_ms_shift;
3467
3468 return ((off1 >> ms_shift) == (off2 >> ms_shift));
3469 }
3470
3471 static void
3472 zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp,
3473 dmu_object_type_t type)
3474 {
3475 uint64_t refcnt = 0;
3476
3477 ASSERT(type < ZDB_OT_TOTAL);
3478
3479 if (zilog && zil_bp_tree_add(zilog, bp) != 0)
3480 return;
3481
3482 spa_config_enter(zcb->zcb_spa, SCL_CONFIG, FTAG, RW_READER);
3483
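/*
 * Accumulate stats in four buckets: (level, type), (level, total),
 * (ZB_TOTAL, type), and the (ZB_TOTAL, ZDB_OT_TOTAL) grand total.
 */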
3484 for (int i = 0; i < 4; i++) {
3485 int l = (i < 2) ? BP_GET_LEVEL(bp) : ZB_TOTAL;
3486 int t = (i & 1) ? type : ZDB_OT_TOTAL;
3487 int equal;
3488 zdb_blkstats_t *zb = &zcb->zcb_type[l][t];
3489
3490 zb->zb_asize += BP_GET_ASIZE(bp);
3491 zb->zb_lsize += BP_GET_LSIZE(bp);
3492 zb->zb_psize += BP_GET_PSIZE(bp);
3493 zb->zb_count++;
3494
3495 /*
3496 * The histogram is only big enough to record blocks up to
3497 * SPA_OLD_MAXBLOCKSIZE; larger blocks go into the last,
3498 * "other", bucket.
3499 */
3500 unsigned idx = BP_GET_PSIZE(bp) >> SPA_MINBLOCKSHIFT;
3501 idx = MIN(idx, SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 1);
3502 zb->zb_psize_histogram[idx]++;
3503
3504 zb->zb_gangs += BP_COUNT_GANG(bp);
3505
3506 switch (BP_GET_NDVAS(bp)) {
3507 case 2:
3508 if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
3509 DVA_GET_VDEV(&bp->blk_dva[1])) {
3510 zb->zb_ditto_samevdev++;
3511
3512 if (same_metaslab(zcb->zcb_spa,
3513 DVA_GET_VDEV(&bp->blk_dva[0]),
3514 DVA_GET_OFFSET(&bp->blk_dva[0]),
3515 DVA_GET_OFFSET(&bp->blk_dva[1])))
3516 zb->zb_ditto_same_ms++;
3517 }
3518 break;
3519 case 3:
3520 equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
3521 DVA_GET_VDEV(&bp->blk_dva[1])) +
3522 (DVA_GET_VDEV(&bp->blk_dva[0]) ==
3523 DVA_GET_VDEV(&bp->blk_dva[2])) +
3524 (DVA_GET_VDEV(&bp->blk_dva[1]) ==
3525 DVA_GET_VDEV(&bp->blk_dva[2]));
3526 if (equal != 0) {
3527 zb->zb_ditto_samevdev++;
3528
3529 if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
3530 DVA_GET_VDEV(&bp->blk_dva[1]) &&
3531 same_metaslab(zcb->zcb_spa,
3532 DVA_GET_VDEV(&bp->blk_dva[0]),
3533 DVA_GET_OFFSET(&bp->blk_dva[0]),
3534 DVA_GET_OFFSET(&bp->blk_dva[1])))
3535 zb->zb_ditto_same_ms++;
3536 else if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
3537 DVA_GET_VDEV(&bp->blk_dva[2]) &&
3538 same_metaslab(zcb->zcb_spa,
3539 DVA_GET_VDEV(&bp->blk_dva[0]),
3540 DVA_GET_OFFSET(&bp->blk_dva[0]),
3541 DVA_GET_OFFSET(&bp->blk_dva[2])))
3542 zb->zb_ditto_same_ms++;
3543 else if (DVA_GET_VDEV(&bp->blk_dva[1]) ==
3544 DVA_GET_VDEV(&bp->blk_dva[2]) &&
3545 same_metaslab(zcb->zcb_spa,
3546 DVA_GET_VDEV(&bp->blk_dva[1]),
3547 DVA_GET_OFFSET(&bp->blk_dva[1]),
3548 DVA_GET_OFFSET(&bp->blk_dva[2])))
3549 zb->zb_ditto_same_ms++;
3550 }
3551 break;
3552 }
3553 }
3554
3555 spa_config_exit(zcb->zcb_spa, SCL_CONFIG, FTAG);
3556
3557 if (BP_IS_EMBEDDED(bp)) {
3558 zcb->zcb_embedded_blocks[BPE_GET_ETYPE(bp)]++;
3559 zcb->zcb_embedded_histogram[BPE_GET_ETYPE(bp)]
3560 [BPE_GET_PSIZE(bp)]++;
3561 return;
3562 }
3563
3564 if (dump_opt['L'])
3565 return;
3566
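/*
 * For dedup'd blocks, decrement the refcount in zdb's private copy
 * of the DDT; only the visit that drops the refcount to zero claims
 * the block (earlier visits pass txg 0 to zio_claim() below, which
 * verifies the DVAs without claiming them).
 */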
3567 if (BP_GET_DEDUP(bp)) {
3568 ddt_t *ddt;
3569 ddt_entry_t *dde;
3570
3571 ddt = ddt_select(zcb->zcb_spa, bp);
3572 ddt_enter(ddt);
3573 dde = ddt_lookup(ddt, bp, B_FALSE);
3574
3575 if (dde == NULL) {
3576 refcnt = 0;
3577 } else {
3578 ddt_phys_t *ddp = ddt_phys_select(dde, bp);
3579 ddt_phys_decref(ddp);
3580 refcnt = ddp->ddp_refcnt;
3581 if (ddt_phys_total_refcnt(dde) == 0)
3582 ddt_remove(ddt, dde);
3583 }
3584 ddt_exit(ddt);
3585 }
3586
3587 VERIFY3U(zio_wait(zio_claim(NULL, zcb->zcb_spa,
3588 refcnt ? 0 : spa_min_claim_txg(zcb->zcb_spa),
3589 bp, NULL, NULL, ZIO_FLAG_CANFAIL)), ==, 0);
3590 }
3591
3592 static void
3593 zdb_blkptr_done(zio_t *zio)
3594 {
3595 spa_t *spa = zio->io_spa;
3596 blkptr_t *bp = zio->io_bp;
3597 int ioerr = zio->io_error;
3598 zdb_cb_t *zcb = zio->io_private;
3599 zbookmark_phys_t *zb = &zio->io_bookmark;
3600
3601 abd_free(zio->io_abd);
3602
3603 mutex_enter(&spa->spa_scrub_lock);
3604 spa->spa_load_verify_ios--;
3605 cv_broadcast(&spa->spa_scrub_io_cv);
3606
3607 if (ioerr && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
3608 char blkbuf[BP_SPRINTF_LEN];
3609
3610 zcb->zcb_haderrors = 1;
3611 zcb->zcb_errors[ioerr]++;
3612
3613 if (dump_opt['b'] >= 2)
3614 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
3615 else
3616 blkbuf[0] = '\0';
3617
3618 (void) printf("zdb_blkptr_cb: "
3619 "Got error %d reading "
3620 "<%llu, %llu, %lld, %llx> %s -- skipping\n",
3621 ioerr,
3622 (u_longlong_t)zb->zb_objset,
3623 (u_longlong_t)zb->zb_object,
3624 (u_longlong_t)zb->zb_level,
3625 (u_longlong_t)zb->zb_blkid,
3626 blkbuf);
3627 }
3628 mutex_exit(&spa->spa_scrub_lock);
3629 }
3630
3631 static int
3632 zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
3633 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
3634 {
3635 zdb_cb_t *zcb = arg;
3636 dmu_object_type_t type;
3637 boolean_t is_metadata;
3638
3639 if (bp == NULL)
3640 return (0);
3641
3642 if (dump_opt['b'] >= 5 && bp->blk_birth > 0) {
3643 char blkbuf[BP_SPRINTF_LEN];
3644 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
3645 (void) printf("objset %llu object %llu "
3646 "level %lld offset 0x%llx %s\n",
3647 (u_longlong_t)zb->zb_objset,
3648 (u_longlong_t)zb->zb_object,
3649 (longlong_t)zb->zb_level,
3650 (u_longlong_t)blkid2offset(dnp, bp, zb),
3651 blkbuf);
3652 }
3653
3654 if (BP_IS_HOLE(bp))
3655 return (0);
3656
3657 type = BP_GET_TYPE(bp);
3658
3659 zdb_count_block(zcb, zilog, bp,
3660 (type & DMU_OT_NEWTYPE) ? ZDB_OT_OTHER : type);
3661
3662 is_metadata = (BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type));
3663
3664 if (!BP_IS_EMBEDDED(bp) &&
3665 (dump_opt['c'] > 1 || (dump_opt['c'] && is_metadata))) {
3666 size_t size = BP_GET_PSIZE(bp);
3667 abd_t *abd = abd_alloc(size, B_FALSE);
3668 int flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB | ZIO_FLAG_RAW;
3669
3670 /* If it's an intent log block, failure is expected. */
3671 if (zb->zb_level == ZB_ZIL_LEVEL)
3672 flags |= ZIO_FLAG_SPECULATIVE;
3673
3674 mutex_enter(&spa->spa_scrub_lock);
3675 while (spa->spa_load_verify_ios > max_inflight)
3676 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
3677 spa->spa_load_verify_ios++;
3678 mutex_exit(&spa->spa_scrub_lock);
3679
3680 zio_nowait(zio_read(NULL, spa, bp, abd, size,
3681 zdb_blkptr_done, zcb, ZIO_PRIORITY_ASYNC_READ, flags, zb));
3682 }
3683
3684 zcb->zcb_readfails = 0;
3685
3686 /* only call gethrtime() every 100 blocks */
3687 static int iters;
3688 if (++iters > 100)
3689 iters = 0;
3690 else
3691 return (0);
3692
3693 if (dump_opt['b'] < 5 && gethrtime() > zcb->zcb_lastprint + NANOSEC) {
3694 uint64_t now = gethrtime();
3695 char buf[10];
3696 uint64_t bytes = zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL].zb_asize;
3697 int kb_per_sec =
3698 1 + bytes / (1 + ((now - zcb->zcb_start) / 1000 / 1000));
3699 int sec_remaining =
3700 (zcb->zcb_totalasize - bytes) / 1024 / kb_per_sec;
3701
3702 /* make sure nicenum has enough space */
3703 CTASSERT(sizeof (buf) >= NN_NUMBUF_SZ);
3704
3705 zfs_nicebytes(bytes, buf, sizeof (buf));
3706 (void) fprintf(stderr,
3707 "\r%5s completed (%4dMB/s) "
3708 "estimated time remaining: %uhr %02umin %02usec ",
3709 buf, kb_per_sec / 1024,
3710 sec_remaining / 60 / 60,
3711 sec_remaining / 60 % 60,
3712 sec_remaining % 60);
3713
3714 zcb->zcb_lastprint = now;
3715 }
3716
3717 return (0);
3718 }
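
/*
 * A standalone sketch of the progress math above: throughput is the
 * average over the whole run (not instantaneous), and the remaining
 * time is simply the unvisited asize divided by that average. The two
 * "1 +" terms guard against division by zero early in the run, and
 * bytes-per-millisecond is used as a loose stand-in for KB-per-second,
 * exactly as in the code above. Not called by zdb; illustration only.
 */
static uint64_t
sketch_eta_seconds(uint64_t bytes_done, uint64_t bytes_total,
    uint64_t elapsed_ns)
{
	uint64_t elapsed_ms = 1 + elapsed_ns / 1000 / 1000;
	uint64_t kb_per_sec = 1 + bytes_done / elapsed_ms;

	return ((bytes_total - bytes_done) / 1024 / kb_per_sec);
}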
3719
3720 static void
3721 zdb_leak(void *arg, uint64_t start, uint64_t size)
3722 {
3723 vdev_t *vd = arg;
3724
3725 (void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n",
3726 (u_longlong_t)vd->vdev_id, (u_longlong_t)start, (u_longlong_t)size);
3727 }
3728
3729 static metaslab_ops_t zdb_metaslab_ops = {
3730 NULL /* alloc */
3731 };
3732
3733 typedef int (*zdb_log_sm_cb_t)(spa_t *spa, space_map_entry_t *sme,
3734 uint64_t txg, void *arg);
3735
3736 typedef struct unflushed_iter_cb_arg {
3737 spa_t *uic_spa;
3738 uint64_t uic_txg;
3739 void *uic_arg;
3740 zdb_log_sm_cb_t uic_cb;
3741 } unflushed_iter_cb_arg_t;
3742
3743 static int
3744 iterate_through_spacemap_logs_cb(space_map_entry_t *sme, void *arg)
3745 {
3746 unflushed_iter_cb_arg_t *uic = arg;
3747
3748 return (uic->uic_cb(uic->uic_spa, sme, uic->uic_txg, uic->uic_arg));
3749 }
3750
3751 static void
3752 iterate_through_spacemap_logs(spa_t *spa, zdb_log_sm_cb_t cb, void *arg)
3753 {
3754 if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
3755 return;
3756
3757 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3758 for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
3759 sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
3760 space_map_t *sm = NULL;
3761 VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
3762 sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
3763
3764 unflushed_iter_cb_arg_t uic = {
3765 .uic_spa = spa,
3766 .uic_txg = sls->sls_txg,
3767 .uic_arg = arg,
3768 .uic_cb = cb
3769 };
3770
3771 VERIFY0(space_map_iterate(sm, space_map_length(sm),
3772 iterate_through_spacemap_logs_cb, &uic));
3773 space_map_close(sm);
3774 }
3775 spa_config_exit(spa, SCL_CONFIG, FTAG);
3776 }
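
/*
 * iterate_through_spacemap_logs() above uses a common adapter pattern:
 * space_map_iterate() only understands an (entry, arg) callback, so the
 * extra per-log context (spa, txg) is bundled into a stack struct and
 * re-attached by a trampoline. A generic sketch of the same idea, with
 * hypothetical names, purely for illustration:
 */
typedef int (*sketch_wide_cb_t)(int value, int context);

typedef struct sketch_adapter {
	int sa_context;			/* captured alongside the callback */
	sketch_wide_cb_t sa_cb;
} sketch_adapter_t;

static int
sketch_trampoline(int value, void *arg)
{
	sketch_adapter_t *sa = arg;

	/* forward the narrow callback's data plus the captured context */
	return (sa->sa_cb(value, sa->sa_context));
}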
3777
3778 /* ARGSUSED */
3779 static int
3780 load_unflushed_svr_segs_cb(spa_t *spa, space_map_entry_t *sme,
3781 uint64_t txg, void *arg)
3782 {
3783 spa_vdev_removal_t *svr = arg;
3784
3785 uint64_t offset = sme->sme_offset;
3786 uint64_t size = sme->sme_run;
3787
3788 /* skip vdevs we don't care about */
3789 if (sme->sme_vdev != svr->svr_vdev_id)
3790 return (0);
3791
3792 vdev_t *vd = vdev_lookup_top(spa, sme->sme_vdev);
3793 metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3794 ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
3795
3796 if (txg < metaslab_unflushed_txg(ms))
3797 return (0);
3798
3799 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
3800 ASSERT(vim != NULL);
3801 if (offset >= vdev_indirect_mapping_max_offset(vim))
3802 return (0);
3803
3804 if (sme->sme_type == SM_ALLOC)
3805 range_tree_add(svr->svr_allocd_segs, offset, size);
3806 else
3807 range_tree_remove(svr->svr_allocd_segs, offset, size);
3808
3809 return (0);
3810 }
3811
3812 static void
3813 zdb_ddt_leak_init(spa_t *spa, zdb_cb_t *zcb)
3814 {
3815 ddt_bookmark_t ddb;
3816 ddt_entry_t dde;
3817 int error;
3818
3819 ASSERT(!dump_opt['L']);
3820
3821 bzero(&ddb, sizeof (ddb));
3822 while ((error = ddt_walk(spa, &ddb, &dde)) == 0) {
3823 blkptr_t blk;
3824 ddt_phys_t *ddp = dde.dde_phys;
3825
3826 if (ddb.ddb_class == DDT_CLASS_UNIQUE)
3827 return;
3828
3829 ASSERT(ddt_phys_total_refcnt(&dde) > 1);
3830
3831 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
3832 if (ddp->ddp_phys_birth == 0)
3833 continue;
3834 ddt_bp_create(ddb.ddb_checksum,
3835 &dde.dde_key, ddp, &blk);
3836 if (p == DDT_PHYS_DITTO) {
3837 zdb_count_block(zcb, NULL, &blk, ZDB_OT_DITTO);
3838 } else {
3839 zcb->zcb_dedup_asize +=
3840 BP_GET_ASIZE(&blk) * (ddp->ddp_refcnt - 1);
3841 zcb->zcb_dedup_blocks++;
3842 }
3843 }
3844 ddt_t *ddt = spa->spa_ddt[ddb.ddb_checksum];
3845 ddt_enter(ddt);
3846 VERIFY(ddt_lookup(ddt, &blk, B_TRUE) != NULL);
3847 ddt_exit(ddt);
3848 }
3849
3850 ASSERT(error == ENOENT);
3851 }
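
/*
 * A sketch of the dedup accounting above: the traversal will visit a
 * deduped block once per reference, so the over-count that must later
 * be subtracted from the block sum is asize * (refcnt - 1). Not called
 * by zdb; for illustration only.
 */
static uint64_t
sketch_dedup_overcount(uint64_t asize, uint64_t refcnt)
{
	return (refcnt > 1 ? asize * (refcnt - 1) : 0);
}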
3852
3853 /* ARGSUSED */
3854 static void
3855 claim_segment_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
3856 uint64_t size, void *arg)
3857 {
3858 /*
3859 * This callback was called through a remap from
3860 * a device being removed. Therefore, the vdev that
3861 * this callback is applied to is a concrete
3862 * vdev.
3863 */
3864 ASSERT(vdev_is_concrete(vd));
3865
3866 VERIFY0(metaslab_claim_impl(vd, offset, size,
3867 spa_min_claim_txg(vd->vdev_spa)));
3868 }
3869
3870 static void
3871 claim_segment_cb(void *arg, uint64_t offset, uint64_t size)
3872 {
3873 vdev_t *vd = arg;
3874
3875 vdev_indirect_ops.vdev_op_remap(vd, offset, size,
3876 claim_segment_impl_cb, NULL);
3877 }
3878
3879 /*
3880 * After accounting for all allocated blocks that are directly referenced,
3881 * we might have missed a reference to a block from a partially complete
3882 * (and thus unused) indirect mapping object. We perform a secondary pass
3883 * through the metaslabs we have already mapped and claim the destination
3884 * blocks.
3885 */
3886 static void
3887 zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
3888 {
3889 if (dump_opt['L'])
3890 return;
3891
3892 if (spa->spa_vdev_removal == NULL)
3893 return;
3894
3895 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3896
3897 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
3898 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
3899 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
3900
3901 ASSERT0(range_tree_space(svr->svr_allocd_segs));
3902
3903 range_tree_t *allocs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
3904 for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
3905 metaslab_t *msp = vd->vdev_ms[msi];
3906
3907 if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
3908 break;
3909
3910 ASSERT0(range_tree_space(allocs));
3911 if (msp->ms_sm != NULL)
3912 VERIFY0(space_map_load(msp->ms_sm, allocs, SM_ALLOC));
3913 range_tree_vacate(allocs, range_tree_add, svr->svr_allocd_segs);
3914 }
3915 range_tree_destroy(allocs);
3916
3917 iterate_through_spacemap_logs(spa, load_unflushed_svr_segs_cb, svr);
3918
3919 /*
3920 * Clear everything past what has been synced,
3921 * because we have not allocated mappings for
3922 * it yet.
3923 */
3924 range_tree_clear(svr->svr_allocd_segs,
3925 vdev_indirect_mapping_max_offset(vim),
3926 vd->vdev_asize - vdev_indirect_mapping_max_offset(vim));
3927
3928 zcb->zcb_removing_size += range_tree_space(svr->svr_allocd_segs);
3929 range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
3930
3931 spa_config_exit(spa, SCL_CONFIG, FTAG);
3932 }
3933
3934 /* ARGSUSED */
3935 static int
3936 increment_indirect_mapping_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
3937 {
3938 zdb_cb_t *zcb = arg;
3939 spa_t *spa = zcb->zcb_spa;
3940 vdev_t *vd;
3941 const dva_t *dva = &bp->blk_dva[0];
3942
3943 ASSERT(!dump_opt['L']);
3944 ASSERT3U(BP_GET_NDVAS(bp), ==, 1);
3945
3946 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
3947 vd = vdev_lookup_top(zcb->zcb_spa, DVA_GET_VDEV(dva));
3948 ASSERT3P(vd, !=, NULL);
3949 spa_config_exit(spa, SCL_VDEV, FTAG);
3950
3951 ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
3952 ASSERT3P(zcb->zcb_vd_obsolete_counts[vd->vdev_id], !=, NULL);
3953
3954 vdev_indirect_mapping_increment_obsolete_count(
3955 vd->vdev_indirect_mapping,
3956 DVA_GET_OFFSET(dva), DVA_GET_ASIZE(dva),
3957 zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
3958
3959 return (0);
3960 }
3961
3962 static uint32_t *
3963 zdb_load_obsolete_counts(vdev_t *vd)
3964 {
3965 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
3966 spa_t *spa = vd->vdev_spa;
3967 spa_condensing_indirect_phys_t *scip =
3968 &spa->spa_condensing_indirect_phys;
3969 uint32_t *counts;
3970
3971 EQUIV(vdev_obsolete_sm_object(vd) != 0, vd->vdev_obsolete_sm != NULL);
3972 counts = vdev_indirect_mapping_load_obsolete_counts(vim);
3973 if (vd->vdev_obsolete_sm != NULL) {
3974 vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
3975 vd->vdev_obsolete_sm);
3976 }
3977 if (scip->scip_vdev == vd->vdev_id &&
3978 scip->scip_prev_obsolete_sm_object != 0) {
3979 space_map_t *prev_obsolete_sm = NULL;
3980 VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
3981 scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
3982 vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
3983 prev_obsolete_sm);
3984 space_map_close(prev_obsolete_sm);
3985 }
3986 return (counts);
3987 }
3988
3989 typedef struct checkpoint_sm_exclude_entry_arg {
3990 vdev_t *cseea_vd;
3991 uint64_t cseea_checkpoint_size;
3992 } checkpoint_sm_exclude_entry_arg_t;
3993
3994 static int
3995 checkpoint_sm_exclude_entry_cb(space_map_entry_t *sme, void *arg)
3996 {
3997 checkpoint_sm_exclude_entry_arg_t *cseea = arg;
3998 vdev_t *vd = cseea->cseea_vd;
3999 metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
4000 uint64_t end = sme->sme_offset + sme->sme_run;
4001
4002 ASSERT(sme->sme_type == SM_FREE);
4003
4004 /*
4005 * Since the vdev_checkpoint_sm exists in the vdev level
4006 * and the ms_sm space maps exist in the metaslab level,
4007 * an entry in the checkpoint space map could theoretically
4008 * cross the boundaries of the metaslab to which it belongs.
4009 *
4010 * In reality, because of the way that we populate and
4011 * manipulate the checkpoint's space maps currently,
4012 * there shouldn't be any entries that cross metaslabs.
4013 * Hence the assertion below.
4014 *
4015 * That said, there is no fundamental requirement that
4016 * the checkpoint's space map entries should not cross
4017 * metaslab boundaries. So if needed we could add code
4018 * that handles metaslab-crossing segments in the future.
4019 */
4020 VERIFY3U(sme->sme_offset, >=, ms->ms_start);
4021 VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
4022
4023 /*
4024 * By removing the entry from the allocated segments we
4025 * also verify that the entry is there to begin with.
4026 */
4027 mutex_enter(&ms->ms_lock);
4028 range_tree_remove(ms->ms_allocatable, sme->sme_offset, sme->sme_run);
4029 mutex_exit(&ms->ms_lock);
4030
4031 cseea->cseea_checkpoint_size += sme->sme_run;
4032 return (0);
4033 }
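
/*
 * A standalone sketch of the boundary check above: the metaslab index
 * of an offset is offset >> ms_shift, so an entry (with run > 0) stays
 * inside a single metaslab iff its first and last byte map to the same
 * index. Plain integers stand in for the vdev and metaslab structures.
 */
static int
sketch_entry_in_one_metaslab(uint64_t offset, uint64_t run,
    unsigned ms_shift)
{
	return ((offset >> ms_shift) == ((offset + run - 1) >> ms_shift));
}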
4034
4035 static void
4036 zdb_leak_init_vdev_exclude_checkpoint(vdev_t *vd, zdb_cb_t *zcb)
4037 {
4038 spa_t *spa = vd->vdev_spa;
4039 space_map_t *checkpoint_sm = NULL;
4040 uint64_t checkpoint_sm_obj;
4041
4042 /*
4043 * If there is no vdev_top_zap, we are in a pool whose
4044 * version predates the pool checkpoint feature.
4045 */
4046 if (vd->vdev_top_zap == 0)
4047 return;
4048
4049 /*
4050 * If there is no reference of the vdev_checkpoint_sm in
4051 * the vdev_top_zap, then one of the following scenarios
4052 * is true:
4053 *
4054 * 1] There is no checkpoint
4055 * 2] There is a checkpoint, but no checkpointed blocks
4056 * have been freed yet
4057 * 3] The current vdev is indirect
4058 *
4059 * In these cases we return immediately.
4060 */
4061 if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap,
4062 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
4063 return;
4064
4065 VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap,
4066 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1,
4067 &checkpoint_sm_obj));
4068
4069 checkpoint_sm_exclude_entry_arg_t cseea;
4070 cseea.cseea_vd = vd;
4071 cseea.cseea_checkpoint_size = 0;
4072
4073 VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa),
4074 checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift));
4075
4076 VERIFY0(space_map_iterate(checkpoint_sm,
4077 space_map_length(checkpoint_sm),
4078 checkpoint_sm_exclude_entry_cb, &cseea));
4079 space_map_close(checkpoint_sm);
4080
4081 zcb->zcb_checkpoint_size += cseea.cseea_checkpoint_size;
4082 }
4083
4084 static void
4085 zdb_leak_init_exclude_checkpoint(spa_t *spa, zdb_cb_t *zcb)
4086 {
4087 ASSERT(!dump_opt['L']);
4088
4089 vdev_t *rvd = spa->spa_root_vdev;
4090 for (uint64_t c = 0; c < rvd->vdev_children; c++) {
4091 ASSERT3U(c, ==, rvd->vdev_child[c]->vdev_id);
4092 zdb_leak_init_vdev_exclude_checkpoint(rvd->vdev_child[c], zcb);
4093 }
4094 }
4095
4096 static int
4097 count_unflushed_space_cb(spa_t *spa, space_map_entry_t *sme,
4098 uint64_t txg, void *arg)
4099 {
4100 int64_t *ualloc_space = arg;
4101 uint64_t offset = sme->sme_offset;
4102 uint64_t vdev_id = sme->sme_vdev;
4103
4104 vdev_t *vd = vdev_lookup_top(spa, vdev_id);
4105 if (!vdev_is_concrete(vd))
4106 return (0);
4107
4108 metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
4109 ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
4110
4111 if (txg < metaslab_unflushed_txg(ms))
4112 return (0);
4113
4114 if (sme->sme_type == SM_ALLOC)
4115 *ualloc_space += sme->sme_run;
4116 else
4117 *ualloc_space -= sme->sme_run;
4118
4119 return (0);
4120 }
4121
4122 static int64_t
4123 get_unflushed_alloc_space(spa_t *spa)
4124 {
4125 if (dump_opt['L'])
4126 return (0);
4127
4128 int64_t ualloc_space = 0;
4129 iterate_through_spacemap_logs(spa, count_unflushed_space_cb,
4130 &ualloc_space);
4131 return (ualloc_space);
4132 }
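
/*
 * A sketch of the accumulation above: replaying unflushed log entries
 * in order, ALLOC runs add space and FREE runs subtract it, so the net
 * must be carried in a signed quantity. Hypothetical arrays stand in
 * for the space map log; for illustration only.
 */
static int64_t
sketch_net_unflushed(const uint64_t *runs, const int *is_alloc, int n)
{
	int64_t net = 0;

	for (int i = 0; i < n; i++)
		net += is_alloc[i] ? (int64_t)runs[i] : -(int64_t)runs[i];
	return (net);
}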
4133
4134 static int
4135 load_unflushed_cb(spa_t *spa, space_map_entry_t *sme, uint64_t txg, void *arg)
4136 {
4137 maptype_t *uic_maptype = arg;
4138 uint64_t offset = sme->sme_offset;
4139 uint64_t size = sme->sme_run;
4140 uint64_t vdev_id = sme->sme_vdev;
4141 vdev_t *vd = vdev_lookup_top(spa, vdev_id);
4142
4143 /* skip indirect vdevs */
4144 if (!vdev_is_concrete(vd))
4145 return (0);
4146
4147 metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
4148
4149 ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
4150 ASSERT(*uic_maptype == SM_ALLOC || *uic_maptype == SM_FREE);
4151
4152 if (txg < metaslab_unflushed_txg(ms))
4153 return (0);
4154
4155 if (*uic_maptype == sme->sme_type)
4156 range_tree_add(ms->ms_allocatable, offset, size);
4157 else
4158 range_tree_remove(ms->ms_allocatable, offset, size);
4159
4160 return (0);
4161 }
4162
4163 static void
4164 load_unflushed_to_ms_allocatables(spa_t *spa, maptype_t maptype)
4165 {
4166 iterate_through_spacemap_logs(spa, load_unflushed_cb, &maptype);
4167 }
4168
4169 static void
4170 load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype)
4171 {
4172 vdev_t *rvd = spa->spa_root_vdev;
4173 for (uint64_t i = 0; i < rvd->vdev_children; i++) {
4174 vdev_t *vd = rvd->vdev_child[i];
4175
4176 ASSERT3U(i, ==, vd->vdev_id);
4177
4178 if (vd->vdev_ops == &vdev_indirect_ops)
4179 continue;
4180
4181 for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
4182 metaslab_t *msp = vd->vdev_ms[m];
4183
4184 (void) fprintf(stderr,
4185 "\rloading concrete vdev %llu, "
4186 "metaslab %llu of %llu ...",
4187 (longlong_t)vd->vdev_id,
4188 (longlong_t)msp->ms_id,
4189 (longlong_t)vd->vdev_ms_count);
4190
4191 mutex_enter(&msp->ms_lock);
4192 range_tree_vacate(msp->ms_allocatable, NULL, NULL);
4193
4194 /*
4195 * We don't want to spend the CPU manipulating the
4196 * size-ordered tree, so clear the range_tree ops.
4197 */
4198 msp->ms_allocatable->rt_ops = NULL;
4199
4200 if (msp->ms_sm != NULL) {
4201 VERIFY0(space_map_load(msp->ms_sm,
4202 msp->ms_allocatable, maptype));
4203 }
4204 if (!msp->ms_loaded)
4205 msp->ms_loaded = B_TRUE;
4206 mutex_exit(&msp->ms_lock);
4207 }
4208 }
4209
4210 load_unflushed_to_ms_allocatables(spa, maptype);
4211 }
4212
4213 /*
4214 * vim_idxp is an in-out parameter which (for indirect vdevs) is the
4215 * index in vim_entries that has the first entry in this metaslab.
4216 * On return, it will be set to the first entry after this metaslab.
4217 */
4218 static void
4219 load_indirect_ms_allocatable_tree(vdev_t *vd, metaslab_t *msp,
4220 uint64_t *vim_idxp)
4221 {
4222 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
4223
4224 mutex_enter(&msp->ms_lock);
4225 range_tree_vacate(msp->ms_allocatable, NULL, NULL);
4226
4227 /*
4228 * We don't want to spend the CPU manipulating the
4229 * size-ordered tree, so clear the range_tree ops.
4230 */
4231 msp->ms_allocatable->rt_ops = NULL;
4232
4233 for (; *vim_idxp < vdev_indirect_mapping_num_entries(vim);
4234 (*vim_idxp)++) {
4235 vdev_indirect_mapping_entry_phys_t *vimep =
4236 &vim->vim_entries[*vim_idxp];
4237 uint64_t ent_offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
4238 uint64_t ent_len = DVA_GET_ASIZE(&vimep->vimep_dst);
4239 ASSERT3U(ent_offset, >=, msp->ms_start);
4240 if (ent_offset >= msp->ms_start + msp->ms_size)
4241 break;
4242
4243 /*
4244 * Mappings do not cross metaslab boundaries,
4245 * because we create them by walking the metaslabs.
4246 */
4247 ASSERT3U(ent_offset + ent_len, <=,
4248 msp->ms_start + msp->ms_size);
4249 range_tree_add(msp->ms_allocatable, ent_offset, ent_len);
4250 }
4251
4252 if (!msp->ms_loaded)
4253 msp->ms_loaded = B_TRUE;
4254 mutex_exit(&msp->ms_lock);
4255 }
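
/*
 * A sketch of the in-out cursor pattern above: mapping entries are
 * sorted by source offset, so one index can be threaded through
 * successive per-metaslab calls, each call consuming only the entries
 * that fall inside its [start, start + size) window. Hypothetical
 * names; for illustration only.
 */
static void
sketch_consume_window(const uint64_t *offsets, uint64_t nentries,
    uint64_t start, uint64_t size, uint64_t *cursor)
{
	while (*cursor < nentries && offsets[*cursor] < start + size) {
		/* offsets[*cursor] belongs to this window; process it */
		(*cursor)++;
	}
}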
4256
4257 static void
4258 zdb_leak_init_prepare_indirect_vdevs(spa_t *spa, zdb_cb_t *zcb)
4259 {
4260 ASSERT(!dump_opt['L']);
4261
4262 vdev_t *rvd = spa->spa_root_vdev;
4263 for (uint64_t c = 0; c < rvd->vdev_children; c++) {
4264 vdev_t *vd = rvd->vdev_child[c];
4265
4266 ASSERT3U(c, ==, vd->vdev_id);
4267
4268 if (vd->vdev_ops != &vdev_indirect_ops)
4269 continue;
4270
4271 /*
4272 * Note: we don't check for mapping leaks on
4273 * removing vdevs because their ms_allocatable trees
4274 * are used to look for leaks in allocated space.
4275 */
4276 zcb->zcb_vd_obsolete_counts[c] = zdb_load_obsolete_counts(vd);
4277
4278 /*
4279 * Normally, indirect vdevs don't have any
4280 * metaslabs. We want to set them up for
4281 * zio_claim().
4282 */
4283 VERIFY0(vdev_metaslab_init(vd, 0));
4284
4285 #if defined(DEBUG)
4286 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
4287 #endif
4288 uint64_t vim_idx = 0;
4289 for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
4290
4291 (void) fprintf(stderr,
4292 "\rloading indirect vdev %llu, "
4293 "metaslab %llu of %llu ...",
4294 (longlong_t)vd->vdev_id,
4295 (longlong_t)vd->vdev_ms[m]->ms_id,
4296 (longlong_t)vd->vdev_ms_count);
4297
4298 load_indirect_ms_allocatable_tree(vd, vd->vdev_ms[m],
4299 &vim_idx);
4300 }
4301 ASSERT3U(vim_idx, ==, vdev_indirect_mapping_num_entries(vim));
4302 }
4303 }
4304
4305 static void
4306 zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
4307 {
4308 zcb->zcb_spa = spa;
4309
4310 if (dump_opt['L'])
4311 return;
4312
4313 dsl_pool_t *dp = spa->spa_dsl_pool;
4314 vdev_t *rvd = spa->spa_root_vdev;
4315
4316 /*
4317 * We are going to be changing the meaning of the metaslab's
4318 * ms_allocatable. Ensure that the allocator doesn't try to
4319 * use the tree.
4320 */
4321 spa->spa_normal_class->mc_ops = &zdb_metaslab_ops;
4322 spa->spa_log_class->mc_ops = &zdb_metaslab_ops;
4323
4324 zcb->zcb_vd_obsolete_counts =
4325 umem_zalloc(rvd->vdev_children * sizeof (uint32_t *),
4326 UMEM_NOFAIL);
4327
4328 /*
4329 * For leak detection, we overload the ms_allocatable trees
4330 * to contain allocated segments instead of free segments.
4331 * As a result, we can't use the normal metaslab_load/unload
4332 * interfaces.
4333 */
4334 zdb_leak_init_prepare_indirect_vdevs(spa, zcb);
4335 load_concrete_ms_allocatable_trees(spa, SM_ALLOC);
4336
4337 /*
4338 * In load_concrete_ms_allocatable_trees() we loaded all the
4339 * allocated entries from the ms_sm to the ms_allocatable for
4340 * each metaslab. If the pool has a checkpoint or is in the
4341 * middle of discarding a checkpoint, some of these blocks
4342 * may have been freed but their ms_sm may not have been
4343 * updated because they are referenced by the checkpoint. In
4344 * order to avoid false-positives during leak-detection, we
4345 * go through the vdev's checkpoint space map and exclude all
4346 * its entries from their relevant ms_allocatable.
4347 *
4348 * We also aggregate the space held by the checkpoint and add
4349 * it to zcb_checkpoint_size.
4350 *
4351 * Note that at this point we are also verifying that all the
4352 * entries on the checkpoint_sm are marked as allocated in
4353 * the ms_sm of their relevant metaslab.
4354 * [see comment in checkpoint_sm_exclude_entry_cb()]
4355 */
4356 zdb_leak_init_exclude_checkpoint(spa, zcb);
4357 ASSERT3U(zcb->zcb_checkpoint_size, ==, spa_get_checkpoint_space(spa));
4358
4359 /* for cleaner progress output */
4360 (void) fprintf(stderr, "\n");
4361
4362 if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
4363 ASSERT(spa_feature_is_enabled(spa,
4364 SPA_FEATURE_DEVICE_REMOVAL));
4365 (void) bpobj_iterate_nofree(&dp->dp_obsolete_bpobj,
4366 increment_indirect_mapping_cb, zcb, NULL);
4367 }
4368
4369 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4370 zdb_ddt_leak_init(spa, zcb);
4371 spa_config_exit(spa, SCL_CONFIG, FTAG);
4372 }
4373
4374 static boolean_t
4375 zdb_check_for_obsolete_leaks(vdev_t *vd, zdb_cb_t *zcb)
4376 {
4377 boolean_t leaks = B_FALSE;
4378 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
4379 uint64_t total_leaked = 0;
4380
4381 ASSERT(vim != NULL);
4382
4383 for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
4384 vdev_indirect_mapping_entry_phys_t *vimep =
4385 &vim->vim_entries[i];
4386 uint64_t obsolete_bytes = 0;
4387 uint64_t offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
4388 metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
4389
4390 /*
4391 * This is not very efficient but it's easy to
4392 * verify correctness.
4393 */
4394 for (uint64_t inner_offset = 0;
4395 inner_offset < DVA_GET_ASIZE(&vimep->vimep_dst);
4396 inner_offset += 1 << vd->vdev_ashift) {
4397 if (range_tree_contains(msp->ms_allocatable,
4398 offset + inner_offset, 1 << vd->vdev_ashift)) {
4399 obsolete_bytes += 1 << vd->vdev_ashift;
4400 }
4401 }
4402
4403 int64_t bytes_leaked = obsolete_bytes -
4404 zcb->zcb_vd_obsolete_counts[vd->vdev_id][i];
4405 ASSERT3U(DVA_GET_ASIZE(&vimep->vimep_dst), >=,
4406 zcb->zcb_vd_obsolete_counts[vd->vdev_id][i]);
4407 if (bytes_leaked != 0 &&
4408 (vdev_obsolete_counts_are_precise(vd) ||
4409 dump_opt['d'] >= 5)) {
4410 (void) printf("obsolete indirect mapping count "
4411 "mismatch on %llu:%llx:%llx : %llx bytes leaked\n",
4412 (u_longlong_t)vd->vdev_id,
4413 (u_longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
4414 (u_longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
4415 (u_longlong_t)bytes_leaked);
4416 }
4417 total_leaked += ABS(bytes_leaked);
4418 }
4419
4420 if (!vdev_obsolete_counts_are_precise(vd) && total_leaked > 0) {
4421 int pct_leaked = total_leaked * 100 /
4422 vdev_indirect_mapping_bytes_mapped(vim);
4423 (void) printf("cannot verify obsolete indirect mapping "
4424 "counts of vdev %llu because precise feature was not "
4425 "enabled when it was removed: %d%% (%llx bytes) of mapping"
4426 "unreferenced\n",
4427 (u_longlong_t)vd->vdev_id, pct_leaked,
4428 (u_longlong_t)total_leaked);
4429 } else if (total_leaked > 0) {
4430 (void) printf("obsolete indirect mapping count mismatch "
4431 "for vdev %llu -- %llx total bytes mismatched\n",
4432 (u_longlong_t)vd->vdev_id,
4433 (u_longlong_t)total_leaked);
4434 leaks |= B_TRUE;
4435 }
4436
4437 vdev_indirect_mapping_free_obsolete_counts(vim,
4438 zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
4439 zcb->zcb_vd_obsolete_counts[vd->vdev_id] = NULL;
4440
4441 return (leaks);
4442 }
4443
4444 static boolean_t
4445 zdb_leak_fini(spa_t *spa, zdb_cb_t *zcb)
4446 {
4447 if (dump_opt['L'])
4448 return (B_FALSE);
4449
4450 boolean_t leaks = B_FALSE;
4451
4452 vdev_t *rvd = spa->spa_root_vdev;
4453 for (unsigned c = 0; c < rvd->vdev_children; c++) {
4454 vdev_t *vd = rvd->vdev_child[c];
4455 #if DEBUG
4456 metaslab_group_t *mg = vd->vdev_mg;
4457 #endif
4458
4459 if (zcb->zcb_vd_obsolete_counts[c] != NULL) {
4460 leaks |= zdb_check_for_obsolete_leaks(vd, zcb);
4461 }
4462
4463 for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
4464 metaslab_t *msp = vd->vdev_ms[m];
4465 ASSERT3P(mg, ==, msp->ms_group);
4466
4467 /*
4468 * ms_allocatable has been overloaded
4469 * to contain allocated segments. Now that
4470 * we finished traversing all blocks, any
4471 * block that remains in the ms_allocatable
4472 * represents an allocated block that we
4473 * did not claim during the traversal.
4474 * Claimed blocks would have been removed
4475 * from the ms_allocatable. For indirect
4476 * vdevs, space remaining in the tree
4477 * represents parts of the mapping that are
4478 * not referenced, which is not a bug.
4479 */
4480 if (vd->vdev_ops == &vdev_indirect_ops) {
4481 range_tree_vacate(msp->ms_allocatable,
4482 NULL, NULL);
4483 } else {
4484 range_tree_vacate(msp->ms_allocatable,
4485 zdb_leak, vd);
4486 }
4487 if (msp->ms_loaded) {
4488 msp->ms_loaded = B_FALSE;
4489 }
4490 }
4491
4492 }
4493
4494 umem_free(zcb->zcb_vd_obsolete_counts,
4495 rvd->vdev_children * sizeof (uint32_t *));
4496 zcb->zcb_vd_obsolete_counts = NULL;
4497
4498 return (leaks);
4499 }
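
/*
 * A toy model of the whole leak check implemented by zdb_leak_init()
 * and zdb_leak_fini(): start from the allocated set, delete each block
 * the traversal claims, and whatever survives was never referenced --
 * a leak. A byte per unit of space stands in for the range trees; for
 * illustration only, not called by zdb.
 */
static int
sketch_count_leaks(unsigned char *allocated, const uint64_t *claimed,
    uint64_t nclaimed, uint64_t space)
{
	int leaks = 0;

	for (uint64_t i = 0; i < nclaimed; i++)
		allocated[claimed[i]] = 0;	/* claim == remove from set */
	for (uint64_t off = 0; off < space; off++)
		leaks += allocated[off];	/* leftovers were never claimed */
	return (leaks);
}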
4500
4501 /* ARGSUSED */
4502 static int
4503 count_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
4504 {
4505 zdb_cb_t *zcb = arg;
4506
4507 if (dump_opt['b'] >= 5) {
4508 char blkbuf[BP_SPRINTF_LEN];
4509 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
4510 (void) printf("[%s] %s\n",
4511 "deferred free", blkbuf);
4512 }
4513 zdb_count_block(zcb, NULL, bp, ZDB_OT_DEFERRED);
4514 return (0);
4515 }
4516
4517 static int
4518 dump_block_stats(spa_t *spa)
4519 {
4520 zdb_cb_t zcb;
4521 zdb_blkstats_t *zb, *tzb;
4522 uint64_t norm_alloc, norm_space, total_alloc, total_found;
4523 int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
4524 TRAVERSE_NO_DECRYPT | TRAVERSE_HARD;
4525 boolean_t leaks = B_FALSE;
4526 int err;
4527
4528 bzero(&zcb, sizeof (zcb));
4529 (void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n",
4530 (dump_opt['c'] || !dump_opt['L']) ? "to verify " : "",
4531 (dump_opt['c'] == 1) ? "metadata " : "",
4532 dump_opt['c'] ? "checksums " : "",
4533 (dump_opt['c'] && !dump_opt['L']) ? "and verify " : "",
4534 !dump_opt['L'] ? "nothing leaked " : "");
4535
4536 /*
4537 * When leak detection is enabled we load all space maps as SM_ALLOC
4538 * maps, then traverse the pool claiming each block we discover. If
4539 * the pool is perfectly consistent, the segment trees will be empty
4540 * when we're done. Anything left over is a leak; any block we can't
4541 * claim (because it's not part of any space map) is a double
4542 * allocation, reference to a freed block, or an unclaimed log block.
4543 *
4544 * When leak detection is disabled (-L option) we still traverse the
4545 * pool claiming each block we discover, but we skip opening any space
4546 * maps.
4547 */
4549 zdb_leak_init(spa, &zcb);
4550
4551 /*
4552 * If there's a deferred-free bplist, process that first.
4553 */
4554 (void) bpobj_iterate_nofree(&spa->spa_deferred_bpobj,
4555 count_block_cb, &zcb, NULL);
4556
4557 if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
4558 (void) bpobj_iterate_nofree(&spa->spa_dsl_pool->dp_free_bpobj,
4559 count_block_cb, &zcb, NULL);
4560 }
4561
4562 zdb_claim_removing(spa, &zcb);
4563
4564 if (spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
4565 VERIFY3U(0, ==, bptree_iterate(spa->spa_meta_objset,
4566 spa->spa_dsl_pool->dp_bptree_obj, B_FALSE, count_block_cb,
4567 &zcb, NULL));
4568 }
4569
4570 if (dump_opt['c'] > 1)
4571 flags |= TRAVERSE_PREFETCH_DATA;
4572
4573 zcb.zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa));
4574 zcb.zcb_totalasize += metaslab_class_get_alloc(spa_special_class(spa));
4575 zcb.zcb_totalasize += metaslab_class_get_alloc(spa_dedup_class(spa));
4576 zcb.zcb_start = zcb.zcb_lastprint = gethrtime();
4577 err = traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb);
4578
4579 /*
4580 * If we've traversed the data blocks then we need to wait for those
4581 * I/Os to complete. We leverage "The Godfather" zio to wait on
4582 * all async I/Os to complete.
4583 */
4584 if (dump_opt['c']) {
4585 for (int i = 0; i < max_ncpus; i++) {
4586 (void) zio_wait(spa->spa_async_zio_root[i]);
4587 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
4588 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
4589 ZIO_FLAG_GODFATHER);
4590 }
4591 }
4592
4593 /*
4594 * Done after zio_wait() since zcb_haderrors is modified in
4595 * zdb_blkptr_done()
4596 */
4597 zcb.zcb_haderrors |= err;
4598
4599 if (zcb.zcb_haderrors) {
4600 (void) printf("\nError counts:\n\n");
4601 (void) printf("\t%5s %s\n", "errno", "count");
4602 for (int e = 0; e < 256; e++) {
4603 if (zcb.zcb_errors[e] != 0) {
4604 (void) printf("\t%5d %llu\n",
4605 e, (u_longlong_t)zcb.zcb_errors[e]);
4606 }
4607 }
4608 }
4609
4610 /*
4611 * Report any leaked segments.
4612 */
4613 leaks |= zdb_leak_fini(spa, &zcb);
4614
4615 tzb = &zcb.zcb_type[ZB_TOTAL][ZDB_OT_TOTAL];
4616
4617 norm_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
4618 norm_space = metaslab_class_get_space(spa_normal_class(spa));
4619
4620 total_alloc = norm_alloc +
4621 metaslab_class_get_alloc(spa_log_class(spa)) +
4622 metaslab_class_get_alloc(spa_special_class(spa)) +
4623 metaslab_class_get_alloc(spa_dedup_class(spa)) +
4624 get_unflushed_alloc_space(spa);
4625 total_found = tzb->zb_asize - zcb.zcb_dedup_asize +
4626 zcb.zcb_removing_size + zcb.zcb_checkpoint_size;
4627
4628 if (total_found == total_alloc && !dump_opt['L']) {
4629 (void) printf("\n\tNo leaks (block sum matches space"
4630 " maps exactly)\n");
4631 } else if (!dump_opt['L']) {
4632 (void) printf("block traversal size %llu != alloc %llu "
4633 "(%s %lld)\n",
4634 (u_longlong_t)total_found,
4635 (u_longlong_t)total_alloc,
4636 (dump_opt['L']) ? "unreachable" : "leaked",
4637 (longlong_t)(total_alloc - total_found));
4638 leaks = B_TRUE;
4639 }
4640
4641 if (tzb->zb_count == 0)
4642 return (2);
4643
4644 (void) printf("\n");
4645 (void) printf("\t%-16s %14llu\n", "bp count:",
4646 (u_longlong_t)tzb->zb_count);
4647 (void) printf("\t%-16s %14llu\n", "ganged count:",
4648 (longlong_t)tzb->zb_gangs);
4649 (void) printf("\t%-16s %14llu avg: %6llu\n", "bp logical:",
4650 (u_longlong_t)tzb->zb_lsize,
4651 (u_longlong_t)(tzb->zb_lsize / tzb->zb_count));
4652 (void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n",
4653 "bp physical:", (u_longlong_t)tzb->zb_psize,
4654 (u_longlong_t)(tzb->zb_psize / tzb->zb_count),
4655 (double)tzb->zb_lsize / tzb->zb_psize);
4656 (void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n",
4657 "bp allocated:", (u_longlong_t)tzb->zb_asize,
4658 (u_longlong_t)(tzb->zb_asize / tzb->zb_count),
4659 (double)tzb->zb_lsize / tzb->zb_asize);
4660 (void) printf("\t%-16s %14llu ref>1: %6llu deduplication: %6.2f\n",
4661 "bp deduped:", (u_longlong_t)zcb.zcb_dedup_asize,
4662 (u_longlong_t)zcb.zcb_dedup_blocks,
4663 (double)zcb.zcb_dedup_asize / tzb->zb_asize + 1.0);
4664 (void) printf("\t%-16s %14llu used: %5.2f%%\n", "Normal class:",
4665 (u_longlong_t)norm_alloc, 100.0 * norm_alloc / norm_space);
4666
4667 if (spa_special_class(spa)->mc_rotor != NULL) {
4668 uint64_t alloc = metaslab_class_get_alloc(
4669 spa_special_class(spa));
4670 uint64_t space = metaslab_class_get_space(
4671 spa_special_class(spa));
4672
4673 (void) printf("\t%-16s %14llu used: %5.2f%%\n",
4674 "Special class", (u_longlong_t)alloc,
4675 100.0 * alloc / space);
4676 }
4677
4678 if (spa_dedup_class(spa)->mc_rotor != NULL) {
4679 uint64_t alloc = metaslab_class_get_alloc(
4680 spa_dedup_class(spa));
4681 uint64_t space = metaslab_class_get_space(
4682 spa_dedup_class(spa));
4683
4684 (void) printf("\t%-16s %14llu used: %5.2f%%\n",
4685 "Dedup class", (u_longlong_t)alloc,
4686 100.0 * alloc / space);
4687 }
4688
4689 for (bp_embedded_type_t i = 0; i < NUM_BP_EMBEDDED_TYPES; i++) {
4690 if (zcb.zcb_embedded_blocks[i] == 0)
4691 continue;
4692 (void) printf("\n");
4693 (void) printf("\tadditional, non-pointer bps of type %u: "
4694 "%10llu\n",
4695 i, (u_longlong_t)zcb.zcb_embedded_blocks[i]);
4696
4697 if (dump_opt['b'] >= 3) {
4698 (void) printf("\t number of (compressed) bytes: "
4699 "number of bps\n");
4700 dump_histogram(zcb.zcb_embedded_histogram[i],
4701 sizeof (zcb.zcb_embedded_histogram[i]) /
4702 sizeof (zcb.zcb_embedded_histogram[i][0]), 0);
4703 }
4704 }
4705
4706 if (tzb->zb_ditto_samevdev != 0) {
4707 (void) printf("\tDittoed blocks on same vdev: %llu\n",
4708 (longlong_t)tzb->zb_ditto_samevdev);
4709 }
4710 if (tzb->zb_ditto_same_ms != 0) {
4711 (void) printf("\tDittoed blocks in same metaslab: %llu\n",
4712 (longlong_t)tzb->zb_ditto_same_ms);
4713 }
4714
4715 for (uint64_t v = 0; v < spa->spa_root_vdev->vdev_children; v++) {
4716 vdev_t *vd = spa->spa_root_vdev->vdev_child[v];
4717 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
4718
4719 if (vim == NULL) {
4720 continue;
4721 }
4722
4723 char mem[32];
4724 zdb_nicenum(vdev_indirect_mapping_num_entries(vim),
4725 mem, vdev_indirect_mapping_size(vim));
4726
4727 (void) printf("\tindirect vdev id %llu has %llu segments "
4728 "(%s in memory)\n",
4729 (longlong_t)vd->vdev_id,
4730 (longlong_t)vdev_indirect_mapping_num_entries(vim), mem);
4731 }
4732
4733 if (dump_opt['b'] >= 2) {
4734 int l, t, level;
4735 (void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE"
4736 "\t avg\t comp\t%%Total\tType\n");
4737
4738 for (t = 0; t <= ZDB_OT_TOTAL; t++) {
4739 char csize[32], lsize[32], psize[32], asize[32];
4740 char avg[32], gang[32];
4741 const char *typename;
4742
4743 /* make sure nicenum has enough space */
4744 CTASSERT(sizeof (csize) >= NN_NUMBUF_SZ);
4745 CTASSERT(sizeof (lsize) >= NN_NUMBUF_SZ);
4746 CTASSERT(sizeof (psize) >= NN_NUMBUF_SZ);
4747 CTASSERT(sizeof (asize) >= NN_NUMBUF_SZ);
4748 CTASSERT(sizeof (avg) >= NN_NUMBUF_SZ);
4749 CTASSERT(sizeof (gang) >= NN_NUMBUF_SZ);
4750
4751 if (t < DMU_OT_NUMTYPES)
4752 typename = dmu_ot[t].ot_name;
4753 else
4754 typename = zdb_ot_extname[t - DMU_OT_NUMTYPES];
4755
4756 if (zcb.zcb_type[ZB_TOTAL][t].zb_asize == 0) {
4757 (void) printf("%6s\t%5s\t%5s\t%5s"
4758 "\t%5s\t%5s\t%6s\t%s\n",
4759 "-",
4760 "-",
4761 "-",
4762 "-",
4763 "-",
4764 "-",
4765 "-",
4766 typename);
4767 continue;
4768 }
4769
4770 for (l = ZB_TOTAL - 1; l >= -1; l--) {
4771 level = (l == -1 ? ZB_TOTAL : l);
4772 zb = &zcb.zcb_type[level][t];
4773
4774 if (zb->zb_asize == 0)
4775 continue;
4776
4777 if (dump_opt['b'] < 3 && level != ZB_TOTAL)
4778 continue;
4779
4780 if (level == 0 && zb->zb_asize ==
4781 zcb.zcb_type[ZB_TOTAL][t].zb_asize)
4782 continue;
4783
4784 zdb_nicenum(zb->zb_count, csize,
4785 sizeof (csize));
4786 zdb_nicenum(zb->zb_lsize, lsize,
4787 sizeof (lsize));
4788 zdb_nicenum(zb->zb_psize, psize,
4789 sizeof (psize));
4790 zdb_nicenum(zb->zb_asize, asize,
4791 sizeof (asize));
4792 zdb_nicenum(zb->zb_asize / zb->zb_count, avg,
4793 sizeof (avg));
4794 zdb_nicenum(zb->zb_gangs, gang, sizeof (gang));
4795
4796 (void) printf("%6s\t%5s\t%5s\t%5s\t%5s"
4797 "\t%5.2f\t%6.2f\t",
4798 csize, lsize, psize, asize, avg,
4799 (double)zb->zb_lsize / zb->zb_psize,
4800 100.0 * zb->zb_asize / tzb->zb_asize);
4801
4802 if (level == ZB_TOTAL)
4803 (void) printf("%s\n", typename);
4804 else
4805 (void) printf(" L%d %s\n",
4806 level, typename);
4807
4808 if (dump_opt['b'] >= 3 && zb->zb_gangs > 0) {
4809 (void) printf("\t number of ganged "
4810 "blocks: %s\n", gang);
4811 }
4812
4813 if (dump_opt['b'] >= 4) {
4814 (void) printf("psize "
4815 "(in 512-byte sectors): "
4816 "number of blocks\n");
4817 dump_histogram(zb->zb_psize_histogram,
4818 PSIZE_HISTO_SIZE, 0);
4819 }
4820 }
4821 }
4822 }
4823
4824 (void) printf("\n");
4825
4826 if (leaks)
4827 return (2);
4828
4829 if (zcb.zcb_haderrors)
4830 return (3);
4831
4832 return (0);
4833 }
4834
4835 typedef struct zdb_ddt_entry {
4836 ddt_key_t zdde_key;
4837 uint64_t zdde_ref_blocks;
4838 uint64_t zdde_ref_lsize;
4839 uint64_t zdde_ref_psize;
4840 uint64_t zdde_ref_dsize;
4841 avl_node_t zdde_node;
4842 } zdb_ddt_entry_t;
4843
4844 /* ARGSUSED */
4845 static int
4846 zdb_ddt_add_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
4847 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
4848 {
4849 avl_tree_t *t = arg;
4850 avl_index_t where;
4851 zdb_ddt_entry_t *zdde, zdde_search;
4852
4853 if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
4854 return (0);
4855
4856 if (dump_opt['S'] > 1 && zb->zb_level == ZB_ROOT_LEVEL) {
4857 (void) printf("traversing objset %llu, %llu objects, "
4858 "%lu blocks so far\n",
4859 (u_longlong_t)zb->zb_objset,
4860 (u_longlong_t)BP_GET_FILL(bp),
4861 avl_numnodes(t));
4862 }
4863
4864 if (BP_IS_HOLE(bp) || BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_OFF ||
4865 BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp)))
4866 return (0);
4867
4868 ddt_key_fill(&zdde_search.zdde_key, bp);
4869
4870 zdde = avl_find(t, &zdde_search, &where);
4871
4872 if (zdde == NULL) {
4873 zdde = umem_zalloc(sizeof (*zdde), UMEM_NOFAIL);
4874 zdde->zdde_key = zdde_search.zdde_key;
4875 avl_insert(t, zdde, where);
4876 }
4877
4878 zdde->zdde_ref_blocks += 1;
4879 zdde->zdde_ref_lsize += BP_GET_LSIZE(bp);
4880 zdde->zdde_ref_psize += BP_GET_PSIZE(bp);
4881 zdde->zdde_ref_dsize += bp_get_dsize_sync(spa, bp);
4882
4883 return (0);
4884 }
4885
4886 static void
4887 dump_simulated_ddt(spa_t *spa)
4888 {
4889 avl_tree_t t;
4890 void *cookie = NULL;
4891 zdb_ddt_entry_t *zdde;
4892 ddt_histogram_t ddh_total;
4893 ddt_stat_t dds_total;
4894
4895 bzero(&ddh_total, sizeof (ddh_total));
4896 bzero(&dds_total, sizeof (dds_total));
4897 avl_create(&t, ddt_entry_compare,
4898 sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node));
4899
4900 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4901
4902 (void) traverse_pool(spa, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
4903 TRAVERSE_NO_DECRYPT, zdb_ddt_add_cb, &t);
4904
4905 spa_config_exit(spa, SCL_CONFIG, FTAG);
4906
4907 while ((zdde = avl_destroy_nodes(&t, &cookie)) != NULL) {
4908 ddt_stat_t dds;
4909 uint64_t refcnt = zdde->zdde_ref_blocks;
4910 ASSERT(refcnt != 0);
4911
4912 dds.dds_blocks = zdde->zdde_ref_blocks / refcnt;
4913 dds.dds_lsize = zdde->zdde_ref_lsize / refcnt;
4914 dds.dds_psize = zdde->zdde_ref_psize / refcnt;
4915 dds.dds_dsize = zdde->zdde_ref_dsize / refcnt;
4916
4917 dds.dds_ref_blocks = zdde->zdde_ref_blocks;
4918 dds.dds_ref_lsize = zdde->zdde_ref_lsize;
4919 dds.dds_ref_psize = zdde->zdde_ref_psize;
4920 dds.dds_ref_dsize = zdde->zdde_ref_dsize;
4921
4922 ddt_stat_add(&ddh_total.ddh_stat[highbit64(refcnt) - 1],
4923 &dds, 0);
4924
4925 umem_free(zdde, sizeof (*zdde));
4926 }
4927
4928 avl_destroy(&t);
4929
4930 ddt_histogram_stat(&dds_total, &ddh_total);
4931
4932 (void) printf("Simulated DDT histogram:\n");
4933
4934 zpool_dump_ddt(&dds_total, &ddh_total);
4935
4936 dump_dedup_ratio(&dds_total);
4937 }
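
/*
 * A sketch of the per-entry reduction above: the "unique" figures are
 * the referenced totals divided by the refcount, and each entry lands
 * in the histogram bucket given by highbit64(refcnt) - 1, so refcounts
 * 2 and 3 share a bucket, 4 through 7 the next, and so on. The loop
 * below is an open-coded highbit64(); for illustration only.
 */
static int
sketch_ddt_bucket(uint64_t refcnt)
{
	int bit = -1;

	while (refcnt != 0) {
		refcnt >>= 1;
		bit++;
	}
	return (bit);	/* highbit64(refcnt) - 1; -1 for refcnt == 0 */
}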
4938
4939 static int
4940 verify_device_removal_feature_counts(spa_t *spa)
4941 {
4942 uint64_t dr_feature_refcount = 0;
4943 uint64_t oc_feature_refcount = 0;
4944 uint64_t indirect_vdev_count = 0;
4945 uint64_t precise_vdev_count = 0;
4946 uint64_t obsolete_counts_object_count = 0;
4947 uint64_t obsolete_sm_count = 0;
4948 uint64_t obsolete_counts_count = 0;
4949 uint64_t scip_count = 0;
4950 uint64_t obsolete_bpobj_count = 0;
4951 int ret = 0;
4952
4953 spa_condensing_indirect_phys_t *scip =
4954 &spa->spa_condensing_indirect_phys;
4955 if (scip->scip_next_mapping_object != 0) {
4956 vdev_t *vd = spa->spa_root_vdev->vdev_child[scip->scip_vdev];
4957 ASSERT(scip->scip_prev_obsolete_sm_object != 0);
4958 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
4959
4960 (void) printf("Condensing indirect vdev %llu: new mapping "
4961 "object %llu, prev obsolete sm %llu\n",
4962 (u_longlong_t)scip->scip_vdev,
4963 (u_longlong_t)scip->scip_next_mapping_object,
4964 (u_longlong_t)scip->scip_prev_obsolete_sm_object);
4965 if (scip->scip_prev_obsolete_sm_object != 0) {
4966 space_map_t *prev_obsolete_sm = NULL;
4967 VERIFY0(space_map_open(&prev_obsolete_sm,
4968 spa->spa_meta_objset,
4969 scip->scip_prev_obsolete_sm_object,
4970 0, vd->vdev_asize, 0));
4971 dump_spacemap(spa->spa_meta_objset, prev_obsolete_sm);
4972 (void) printf("\n");
4973 space_map_close(prev_obsolete_sm);
4974 }
4975
4976 scip_count += 2;
4977 }
4978
4979 for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
4980 vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
4981 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
4982
4983 if (vic->vic_mapping_object != 0) {
4984 ASSERT(vd->vdev_ops == &vdev_indirect_ops ||
4985 vd->vdev_removing);
4986 indirect_vdev_count++;
4987
4988 if (vd->vdev_indirect_mapping->vim_havecounts) {
4989 obsolete_counts_count++;
4990 }
4991 }
4992 if (vdev_obsolete_counts_are_precise(vd)) {
4993 ASSERT(vic->vic_mapping_object != 0);
4994 precise_vdev_count++;
4995 }
4996 if (vdev_obsolete_sm_object(vd) != 0) {
4997 ASSERT(vic->vic_mapping_object != 0);
4998 obsolete_sm_count++;
4999 }
5000 }
5001
5002 (void) feature_get_refcount(spa,
5003 &spa_feature_table[SPA_FEATURE_DEVICE_REMOVAL],
5004 &dr_feature_refcount);
5005 (void) feature_get_refcount(spa,
5006 &spa_feature_table[SPA_FEATURE_OBSOLETE_COUNTS],
5007 &oc_feature_refcount);
5008
5009 if (dr_feature_refcount != indirect_vdev_count) {
5010 ret = 1;
5011 (void) printf("Number of indirect vdevs (%llu) " \
5012 "does not match feature count (%llu)\n",
5013 (u_longlong_t)indirect_vdev_count,
5014 (u_longlong_t)dr_feature_refcount);
5015 } else {
5016 (void) printf("Verified device_removal feature refcount " \
5017 "of %llu is correct\n",
5018 (u_longlong_t)dr_feature_refcount);
5019 }
5020
5021 if (zap_contains(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
5022 DMU_POOL_OBSOLETE_BPOBJ) == 0) {
5023 obsolete_bpobj_count++;
5024 }
5025
5026
5027 obsolete_counts_object_count = precise_vdev_count;
5028 obsolete_counts_object_count += obsolete_sm_count;
5029 obsolete_counts_object_count += obsolete_counts_count;
5030 obsolete_counts_object_count += scip_count;
5031 obsolete_counts_object_count += obsolete_bpobj_count;
5032 obsolete_counts_object_count += remap_deadlist_count;
5033
5034 if (oc_feature_refcount != obsolete_counts_object_count) {
5035 ret = 1;
5036 (void) printf("Number of obsolete counts objects (%llu) " \
5037 "does not match feature count (%llu)\n",
5038 (u_longlong_t)obsolete_counts_object_count,
5039 (u_longlong_t)oc_feature_refcount);
5040 (void) printf("pv:%llu os:%llu oc:%llu sc:%llu "
5041 "ob:%llu rd:%llu\n",
5042 (u_longlong_t)precise_vdev_count,
5043 (u_longlong_t)obsolete_sm_count,
5044 (u_longlong_t)obsolete_counts_count,
5045 (u_longlong_t)scip_count,
5046 (u_longlong_t)obsolete_bpobj_count,
5047 (u_longlong_t)remap_deadlist_count);
5048 } else {
5049 (void) printf("Verified indirect_refcount feature refcount " \
5050 "of %llu is correct\n",
5051 (u_longlong_t)oc_feature_refcount);
5052 }
5053 return (ret);
5054 }
5055
5056 static void
5057 zdb_set_skip_mmp(char *target)
5058 {
5059 spa_t *spa;
5060
5061 /*
5062 * Disable the activity check to allow examination of
5063 * active pools.
5064 */
5065 mutex_enter(&spa_namespace_lock);
5066 if ((spa = spa_lookup(target)) != NULL) {
5067 spa->spa_import_flags |= ZFS_IMPORT_SKIP_MMP;
5068 }
5069 mutex_exit(&spa_namespace_lock);
5070 }
5071
5072 #define BOGUS_SUFFIX "_CHECKPOINTED_UNIVERSE"
5073 /*
5074 * Import the checkpointed state of the pool specified by the target
5075 * parameter as readonly. The function also accepts a pool config
5076 * as an optional parameter; if one is not provided, the function
5077 * attempts to infer the config from the name of the target pool.
5078 *
5079 * Note that the checkpointed state's pool name will be the name of
5080 * the original pool with the above suffix appended to it. In addition,
5081 * if the target is not a pool name (e.g. a path to a dataset) then
5082 * the new_path parameter is populated with the updated path to
5083 * reflect the fact that we are looking into the checkpointed state.
5084 *
5085 * The function returns a newly-allocated copy of the name of the
5086 * pool containing the checkpointed state. When this copy is no
5087 * longer needed it should be freed with free(3C). Same thing
5088 * applies to the new_path parameter if allocated.
5089 */
5090 static char *
5091 import_checkpointed_state(char *target, nvlist_t *cfg, char **new_path)
5092 {
5093 int error = 0;
5094 char *poolname, *bogus_name;
5095
5096 /* If the target is not a pool name, extract the pool name */
5097 char *path_start = strchr(target, '/');
5098 if (path_start != NULL) {
5099 size_t poolname_len = path_start - target;
5100 poolname = strndup(target, poolname_len);
5101 } else {
5102 poolname = target;
5103 }
5104
5105 if (cfg == NULL) {
5106 zdb_set_skip_mmp(poolname);
5107 error = spa_get_stats(poolname, &cfg, NULL, 0);
5108 if (error != 0) {
5109 fatal("Tried to read config of pool \"%s\" but "
5110 "spa_get_stats() failed with error %d\n",
5111 poolname, error);
5112 }
5113 }
5114
5115 (void) asprintf(&bogus_name, "%s%s", poolname, BOGUS_SUFFIX);
5116 fnvlist_add_string(cfg, ZPOOL_CONFIG_POOL_NAME, bogus_name);
5117
5118 error = spa_import(bogus_name, cfg, NULL,
5119 ZFS_IMPORT_MISSING_LOG | ZFS_IMPORT_CHECKPOINT |
5120 ZFS_IMPORT_SKIP_MMP);
5121 if (error != 0) {
5122 fatal("Tried to import pool \"%s\" but spa_import() failed "
5123 "with error %d\n", bogus_name, error);
5124 }
5125
5126 if (new_path != NULL && path_start != NULL)
5127 (void) asprintf(new_path, "%s%s", bogus_name, path_start);
5128
5129 if (target != poolname)
5130 free(poolname);
5131
5132 return (bogus_name);
5133 }
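
/*
 * A standalone sketch of the name handling above: everything before the
 * first '/' is the pool name, and the checkpoint's clone gets that name
 * with BOGUS_SUFFIX appended. This version builds the string with
 * malloc/snprintf where the code above uses strndup/asprintf; as with
 * import_checkpointed_state(), the caller frees the result.
 */
static char *
sketch_bogus_name(const char *target)
{
	const char *slash = strchr(target, '/');
	size_t plen = (slash != NULL) ? (size_t)(slash - target) :
	    strlen(target);
	size_t len = plen + sizeof (BOGUS_SUFFIX);	/* sizeof includes NUL */
	char *name = malloc(len);

	if (name != NULL)
		(void) snprintf(name, len, "%.*s%s", (int)plen, target,
		    BOGUS_SUFFIX);
	return (name);
}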
5134
5135 typedef struct verify_checkpoint_sm_entry_cb_arg {
5136 vdev_t *vcsec_vd;
5137
5138 /* the following fields are only used for printing progress */
5139 uint64_t vcsec_entryid;
5140 uint64_t vcsec_num_entries;
5141 } verify_checkpoint_sm_entry_cb_arg_t;
5142
5143 #define ENTRIES_PER_PROGRESS_UPDATE 10000
5144
5145 static int
5146 verify_checkpoint_sm_entry_cb(space_map_entry_t *sme, void *arg)
5147 {
5148 verify_checkpoint_sm_entry_cb_arg_t *vcsec = arg;
5149 vdev_t *vd = vcsec->vcsec_vd;
5150 metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
5151 uint64_t end = sme->sme_offset + sme->sme_run;
5152
5153 ASSERT(sme->sme_type == SM_FREE);
5154
5155 if ((vcsec->vcsec_entryid % ENTRIES_PER_PROGRESS_UPDATE) == 0) {
5156 (void) fprintf(stderr,
5157 "\rverifying vdev %llu, space map entry %llu of %llu ...",
5158 (longlong_t)vd->vdev_id,
5159 (longlong_t)vcsec->vcsec_entryid,
5160 (longlong_t)vcsec->vcsec_num_entries);
5161 }
5162 vcsec->vcsec_entryid++;
5163
5164 /*
5165 * See comment in checkpoint_sm_exclude_entry_cb()
5166 */
5167 VERIFY3U(sme->sme_offset, >=, ms->ms_start);
5168 VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
5169
5170 /*
5171 * The entries in the vdev_checkpoint_sm should be marked as
5172 * allocated in the checkpointed state of the pool, therefore
5173 * their respective ms_allocatable trees should not contain them.
5174 */
5175 mutex_enter(&ms->ms_lock);
5176 range_tree_verify_not_present(ms->ms_allocatable,
5177 sme->sme_offset, sme->sme_run);
5178 mutex_exit(&ms->ms_lock);
5179
5180 return (0);
5181 }
5182
5183 /*
5184 * Verify that all segments in the vdev_checkpoint_sm are allocated
5185 * according to the checkpoint's ms_sm (i.e. are not in the checkpoint's
5186 * ms_allocatable).
5187 *
5188 * Do so by comparing the checkpoint space maps (vdev_checkpoint_sm) of
5189 * each vdev in the current state of the pool to the metaslab space maps
5190 * (ms_sm) of the checkpointed state of the pool.
5191 *
5192 * Note that the function changes the state of the ms_allocatable
5193 * trees of the current spa_t. The entries of these ms_allocatable
5194 * trees are cleared out and then repopulated with the free
5195 * entries of their respective ms_sm space maps.
5196 */
5197 static void
5198 verify_checkpoint_vdev_spacemaps(spa_t *checkpoint, spa_t *current)
5199 {
5200 vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev;
5201 vdev_t *current_rvd = current->spa_root_vdev;
5202
5203 load_concrete_ms_allocatable_trees(checkpoint, SM_FREE);
5204
5205 for (uint64_t c = 0; c < ckpoint_rvd->vdev_children; c++) {
5206 vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[c];
5207 vdev_t *current_vd = current_rvd->vdev_child[c];
5208
5209 space_map_t *checkpoint_sm = NULL;
5210 uint64_t checkpoint_sm_obj;
5211
5212 if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) {
5213 /*
5214 * Since we don't allow device removal in a pool
5215 * that has a checkpoint, we expect that all removed
5216 * vdevs were removed from the pool before the
5217 * checkpoint.
5218 */
5219 ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops);
5220 continue;
5221 }
5222
5223 /*
5224 * If the checkpoint space map doesn't exist, then nothing
5225 * here is checkpointed so there's nothing to verify.
5226 */
5227 if (current_vd->vdev_top_zap == 0 ||
5228 zap_contains(spa_meta_objset(current),
5229 current_vd->vdev_top_zap,
5230 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
5231 continue;
5232
5233 VERIFY0(zap_lookup(spa_meta_objset(current),
5234 current_vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
5235 sizeof (uint64_t), 1, &checkpoint_sm_obj));
5236
5237 VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(current),
5238 checkpoint_sm_obj, 0, current_vd->vdev_asize,
5239 current_vd->vdev_ashift));
5240
5241 verify_checkpoint_sm_entry_cb_arg_t vcsec;
5242 vcsec.vcsec_vd = ckpoint_vd;
5243 vcsec.vcsec_entryid = 0;
5244 vcsec.vcsec_num_entries =
5245 space_map_length(checkpoint_sm) / sizeof (uint64_t);
5246 VERIFY0(space_map_iterate(checkpoint_sm,
5247 space_map_length(checkpoint_sm),
5248 verify_checkpoint_sm_entry_cb, &vcsec));
5249 dump_spacemap(current->spa_meta_objset, checkpoint_sm);
5250 space_map_close(checkpoint_sm);
5251 }
5252
5253 /*
5254 * If we've added vdevs since we took the checkpoint, ensure
5255 * that their checkpoint space maps are empty.
5256 */
5257 if (ckpoint_rvd->vdev_children < current_rvd->vdev_children) {
5258 for (uint64_t c = ckpoint_rvd->vdev_children;
5259 c < current_rvd->vdev_children; c++) {
5260 vdev_t *current_vd = current_rvd->vdev_child[c];
5261 VERIFY3P(current_vd->vdev_checkpoint_sm, ==, NULL);
5262 }
5263 }
5264
5265 /* for cleaner progress output */
5266 (void) fprintf(stderr, "\n");
5267 }
5268
5269 /*
5270 * Verifies that all space that's allocated in the checkpoint is
5271 * still allocated in the current version, by checking that everything
5272 * in checkpoint's ms_allocatable (which is actually allocated, not
5273 * allocatable/free) is not present in current's ms_allocatable.
5274 *
5275 * Note that the function changes the state of the ms_allocatable
5276 * trees of both spas when called. The entries of all ms_allocatable
5277 * trees are cleared out and then repopulated from their respective
5278 * ms_sm space maps. In the checkpointed state we load the allocated
5279 * entries, and in the current state we load the free entries.
5280 */
5281 static void
5282 verify_checkpoint_ms_spacemaps(spa_t *checkpoint, spa_t *current)
5283 {
5284 vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev;
5285 vdev_t *current_rvd = current->spa_root_vdev;
5286
5287 load_concrete_ms_allocatable_trees(checkpoint, SM_ALLOC);
5288 load_concrete_ms_allocatable_trees(current, SM_FREE);
5289
5290 for (uint64_t i = 0; i < ckpoint_rvd->vdev_children; i++) {
5291 vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[i];
5292 vdev_t *current_vd = current_rvd->vdev_child[i];
5293
5294 if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) {
5295 /*
5296 * See comment in verify_checkpoint_vdev_spacemaps()
5297 */
5298 ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops);
5299 continue;
5300 }
5301
5302 for (uint64_t m = 0; m < ckpoint_vd->vdev_ms_count; m++) {
5303 metaslab_t *ckpoint_msp = ckpoint_vd->vdev_ms[m];
5304 metaslab_t *current_msp = current_vd->vdev_ms[m];
5305
5306 (void) fprintf(stderr,
5307 "\rverifying vdev %llu of %llu, "
5308 "metaslab %llu of %llu ...",
5309 (longlong_t)current_vd->vdev_id,
5310 (longlong_t)current_rvd->vdev_children,
5311 (longlong_t)current_vd->vdev_ms[m]->ms_id,
5312 (longlong_t)current_vd->vdev_ms_count);
5313
5314 			/*
5315 			 * We walk through the ms_allocatable trees that
5316 			 * are loaded with the allocated blocks from the
5317 			 * ms_sm spacemaps of the checkpoint. We verify
5318 			 * that none of these ranges appears in the
5319 			 * ms_allocatable trees of the current state,
5320 			 * which are loaded with the ranges that are
5321 			 * currently free.
5322 			 *
5323 			 * This way we ensure that none of the blocks that
5324 			 * are part of the checkpoint were freed by mistake.
5325 			 */
5326 range_tree_walk(ckpoint_msp->ms_allocatable,
5327 (range_tree_func_t *)range_tree_verify_not_present,
5328 current_msp->ms_allocatable);
5329 }
5330 }
5331
5332 /* for cleaner progress output */
5333 (void) fprintf(stderr, "\n");
5334 }
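
/*
 * Illustrative sketch (not part of zdb, compiled out): the cross-check
 * above walks every segment of the checkpoint's "allocated" tree and
 * asserts it is absent from the current state's "free" tree.  The toy
 * interval arrays below are hypothetical stand-ins for range trees; the
 * real code uses range_tree_walk() with range_tree_verify_not_present().
 */
#if 0	/* example only */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct toy_seg {
	uint64_t ts_start;
	uint64_t ts_size;
} toy_seg_t;

/* Does [start, start + size) overlap any segment in segs[0..n)? */
static int
toy_overlaps(const toy_seg_t *segs, size_t n, uint64_t start, uint64_t size)
{
	for (size_t i = 0; i < n; i++) {
		if (start < segs[i].ts_start + segs[i].ts_size &&
		    segs[i].ts_start < start + size)
			return (1);
	}
	return (0);
}

/*
 * Every segment the checkpoint considers allocated must be absent from
 * the current state's free segments; any overlap means a checkpointed
 * block was freed by mistake.
 */
static void
toy_verify_not_present(const toy_seg_t *ckpt_alloc, size_t na,
    const toy_seg_t *cur_free, size_t nf)
{
	for (size_t i = 0; i < na; i++) {
		assert(!toy_overlaps(cur_free, nf,
		    ckpt_alloc[i].ts_start, ckpt_alloc[i].ts_size));
	}
}
#endif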
5335
5336 static void
5337 verify_checkpoint_blocks(spa_t *spa)
5338 {
5339 ASSERT(!dump_opt['L']);
5340
5341 spa_t *checkpoint_spa;
5342 char *checkpoint_pool;
5343 nvlist_t *config = NULL;
5344 int error = 0;
5345
5346 /*
5347 * We import the checkpointed state of the pool (under a different
5348 * name) so we can do verification on it against the current state
5349 * of the pool.
5350 */
5351 checkpoint_pool = import_checkpointed_state(spa->spa_name, config,
5352 NULL);
5353 ASSERT(strcmp(spa->spa_name, checkpoint_pool) != 0);
5354
5355 error = spa_open(checkpoint_pool, &checkpoint_spa, FTAG);
5356 if (error != 0) {
5357 fatal("Tried to open pool \"%s\" but spa_open() failed with "
5358 "error %d\n", checkpoint_pool, error);
5359 }
5360
5361 /*
5362 * Ensure that ranges in the checkpoint space maps of each vdev
5363 * are allocated according to the checkpointed state's metaslab
5364 * space maps.
5365 */
5366 verify_checkpoint_vdev_spacemaps(checkpoint_spa, spa);
5367
5368 /*
5369 * Ensure that allocated ranges in the checkpoint's metaslab
5370 * space maps remain allocated in the metaslab space maps of
5371 * the current state.
5372 */
5373 verify_checkpoint_ms_spacemaps(checkpoint_spa, spa);
5374
5375 /*
5376 * Once we are done, we get rid of the checkpointed state.
5377 */
5378 spa_close(checkpoint_spa, FTAG);
5379 free(checkpoint_pool);
5380 }
5381
5382 static void
5383 dump_leftover_checkpoint_blocks(spa_t *spa)
5384 {
5385 vdev_t *rvd = spa->spa_root_vdev;
5386
5387 for (uint64_t i = 0; i < rvd->vdev_children; i++) {
5388 vdev_t *vd = rvd->vdev_child[i];
5389
5390 space_map_t *checkpoint_sm = NULL;
5391 uint64_t checkpoint_sm_obj;
5392
5393 if (vd->vdev_top_zap == 0)
5394 continue;
5395
5396 if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap,
5397 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
5398 continue;
5399
5400 VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap,
5401 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
5402 sizeof (uint64_t), 1, &checkpoint_sm_obj));
5403
5404 VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa),
5405 checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift));
5406 dump_spacemap(spa->spa_meta_objset, checkpoint_sm);
5407 space_map_close(checkpoint_sm);
5408 }
5409 }
5410
5411 static int
5412 verify_checkpoint(spa_t *spa)
5413 {
5414 uberblock_t checkpoint;
5415 int error;
5416
5417 if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
5418 return (0);
5419
5420 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
5421 DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
5422 sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
5423
5424 if (error == ENOENT && !dump_opt['L']) {
5425 /*
5426 * If the feature is active but the uberblock is missing
5427 * then we must be in the middle of discarding the
5428 * checkpoint.
5429 */
5430 (void) printf("\nPartially discarded checkpoint "
5431 "state found:\n");
5432 dump_leftover_checkpoint_blocks(spa);
5433 return (0);
5434 } else if (error != 0) {
5435 (void) printf("lookup error %d when looking for "
5436 "checkpointed uberblock in MOS\n", error);
5437 return (error);
5438 }
5439 dump_uberblock(&checkpoint, "\nCheckpointed uberblock found:\n", "\n");
5440
5441 if (checkpoint.ub_checkpoint_txg == 0) {
5442 (void) printf("\nub_checkpoint_txg not set in checkpointed "
5443 "uberblock\n");
5444 error = 3;
5445 }
5446
5447 if (error == 0 && !dump_opt['L'])
5448 verify_checkpoint_blocks(spa);
5449
5450 return (error);
5451 }
5452
5453 /* ARGSUSED */
5454 static void
5455 mos_leaks_cb(void *arg, uint64_t start, uint64_t size)
5456 {
5457 	for (uint64_t i = start; i < start + size; i++) {
5458 (void) printf("MOS object %llu referenced but not allocated\n",
5459 (u_longlong_t)i);
5460 }
5461 }
5462
5463 static range_tree_t *mos_refd_objs;
5464
5465 static void
5466 mos_obj_refd(uint64_t obj)
5467 {
5468 if (obj != 0 && mos_refd_objs != NULL)
5469 range_tree_add(mos_refd_objs, obj, 1);
5470 }
5471
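/*
 * Mark the MOS object, if any, referenced from this top-level vdev's ZAP
 * that holds per-metaslab unflushed-txg state for the log spacemap feature.
 */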
5472 static void
5473 mos_leak_vdev_top_zap(vdev_t *vd)
5474 {
5475 uint64_t ms_flush_data_obj;
5476
5477 int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
5478 vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
5479 sizeof (ms_flush_data_obj), 1, &ms_flush_data_obj);
5480 if (error == ENOENT)
5481 return;
5482 ASSERT0(error);
5483
5484 mos_obj_refd(ms_flush_data_obj);
5485 }
5486
5487 static void
5488 mos_leak_vdev(vdev_t *vd)
5489 {
5490 mos_obj_refd(vd->vdev_dtl_object);
5491 mos_obj_refd(vd->vdev_ms_array);
5492 mos_obj_refd(vd->vdev_indirect_config.vic_births_object);
5493 mos_obj_refd(vd->vdev_indirect_config.vic_mapping_object);
5494 mos_obj_refd(vd->vdev_leaf_zap);
5495 if (vd->vdev_checkpoint_sm != NULL)
5496 mos_obj_refd(vd->vdev_checkpoint_sm->sm_object);
5497 if (vd->vdev_indirect_mapping != NULL) {
5498 mos_obj_refd(vd->vdev_indirect_mapping->
5499 vim_phys->vimp_counts_object);
5500 }
5501 if (vd->vdev_obsolete_sm != NULL)
5502 mos_obj_refd(vd->vdev_obsolete_sm->sm_object);
5503
5504 for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
5505 metaslab_t *ms = vd->vdev_ms[m];
5506 mos_obj_refd(space_map_object(ms->ms_sm));
5507 }
5508
5509 if (vd->vdev_top_zap != 0) {
5510 mos_obj_refd(vd->vdev_top_zap);
5511 mos_leak_vdev_top_zap(vd);
5512 }
5513
5514 for (uint64_t c = 0; c < vd->vdev_children; c++) {
5515 mos_leak_vdev(vd->vdev_child[c]);
5516 }
5517 }
5518
5519 static void
5520 mos_leak_log_spacemaps(spa_t *spa)
5521 {
5522 uint64_t spacemap_zap;
5523
5524 int error = zap_lookup(spa_meta_objset(spa),
5525 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_LOG_SPACEMAP_ZAP,
5526 sizeof (spacemap_zap), 1, &spacemap_zap);
5527 if (error == ENOENT)
5528 return;
5529 ASSERT0(error);
5530
5531 mos_obj_refd(spacemap_zap);
5532 for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
5533 sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls))
5534 mos_obj_refd(sls->sls_sm_obj);
5535 }
5536
5537 static int
5538 dump_mos_leaks(spa_t *spa)
5539 {
5540 int rv = 0;
5541 objset_t *mos = spa->spa_meta_objset;
5542 dsl_pool_t *dp = spa->spa_dsl_pool;
5543
5544 /* Visit and mark all referenced objects in the MOS */
5545
5546 mos_obj_refd(DMU_POOL_DIRECTORY_OBJECT);
5547 mos_obj_refd(spa->spa_pool_props_object);
5548 mos_obj_refd(spa->spa_config_object);
5549 mos_obj_refd(spa->spa_ddt_stat_object);
5550 mos_obj_refd(spa->spa_feat_desc_obj);
5551 mos_obj_refd(spa->spa_feat_enabled_txg_obj);
5552 mos_obj_refd(spa->spa_feat_for_read_obj);
5553 mos_obj_refd(spa->spa_feat_for_write_obj);
5554 mos_obj_refd(spa->spa_history);
5555 mos_obj_refd(spa->spa_errlog_last);
5556 mos_obj_refd(spa->spa_errlog_scrub);
5557 mos_obj_refd(spa->spa_all_vdev_zaps);
5558 mos_obj_refd(spa->spa_dsl_pool->dp_bptree_obj);
5559 mos_obj_refd(spa->spa_dsl_pool->dp_tmp_userrefs_obj);
5560 mos_obj_refd(spa->spa_dsl_pool->dp_scan->scn_phys.scn_queue_obj);
5561 bpobj_count_refd(&spa->spa_deferred_bpobj);
5562 mos_obj_refd(dp->dp_empty_bpobj);
5563 bpobj_count_refd(&dp->dp_obsolete_bpobj);
5564 bpobj_count_refd(&dp->dp_free_bpobj);
5565 mos_obj_refd(spa->spa_l2cache.sav_object);
5566 mos_obj_refd(spa->spa_spares.sav_object);
5567
5568 if (spa->spa_syncing_log_sm != NULL)
5569 mos_obj_refd(spa->spa_syncing_log_sm->sm_object);
5570 mos_leak_log_spacemaps(spa);
5571
5572 mos_obj_refd(spa->spa_condensing_indirect_phys.
5573 scip_next_mapping_object);
5574 mos_obj_refd(spa->spa_condensing_indirect_phys.
5575 scip_prev_obsolete_sm_object);
5576 if (spa->spa_condensing_indirect_phys.scip_next_mapping_object != 0) {
5577 vdev_indirect_mapping_t *vim =
5578 vdev_indirect_mapping_open(mos,
5579 spa->spa_condensing_indirect_phys.scip_next_mapping_object);
5580 mos_obj_refd(vim->vim_phys->vimp_counts_object);
5581 vdev_indirect_mapping_close(vim);
5582 }
5583
5584 if (dp->dp_origin_snap != NULL) {
5585 dsl_dataset_t *ds;
5586
5587 dsl_pool_config_enter(dp, FTAG);
5588 VERIFY0(dsl_dataset_hold_obj(dp,
5589 dsl_dataset_phys(dp->dp_origin_snap)->ds_next_snap_obj,
5590 FTAG, &ds));
5591 count_ds_mos_objects(ds);
5592 dump_deadlist(&ds->ds_deadlist);
5593 dsl_dataset_rele(ds, FTAG);
5594 dsl_pool_config_exit(dp, FTAG);
5595
5596 count_ds_mos_objects(dp->dp_origin_snap);
5597 dump_deadlist(&dp->dp_origin_snap->ds_deadlist);
5598 }
5599 count_dir_mos_objects(dp->dp_mos_dir);
5600 if (dp->dp_free_dir != NULL)
5601 count_dir_mos_objects(dp->dp_free_dir);
5602 if (dp->dp_leak_dir != NULL)
5603 count_dir_mos_objects(dp->dp_leak_dir);
5604
5605 mos_leak_vdev(spa->spa_root_vdev);
5606
5607 for (uint64_t class = 0; class < DDT_CLASSES; class++) {
5608 for (uint64_t type = 0; type < DDT_TYPES; type++) {
5609 for (uint64_t cksum = 0;
5610 cksum < ZIO_CHECKSUM_FUNCTIONS; cksum++) {
5611 ddt_t *ddt = spa->spa_ddt[cksum];
5612 mos_obj_refd(ddt->ddt_object[type][class]);
5613 }
5614 }
5615 }
5616
5617 /*
5618 * Visit all allocated objects and make sure they are referenced.
5619 */
5620 uint64_t object = 0;
5621 while (dmu_object_next(mos, &object, B_FALSE, 0) == 0) {
5622 if (range_tree_contains(mos_refd_objs, object, 1)) {
5623 range_tree_remove(mos_refd_objs, object, 1);
5624 } else {
5625 dmu_object_info_t doi;
5626 const char *name;
5627 dmu_object_info(mos, object, &doi);
5628 if (doi.doi_type & DMU_OT_NEWTYPE) {
5629 dmu_object_byteswap_t bswap =
5630 DMU_OT_BYTESWAP(doi.doi_type);
5631 name = dmu_ot_byteswap[bswap].ob_name;
5632 } else {
5633 name = dmu_ot[doi.doi_type].ot_name;
5634 }
5635
5636 (void) printf("MOS object %llu (%s) leaked\n",
5637 (u_longlong_t)object, name);
5638 rv = 2;
5639 }
5640 }
5641 (void) range_tree_walk(mos_refd_objs, mos_leaks_cb, NULL);
5642 if (!range_tree_is_empty(mos_refd_objs))
5643 rv = 2;
5644 range_tree_vacate(mos_refd_objs, NULL, NULL);
5645 range_tree_destroy(mos_refd_objs);
5646 return (rv);
5647 }
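
/*
 * Illustrative sketch (not part of zdb, compiled out): dump_mos_leaks()
 * above is a mark-and-sweep pass -- every object number reachable from
 * pool metadata is first marked in mos_refd_objs, then each allocated MOS
 * object is swept and reported if unmarked.  The hypothetical bitmap below
 * distills that idea for a toy space of 64 object numbers.
 */
#if 0	/* example only */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define	TOY_NOBJS	64	/* toy object-number space */

static uint64_t toy_marked;	/* bit i set => object i is referenced */

static void
toy_mark(uint64_t obj)
{
	if (obj != 0 && obj < TOY_NOBJS)	/* 0 means "no object" */
		toy_marked |= (1ULL << obj);
}

/* Returns 2 when a leak is found, mirroring dump_mos_leaks(). */
static int
toy_sweep(const uint64_t *allocated, size_t n)
{
	int rv = 0;

	for (size_t i = 0; i < n; i++) {
		if (allocated[i] < TOY_NOBJS &&
		    !(toy_marked & (1ULL << allocated[i]))) {
			(void) printf("object %llu leaked\n",
			    (unsigned long long)allocated[i]);
			rv = 2;
		}
	}
	return (rv);
}
#endif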
5648
5649 typedef struct log_sm_obsolete_stats_arg {
5650 uint64_t lsos_current_txg;
5651
5652 uint64_t lsos_total_entries;
5653 uint64_t lsos_valid_entries;
5654
5655 uint64_t lsos_sm_entries;
5656 uint64_t lsos_valid_sm_entries;
5657 } log_sm_obsolete_stats_arg_t;
5658
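/*
 * An entry is counted as "valid" only if its txg is at least the target
 * metaslab's unflushed txg; older entries have already been flushed into
 * the metaslab's own space map and are therefore obsolete.
 */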
5659 static int
5660 log_spacemap_obsolete_stats_cb(spa_t *spa, space_map_entry_t *sme,
5661 uint64_t txg, void *arg)
5662 {
5663 log_sm_obsolete_stats_arg_t *lsos = arg;
5664 uint64_t offset = sme->sme_offset;
5665 uint64_t vdev_id = sme->sme_vdev;
5666
5667 if (lsos->lsos_current_txg == 0) {
5668 /* this is the first log */
5669 lsos->lsos_current_txg = txg;
5670 } else if (lsos->lsos_current_txg < txg) {
5671 /* we just changed log - print stats and reset */
5672 (void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
5673 (u_longlong_t)lsos->lsos_valid_sm_entries,
5674 (u_longlong_t)lsos->lsos_sm_entries,
5675 (u_longlong_t)lsos->lsos_current_txg);
5676 lsos->lsos_valid_sm_entries = 0;
5677 lsos->lsos_sm_entries = 0;
5678 lsos->lsos_current_txg = txg;
5679 }
5680 ASSERT3U(lsos->lsos_current_txg, ==, txg);
5681
5682 lsos->lsos_sm_entries++;
5683 lsos->lsos_total_entries++;
5684
5685 vdev_t *vd = vdev_lookup_top(spa, vdev_id);
5686 if (!vdev_is_concrete(vd))
5687 return (0);
5688
5689 metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5690 ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
5691
5692 if (txg < metaslab_unflushed_txg(ms))
5693 return (0);
5694 lsos->lsos_valid_sm_entries++;
5695 lsos->lsos_valid_entries++;
5696 return (0);
5697 }
5698
5699 static void
5700 dump_log_spacemap_obsolete_stats(spa_t *spa)
5701 {
5702 if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
5703 return;
5704
5705 log_sm_obsolete_stats_arg_t lsos;
5706 bzero(&lsos, sizeof (lsos));
5707
5708 (void) printf("Log Space Map Obsolete Entry Statistics:\n");
5709
5710 iterate_through_spacemap_logs(spa,
5711 log_spacemap_obsolete_stats_cb, &lsos);
5712
5713 /* print stats for latest log */
5714 (void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
5715 (u_longlong_t)lsos.lsos_valid_sm_entries,
5716 (u_longlong_t)lsos.lsos_sm_entries,
5717 (u_longlong_t)lsos.lsos_current_txg);
5718
5719 (void) printf("%-8llu valid entries out of %-8llu - total\n\n",
5720 (u_longlong_t)lsos.lsos_valid_entries,
5721 (u_longlong_t)lsos.lsos_total_entries);
5722 }
5723
5724 static void
5725 dump_zpool(spa_t *spa)
5726 {
5727 dsl_pool_t *dp = spa_get_dsl(spa);
5728 int rc = 0;
5729
5730 if (dump_opt['S']) {
5731 dump_simulated_ddt(spa);
5732 return;
5733 }
5734
5735 if (!dump_opt['e'] && dump_opt['C'] > 1) {
5736 (void) printf("\nCached configuration:\n");
5737 dump_nvlist(spa->spa_config, 8);
5738 }
5739
5740 if (dump_opt['C'])
5741 dump_config(spa);
5742
5743 if (dump_opt['u'])
5744 dump_uberblock(&spa->spa_uberblock, "\nUberblock:\n", "\n");
5745
5746 if (dump_opt['D'])
5747 dump_all_ddts(spa);
5748
5749 if (dump_opt['d'] > 2 || dump_opt['m'])
5750 dump_metaslabs(spa);
5751 if (dump_opt['M'])
5752 dump_metaslab_groups(spa);
5753 if (dump_opt['d'] > 2 || dump_opt['m']) {
5754 dump_log_spacemaps(spa);
5755 dump_log_spacemap_obsolete_stats(spa);
5756 }
5757
5758 if (dump_opt['d'] || dump_opt['i']) {
5759 mos_refd_objs = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
5760 0);
5761 dump_dir(dp->dp_meta_objset);
5762
5763 if (dump_opt['d'] >= 3) {
5764 dsl_pool_t *dp = spa->spa_dsl_pool;
5765 dump_full_bpobj(&spa->spa_deferred_bpobj,
5766 "Deferred frees", 0);
5767 if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
5768 dump_full_bpobj(&dp->dp_free_bpobj,
5769 "Pool snapshot frees", 0);
5770 }
5771 if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
5772 ASSERT(spa_feature_is_enabled(spa,
5773 SPA_FEATURE_DEVICE_REMOVAL));
5774 dump_full_bpobj(&dp->dp_obsolete_bpobj,
5775 "Pool obsolete blocks", 0);
5776 }
5777
5778 if (spa_feature_is_active(spa,
5779 SPA_FEATURE_ASYNC_DESTROY)) {
5780 dump_bptree(spa->spa_meta_objset,
5781 dp->dp_bptree_obj,
5782 "Pool dataset frees");
5783 }
5784 dump_dtl(spa->spa_root_vdev, 0);
5785 }
5786 (void) dmu_objset_find(spa_name(spa), dump_one_dir,
5787 NULL, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
5788
5789 if (rc == 0 && !dump_opt['L'])
5790 rc = dump_mos_leaks(spa);
5791
5792 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
5793 uint64_t refcount;
5794
5795 if (!(spa_feature_table[f].fi_flags &
5796 ZFEATURE_FLAG_PER_DATASET) ||
5797 !spa_feature_is_enabled(spa, f)) {
5798 ASSERT0(dataset_feature_count[f]);
5799 continue;
5800 }
5801 (void) feature_get_refcount(spa,
5802 &spa_feature_table[f], &refcount);
5803 if (dataset_feature_count[f] != refcount) {
5804 (void) printf("%s feature refcount mismatch: "
5805 "%lld datasets != %lld refcount\n",
5806 spa_feature_table[f].fi_uname,
5807 (longlong_t)dataset_feature_count[f],
5808 (longlong_t)refcount);
5809 rc = 2;
5810 } else {
5811 (void) printf("Verified %s feature refcount "
5812 "of %llu is correct\n",
5813 spa_feature_table[f].fi_uname,
5814 (longlong_t)refcount);
5815 }
5816 }
5817
5818 if (rc == 0)
5819 rc = verify_device_removal_feature_counts(spa);
5820 }
5821
5822 if (rc == 0 && (dump_opt['b'] || dump_opt['c']))
5823 rc = dump_block_stats(spa);
5824
5825 if (rc == 0)
5826 rc = verify_spacemap_refcounts(spa);
5827
5828 if (dump_opt['s'])
5829 show_pool_stats(spa);
5830
5831 if (dump_opt['h'])
5832 dump_history(spa);
5833
5834 if (rc == 0)
5835 rc = verify_checkpoint(spa);
5836
5837 if (rc != 0) {
5838 dump_debug_buffer();
5839 exit(rc);
5840 }
5841 }
5842
5843 #define ZDB_FLAG_CHECKSUM 0x0001
5844 #define ZDB_FLAG_DECOMPRESS 0x0002
5845 #define ZDB_FLAG_BSWAP 0x0004
5846 #define ZDB_FLAG_GBH 0x0008
5847 #define ZDB_FLAG_INDIRECT 0x0010
5848 #define ZDB_FLAG_PHYS 0x0020
5849 #define ZDB_FLAG_RAW 0x0040
5850 #define ZDB_FLAG_PRINT_BLKPTR 0x0080
5851
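/*
 * flagbits[] maps the single-character flags of an -R block descriptor
 * (see zdb_read_block()) to ZDB_FLAG_* bits.  main() populates it, so,
 * e.g., a trailing ":de" selects ZDB_FLAG_DECOMPRESS | ZDB_FLAG_BSWAP.
 */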
5852 static int flagbits[256];
5853
5854 static void
5855 zdb_print_blkptr(blkptr_t *bp, int flags)
5856 {
5857 char blkbuf[BP_SPRINTF_LEN];
5858
5859 if (flags & ZDB_FLAG_BSWAP)
5860 byteswap_uint64_array((void *)bp, sizeof (blkptr_t));
5861
5862 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
5863 (void) printf("%s\n", blkbuf);
5864 }
5865
5866 static void
5867 zdb_dump_indirect(blkptr_t *bp, int nbps, int flags)
5868 {
5869 int i;
5870
5871 for (i = 0; i < nbps; i++)
5872 zdb_print_blkptr(&bp[i], flags);
5873 }
5874
5875 static void
5876 zdb_dump_gbh(void *buf, int flags)
5877 {
5878 zdb_dump_indirect((blkptr_t *)buf, SPA_GBH_NBLKPTRS, flags);
5879 }
5880
5881 static void
5882 zdb_dump_block_raw(void *buf, uint64_t size, int flags)
5883 {
5884 if (flags & ZDB_FLAG_BSWAP)
5885 byteswap_uint64_array(buf, size);
5886 (void) write(1, buf, size);
5887 }
5888
5889 static void
5890 zdb_dump_block(char *label, void *buf, uint64_t size, int flags)
5891 {
5892 uint64_t *d = (uint64_t *)buf;
5893 unsigned nwords = size / sizeof (uint64_t);
5894 int do_bswap = !!(flags & ZDB_FLAG_BSWAP);
5895 unsigned i, j;
5896 const char *hdr;
5897 char *c;
5898
5899
5900 if (do_bswap)
5901 hdr = " 7 6 5 4 3 2 1 0 f e d c b a 9 8";
5902 else
5903 hdr = " 0 1 2 3 4 5 6 7 8 9 a b c d e f";
5904
5905 (void) printf("\n%s\n%6s %s 0123456789abcdef\n", label, "", hdr);
5906
5907 for (i = 0; i < nwords; i += 2) {
5908 (void) printf("%06llx: %016llx %016llx ",
5909 (u_longlong_t)(i * sizeof (uint64_t)),
5910 (u_longlong_t)(do_bswap ? BSWAP_64(d[i]) : d[i]),
5911 (u_longlong_t)(do_bswap ? BSWAP_64(d[i + 1]) : d[i + 1]));
5912
5913 c = (char *)&d[i];
5914 for (j = 0; j < 2 * sizeof (uint64_t); j++)
5915 (void) printf("%c", isprint(c[j]) ? c[j] : '.');
5916 (void) printf("\n");
5917 }
5918 }
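
/*
 * Example output (hypothetical data): each row shows the hex byte offset,
 * two 64-bit words, and their ASCII rendering, e.g.:
 *
 *	000000: 00000003000000ab 0000000000000001  ................
 */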
5919
5920 /*
5921 * There are two acceptable formats:
5922 * leaf_name - For example: c1t0d0 or /tmp/ztest.0a
5923 * child[.child]* - For example: 0.1.1
5924 *
5925 * The second form can be used to specify arbitrary vdevs anywhere
5926  * in the hierarchy. For example, in a pool with a mirror of
5927 * RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1 .
5928 */
5929 static vdev_t *
5930 zdb_vdev_lookup(vdev_t *vdev, const char *path)
5931 {
5932 char *s, *p, *q;
5933 unsigned i;
5934
5935 if (vdev == NULL)
5936 return (NULL);
5937
5938 /* First, assume the x.x.x.x format */
5939 i = strtoul(path, &s, 10);
5940 if (s == path || (s && *s != '.' && *s != '\0'))
5941 goto name;
5942 if (i >= vdev->vdev_children)
5943 return (NULL);
5944
5945 vdev = vdev->vdev_child[i];
5946 if (*s == '\0')
5947 return (vdev);
5948 return (zdb_vdev_lookup(vdev, s+1));
5949
5950 name:
5951 for (i = 0; i < vdev->vdev_children; i++) {
5952 vdev_t *vc = vdev->vdev_child[i];
5953
5954 if (vc->vdev_path == NULL) {
5955 vc = zdb_vdev_lookup(vc, path);
5956 if (vc == NULL)
5957 continue;
5958 else
5959 return (vc);
5960 }
5961
5962 p = strrchr(vc->vdev_path, '/');
5963 p = p ? p + 1 : vc->vdev_path;
5964 q = &vc->vdev_path[strlen(vc->vdev_path) - 2];
5965
5966 if (strcmp(vc->vdev_path, path) == 0)
5967 return (vc);
5968 if (strcmp(p, path) == 0)
5969 return (vc);
5970 if (strcmp(q, "s0") == 0 && strncmp(p, path, q - p) == 0)
5971 return (vc);
5972 }
5973
5974 return (NULL);
5975 }
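
/*
 * For example (hypothetical layout): if top-level vdev 0 is a mirror, "0"
 * names the mirror itself and "0.1" its second child, while a leaf can
 * equally be addressed by device name, e.g. "c1t0d0" or "/tmp/ztest.0a".
 */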
5976
5977 /* ARGSUSED */
5978 static int
5979 random_get_pseudo_bytes_cb(void *buf, size_t len, void *unused)
5980 {
5981 return (random_get_pseudo_bytes(buf, len));
5982 }
5983
5984 /*
5985 * Read a block from a pool and print it out. The syntax of the
5986 * block descriptor is:
5987 *
5988  *	vdev_specifier:offset:size[:flags]
5989  *
5990  * (The pool to read from is given as a separate, preceding argument.)
5991  *	vdev_specifier - Which vdev (see comment for zdb_vdev_lookup)
5992 * offset - offset, in hex, in bytes
5993 * size - Amount of data to read, in hex, in bytes
5994 * flags - A string of characters specifying options
5995 * b: Decode a blkptr at given offset within block
5996 * *c: Calculate and display checksums
5997 * d: Decompress data before dumping
5998 * e: Byteswap data before dumping
5999 * g: Display data as a gang block header
6000 * i: Display as an indirect block
6001 * p: Do I/O to physical offset
6002 * r: Dump raw data to stdout
6003 *
6004 * * = not yet implemented
6005 */
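/*
 * For example (hypothetical pool and offsets), "zdb -R tank 0:2000:2000:d"
 * reads 0x2000 bytes at offset 0x2000 of top-level vdev 0 of pool "tank"
 * and attempts decompression before dumping.
 */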
6006 static void
6007 zdb_read_block(char *thing, spa_t *spa)
6008 {
6009 blkptr_t blk, *bp = &blk;
6010 dva_t *dva = bp->blk_dva;
6011 int flags = 0;
6012 uint64_t offset = 0, size = 0, psize = 0, lsize = 0, blkptr_offset = 0;
6013 zio_t *zio;
6014 vdev_t *vd;
6015 abd_t *pabd;
6016 void *lbuf, *buf;
6017 const char *s, *vdev;
6018 char *p, *dup, *flagstr;
6019 int i, error;
6020
6021 dup = strdup(thing);
6022 s = strtok(dup, ":");
6023 vdev = s ? s : "";
6024 s = strtok(NULL, ":");
6025 offset = strtoull(s ? s : "", NULL, 16);
6026 s = strtok(NULL, ":");
6027 size = strtoull(s ? s : "", NULL, 16);
6028 s = strtok(NULL, ":");
6029 if (s)
6030 flagstr = strdup(s);
6031 else
6032 flagstr = strdup("");
6033
6034 s = NULL;
6035 if (size == 0)
6036 s = "size must not be zero";
6037 if (!IS_P2ALIGNED(size, DEV_BSIZE))
6038 s = "size must be a multiple of sector size";
6039 if (!IS_P2ALIGNED(offset, DEV_BSIZE))
6040 s = "offset must be a multiple of sector size";
	if (s) {
		(void) printf("Invalid block specifier: %s - %s\n", thing, s);
		free(flagstr);
		free(dup);
		return;
	}
6046
6047 for (s = strtok(flagstr, ":"); s; s = strtok(NULL, ":")) {
6048 for (i = 0; flagstr[i]; i++) {
6049 int bit = flagbits[(uchar_t)flagstr[i]];
6050
6051 if (bit == 0) {
6052 (void) printf("***Invalid flag: %c\n",
6053 flagstr[i]);
6054 continue;
6055 }
6056 flags |= bit;
6057
6058 /* If it's not something with an argument, keep going */
6059 if ((bit & (ZDB_FLAG_CHECKSUM |
6060 ZDB_FLAG_PRINT_BLKPTR)) == 0)
6061 continue;
6062
6063 p = &flagstr[i + 1];
6064 if (bit == ZDB_FLAG_PRINT_BLKPTR)
6065 blkptr_offset = strtoull(p, &p, 16);
			if (*p != ':' && *p != '\0') {
				(void) printf("***Invalid flag arg: '%s'\n", s);
				free(flagstr);
				free(dup);
				return;
			}
6071 }
6072 }
6073 free(flagstr);
6074
6075 vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev);
6076 if (vd == NULL) {
6077 (void) printf("***Invalid vdev: %s\n", vdev);
6078 free(dup);
6079 return;
6080 } else {
6081 if (vd->vdev_path)
6082 (void) fprintf(stderr, "Found vdev: %s\n",
6083 vd->vdev_path);
6084 else
6085 (void) fprintf(stderr, "Found vdev type: %s\n",
6086 vd->vdev_ops->vdev_op_type);
6087 }
6088
6089 psize = size;
6090 lsize = size;
6091
6092 pabd = abd_alloc_linear(SPA_MAXBLOCKSIZE, B_FALSE);
6093 lbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
6094
6095 BP_ZERO(bp);
6096
6097 DVA_SET_VDEV(&dva[0], vd->vdev_id);
6098 DVA_SET_OFFSET(&dva[0], offset);
6099 DVA_SET_GANG(&dva[0], !!(flags & ZDB_FLAG_GBH));
6100 DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, psize));
6101
6102 BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
6103
6104 BP_SET_LSIZE(bp, lsize);
6105 BP_SET_PSIZE(bp, psize);
6106 BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
6107 BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
6108 BP_SET_TYPE(bp, DMU_OT_NONE);
6109 BP_SET_LEVEL(bp, 0);
6110 BP_SET_DEDUP(bp, 0);
6111 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
6112
6113 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6114 zio = zio_root(spa, NULL, NULL, 0);
6115
6116 if (vd == vd->vdev_top) {
6117 /*
6118 * Treat this as a normal block read.
6119 */
6120 zio_nowait(zio_read(zio, spa, bp, pabd, psize, NULL, NULL,
6121 ZIO_PRIORITY_SYNC_READ,
6122 ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL));
6123 } else {
6124 /*
6125 * Treat this as a vdev child I/O.
6126 */
6127 zio_nowait(zio_vdev_child_io(zio, bp, vd, offset, pabd,
6128 psize, ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ,
6129 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE |
6130 ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
6131 ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW | ZIO_FLAG_OPTIONAL,
6132 NULL, NULL));
6133 }
6134
6135 error = zio_wait(zio);
6136 spa_config_exit(spa, SCL_STATE, FTAG);
6137
6138 if (error) {
6139 (void) printf("Read of %s failed, error: %d\n", thing, error);
6140 goto out;
6141 }
6142
6143 if (flags & ZDB_FLAG_DECOMPRESS) {
6144 /*
6145 * We don't know how the data was compressed, so just try
6146 * every decompress function at every inflated blocksize.
6147 */
6148 enum zio_compress c;
6149 void *pbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
6150 void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
6151
6152 abd_copy_to_buf(pbuf2, pabd, psize);
6153
6154 VERIFY0(abd_iterate_func(pabd, psize, SPA_MAXBLOCKSIZE - psize,
6155 random_get_pseudo_bytes_cb, NULL));
6156
6157 VERIFY0(random_get_pseudo_bytes((uint8_t *)pbuf2 + psize,
6158 SPA_MAXBLOCKSIZE - psize));
6159
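		/*
		 * The two copies were padded beyond psize with *different*
		 * random bytes, so a candidate (compressor, lsize) pair is
		 * accepted below only if both decompressions agree -- i.e.
		 * the decompressor never read past the psize bytes that
		 * actually came from disk.
		 */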
6160 for (lsize = SPA_MAXBLOCKSIZE; lsize > psize;
6161 lsize -= SPA_MINBLOCKSIZE) {
6162 for (c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++) {
6163 if (zio_decompress_data(c, pabd,
6164 lbuf, psize, lsize) == 0 &&
6165 zio_decompress_data_buf(c, pbuf2,
6166 lbuf2, psize, lsize) == 0 &&
6167 bcmp(lbuf, lbuf2, lsize) == 0)
6168 break;
6169 }
6170 if (c != ZIO_COMPRESS_FUNCTIONS)
6171 break;
6173 }
6174
6175 umem_free(pbuf2, SPA_MAXBLOCKSIZE);
6176 umem_free(lbuf2, SPA_MAXBLOCKSIZE);
6177
6178 if (lsize <= psize) {
6179 (void) printf("Decompress of %s failed\n", thing);
6180 goto out;
6181 }
6182 buf = lbuf;
6183 size = lsize;
6184 } else {
6185 buf = abd_to_buf(pabd);
6186 size = psize;
6187 }
6188
6189 if (flags & ZDB_FLAG_PRINT_BLKPTR)
6190 zdb_print_blkptr((blkptr_t *)(void *)
6191 ((uintptr_t)buf + (uintptr_t)blkptr_offset), flags);
6192 else if (flags & ZDB_FLAG_RAW)
6193 zdb_dump_block_raw(buf, size, flags);
6194 else if (flags & ZDB_FLAG_INDIRECT)
6195 zdb_dump_indirect((blkptr_t *)buf, size / sizeof (blkptr_t),
6196 flags);
6197 else if (flags & ZDB_FLAG_GBH)
6198 zdb_dump_gbh(buf, flags);
6199 else
6200 zdb_dump_block(thing, buf, size, flags);
6201
6202 out:
6203 abd_free(pabd);
6204 umem_free(lbuf, SPA_MAXBLOCKSIZE);
6205 free(dup);
6206 }
6207
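/*
 * Decode an embedded block pointer given on the command line as the 16
 * colon-separated hex words of a blkptr_t (matching the sscanf below),
 * and dump its decoded payload to stdout.
 */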
6208 static void
6209 zdb_embedded_block(char *thing)
6210 {
6211 blkptr_t bp;
6212 unsigned long long *words = (void *)&bp;
6213 char *buf;
6214 int err;
6215
6216 bzero(&bp, sizeof (bp));
6217 err = sscanf(thing, "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx:"
6218 "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx",
6219 words + 0, words + 1, words + 2, words + 3,
6220 words + 4, words + 5, words + 6, words + 7,
6221 words + 8, words + 9, words + 10, words + 11,
6222 words + 12, words + 13, words + 14, words + 15);
6223 if (err != 16) {
6224 (void) fprintf(stderr, "invalid input format\n");
6225 exit(1);
6226 }
6227 ASSERT3U(BPE_GET_LSIZE(&bp), <=, SPA_MAXBLOCKSIZE);
6228 buf = malloc(SPA_MAXBLOCKSIZE);
6229 if (buf == NULL) {
6230 (void) fprintf(stderr, "out of memory\n");
6231 exit(1);
6232 }
6233 err = decode_embedded_bp(&bp, buf, BPE_GET_LSIZE(&bp));
6234 if (err != 0) {
6235 (void) fprintf(stderr, "decode failed: %u\n", err);
6236 exit(1);
6237 }
6238 zdb_dump_block_raw(buf, BPE_GET_LSIZE(&bp), 0);
6239 free(buf);
6240 }
6241
6242 int
6243 main(int argc, char **argv)
6244 {
6245 int c;
6246 struct rlimit rl = { 1024, 1024 };
6247 spa_t *spa = NULL;
6248 objset_t *os = NULL;
6249 int dump_all = 1;
6250 int verbose = 0;
6251 int error = 0;
6252 char **searchdirs = NULL;
6253 int nsearch = 0;
6254 char *target, *target_pool;
6255 nvlist_t *policy = NULL;
6256 uint64_t max_txg = UINT64_MAX;
6257 int flags = ZFS_IMPORT_MISSING_LOG;
6258 int rewind = ZPOOL_NEVER_REWIND;
6259 char *spa_config_path_env;
6260 boolean_t target_is_spa = B_TRUE;
6261 nvlist_t *cfg = NULL;
6262
6263 (void) setrlimit(RLIMIT_NOFILE, &rl);
6264 (void) enable_extended_FILE_stdio(-1, -1);
6265
6266 dprintf_setup(&argc, argv);
6267
6268 	/*
6269 	 * If the SPA_CONFIG_PATH environment variable is set, it overrides
6270 	 * the default spa_config_path setting. If the -U flag is specified,
6271 	 * it overrides the environment variable once again.
6272 	 */
6273 spa_config_path_env = getenv("SPA_CONFIG_PATH");
6274 if (spa_config_path_env != NULL)
6275 spa_config_path = spa_config_path_env;
6276
6277 /*
6278 * For performance reasons, we set this tunable down. We do so before
6279 * the arg parsing section so that the user can override this value if
6280 * they choose.
6281 */
6282 zfs_btree_verify_intensity = 3;
6283
6284 while ((c = getopt(argc, argv,
6285 "AbcCdDeEFGhiI:klLmMo:Op:PqRsSt:uU:vVx:X")) != -1) {
6286 switch (c) {
6287 case 'b':
6288 case 'c':
6289 case 'C':
6290 case 'd':
6291 case 'D':
6292 case 'E':
6293 case 'G':
6294 case 'h':
6295 case 'i':
6296 case 'l':
6297 case 'm':
6298 case 'M':
6299 case 'O':
6300 case 'R':
6301 case 's':
6302 case 'S':
6303 case 'u':
6304 dump_opt[c]++;
6305 dump_all = 0;
6306 break;
6307 case 'A':
6308 case 'e':
6309 case 'F':
6310 case 'k':
6311 case 'L':
6312 case 'P':
6313 case 'q':
6314 case 'X':
6315 dump_opt[c]++;
6316 break;
6317 /* NB: Sort single match options below. */
6318 case 'I':
6319 max_inflight = strtoull(optarg, NULL, 0);
6320 if (max_inflight == 0) {
6321 (void) fprintf(stderr, "maximum number "
6322 "of inflight I/Os must be greater "
6323 "than 0\n");
6324 usage();
6325 }
6326 break;
6327 case 'o':
6328 error = set_global_var(optarg);
6329 if (error != 0)
6330 usage();
6331 break;
6332 case 'p':
6333 if (searchdirs == NULL) {
6334 searchdirs = umem_alloc(sizeof (char *),
6335 UMEM_NOFAIL);
6336 } else {
6337 char **tmp = umem_alloc((nsearch + 1) *
6338 sizeof (char *), UMEM_NOFAIL);
6339 bcopy(searchdirs, tmp, nsearch *
6340 sizeof (char *));
6341 umem_free(searchdirs,
6342 nsearch * sizeof (char *));
6343 searchdirs = tmp;
6344 }
6345 searchdirs[nsearch++] = optarg;
6346 break;
6347 case 't':
6348 max_txg = strtoull(optarg, NULL, 0);
6349 if (max_txg < TXG_INITIAL) {
6350 (void) fprintf(stderr, "incorrect txg "
6351 "specified: %s\n", optarg);
6352 usage();
6353 }
6354 break;
6355 case 'U':
6356 spa_config_path = optarg;
6357 if (spa_config_path[0] != '/') {
6358 (void) fprintf(stderr,
6359 "cachefile must be an absolute path "
6360 "(i.e. start with a slash)\n");
6361 usage();
6362 }
6363 break;
6364 case 'v':
6365 verbose++;
6366 break;
6367 case 'V':
6368 flags = ZFS_IMPORT_VERBATIM;
6369 break;
6370 case 'x':
6371 vn_dumpdir = optarg;
6372 break;
6373 default:
6374 usage();
6375 break;
6376 }
6377 }
6378
6379 if (!dump_opt['e'] && searchdirs != NULL) {
6380 (void) fprintf(stderr, "-p option requires use of -e\n");
6381 usage();
6382 }
6383
6384 /*
6385 * ZDB does not typically re-read blocks; therefore limit the ARC
6386 * to 256 MB, which can be used entirely for metadata.
6387 */
6388 zfs_arc_max = zfs_arc_meta_limit = 256 * 1024 * 1024;
6389
6390 /*
6391 * "zdb -c" uses checksum-verifying scrub i/os which are async reads.
6392 * "zdb -b" uses traversal prefetch which uses async reads.
6393 * For good performance, let several of them be active at once.
6394 */
6395 zfs_vdev_async_read_max_active = 10;
6396
6397 /*
6398 * Disable reference tracking for better performance.
6399 */
6400 reference_tracking_enable = B_FALSE;
6401
6402 /*
6403 * Do not fail spa_load when spa_load_verify fails. This is needed
6404 * to load non-idle pools.
6405 */
6406 spa_load_verify_dryrun = B_TRUE;
6407
6408 kernel_init(FREAD);
6409
6410 if (dump_all)
6411 verbose = MAX(verbose, 1);
6412
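	/*
	 * With no specific dump option requested, enable every dump option
	 * except the behavior-modifying ones listed below, then raise each
	 * enabled option by the verbosity level.
	 */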
6413 for (c = 0; c < 256; c++) {
6414 if (dump_all && strchr("AeEFklLOPRSX", c) == NULL)
6415 dump_opt[c] = 1;
6416 if (dump_opt[c])
6417 dump_opt[c] += verbose;
6418 }
6419
6420 aok = (dump_opt['A'] == 1) || (dump_opt['A'] > 2);
6421 zfs_recover = (dump_opt['A'] > 1);
6422
6423 argc -= optind;
6424 argv += optind;
6425
6426 if (argc < 2 && dump_opt['R'])
6427 usage();
6428
6429 if (dump_opt['E']) {
6430 if (argc != 1)
6431 usage();
6432 zdb_embedded_block(argv[0]);
6433 return (0);
6434 }
6435
6436 if (argc < 1) {
6437 if (!dump_opt['e'] && dump_opt['C']) {
6438 dump_cachefile(spa_config_path);
6439 return (0);
6440 }
6441 usage();
6442 }
6443
6444 if (dump_opt['l'])
6445 return (dump_label(argv[0]));
6446
6447 if (dump_opt['O']) {
6448 if (argc != 2)
6449 usage();
6450 dump_opt['v'] = verbose + 3;
6451 return (dump_path(argv[0], argv[1]));
6452 }
6453
6454 if (dump_opt['X'] || dump_opt['F'])
6455 rewind = ZPOOL_DO_REWIND |
6456 (dump_opt['X'] ? ZPOOL_EXTREME_REWIND : 0);
6457
6458 if (nvlist_alloc(&policy, NV_UNIQUE_NAME_TYPE, 0) != 0 ||
6459 nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, max_txg) != 0 ||
6460 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, rewind) != 0)
6461 fatal("internal error: %s", strerror(ENOMEM));
6462
6463 error = 0;
6464 target = argv[0];
6465
6466 if (strpbrk(target, "/@") != NULL) {
6467 size_t targetlen;
6468
6469 target_pool = strdup(target);
6470 *strpbrk(target_pool, "/@") = '\0';
6471
6472 target_is_spa = B_FALSE;
6473 targetlen = strlen(target);
6474 if (targetlen && target[targetlen - 1] == '/')
6475 target[targetlen - 1] = '\0';
6476 } else {
6477 target_pool = target;
6478 }
6479
6480 if (dump_opt['e']) {
6481 importargs_t args = { 0 };
6482
6483 args.paths = nsearch;
6484 args.path = searchdirs;
6485 args.can_be_active = B_TRUE;
6486
6487 error = zpool_find_config(NULL, target_pool, &cfg, &args,
6488 &libzpool_config_ops);
6489
6490 if (error == 0) {
6491
6492 if (nvlist_add_nvlist(cfg,
6493 ZPOOL_LOAD_POLICY, policy) != 0) {
6494 fatal("can't open '%s': %s",
6495 target, strerror(ENOMEM));
6496 }
6497
6498 if (dump_opt['C'] > 1) {
6499 (void) printf("\nConfiguration for import:\n");
6500 dump_nvlist(cfg, 8);
6501 }
6502
6503 /*
6504 * Disable the activity check to allow examination of
6505 * active pools.
6506 */
6507 error = spa_import(target_pool, cfg, NULL,
6508 flags | ZFS_IMPORT_SKIP_MMP);
6509 }
6510 }
6511
6512 char *checkpoint_pool = NULL;
6513 char *checkpoint_target = NULL;
6514 if (dump_opt['k']) {
6515 checkpoint_pool = import_checkpointed_state(target, cfg,
6516 &checkpoint_target);
6517
6518 if (checkpoint_target != NULL)
6519 target = checkpoint_target;
6520
6521 }
6522
6523 if (error == 0) {
6524 if (dump_opt['k'] && (target_is_spa || dump_opt['R'])) {
6525 ASSERT(checkpoint_pool != NULL);
6526 ASSERT(checkpoint_target == NULL);
6527
6528 error = spa_open(checkpoint_pool, &spa, FTAG);
6529 if (error != 0) {
6530 fatal("Tried to open pool \"%s\" but "
6531 "spa_open() failed with error %d\n",
6532 checkpoint_pool, error);
6533 }
6534
6535 } else if (target_is_spa || dump_opt['R']) {
6536 zdb_set_skip_mmp(target);
6537 error = spa_open_rewind(target, &spa, FTAG, policy,
6538 NULL);
6539 if (error) {
6540 /*
6541 * If we're missing the log device then
6542 * try opening the pool after clearing the
6543 * log state.
6544 */
6545 mutex_enter(&spa_namespace_lock);
6546 if ((spa = spa_lookup(target)) != NULL &&
6547 spa->spa_log_state == SPA_LOG_MISSING) {
6548 spa->spa_log_state = SPA_LOG_CLEAR;
6549 error = 0;
6550 }
6551 mutex_exit(&spa_namespace_lock);
6552
6553 if (!error) {
6554 error = spa_open_rewind(target, &spa,
6555 FTAG, policy, NULL);
6556 }
6557 }
6558 } else {
6559 zdb_set_skip_mmp(target);
6560 error = open_objset(target, DMU_OST_ANY, FTAG, &os);
6561 }
6562 }
6563 nvlist_free(policy);
6564
6565 if (error)
6566 fatal("can't open '%s': %s", target, strerror(error));
6567
6568 argv++;
6569 argc--;
6570 if (!dump_opt['R']) {
6571 if (argc > 0) {
6572 zopt_objects = argc;
6573 zopt_object = calloc(zopt_objects, sizeof (uint64_t));
6574 for (unsigned i = 0; i < zopt_objects; i++) {
6575 errno = 0;
6576 zopt_object[i] = strtoull(argv[i], NULL, 0);
6577 if (zopt_object[i] == 0 && errno != 0)
6578 fatal("bad number %s: %s",
6579 argv[i], strerror(errno));
6580 }
6581 }
6582 if (os != NULL) {
6583 dump_dir(os);
6584 } else if (zopt_objects > 0 && !dump_opt['m']) {
6585 dump_dir(spa->spa_meta_objset);
6586 } else {
6587 dump_zpool(spa);
6588 }
6589 } else {
6590 flagbits['b'] = ZDB_FLAG_PRINT_BLKPTR;
6591 flagbits['c'] = ZDB_FLAG_CHECKSUM;
6592 flagbits['d'] = ZDB_FLAG_DECOMPRESS;
6593 flagbits['e'] = ZDB_FLAG_BSWAP;
6594 flagbits['g'] = ZDB_FLAG_GBH;
6595 flagbits['i'] = ZDB_FLAG_INDIRECT;
6596 flagbits['p'] = ZDB_FLAG_PHYS;
6597 flagbits['r'] = ZDB_FLAG_RAW;
6598
6599 for (int i = 0; i < argc; i++)
6600 zdb_read_block(argv[i], spa);
6601 }
6602
6603 if (dump_opt['k']) {
6604 free(checkpoint_pool);
6605 if (!target_is_spa)
6606 free(checkpoint_target);
6607 }
6608
6609 if (os != NULL)
6610 close_objset(os, FTAG);
6611 else
6612 spa_close(spa, FTAG);
6613
6614 fuid_table_destroy();
6615
6616 dump_debug_buffer();
6617
6618 kernel_fini();
6619
6620 return (error);
6621 }
6622