1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2021 by Delphix. All rights reserved.
24 * Copyright 2016 Gary Mills
25 * Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
26 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
27 * Copyright 2019 Joyent, Inc.
28 */
29
30 #include <sys/dsl_scan.h>
31 #include <sys/dsl_pool.h>
32 #include <sys/dsl_dataset.h>
33 #include <sys/dsl_prop.h>
34 #include <sys/dsl_dir.h>
35 #include <sys/dsl_synctask.h>
36 #include <sys/dnode.h>
37 #include <sys/dmu_tx.h>
38 #include <sys/dmu_objset.h>
39 #include <sys/arc.h>
40 #include <sys/arc_impl.h>
41 #include <sys/zap.h>
42 #include <sys/zio.h>
43 #include <sys/zfs_context.h>
44 #include <sys/fs/zfs.h>
45 #include <sys/zfs_znode.h>
46 #include <sys/spa_impl.h>
47 #include <sys/vdev_impl.h>
48 #include <sys/zil_impl.h>
49 #include <sys/zio_checksum.h>
50 #include <sys/brt.h>
51 #include <sys/ddt.h>
52 #include <sys/sa.h>
53 #include <sys/sa_impl.h>
54 #include <sys/zfeature.h>
55 #include <sys/abd.h>
56 #include <sys/range_tree.h>
57 #include <sys/dbuf.h>
58 #ifdef _KERNEL
59 #include <sys/zfs_vfsops.h>
60 #endif
61
62 /*
63 * Grand theory statement on scan queue sorting
64 *
65 * Scanning is implemented by recursively traversing all indirection levels
66 * in an object and reading all blocks referenced from said objects. This
67 * results in us approximately traversing the object from lowest logical
68 * offset to the highest. For best performance, we would want the logical
69 * blocks to be physically contiguous. However, this is frequently not the
70 * case with pools given the allocation patterns of copy-on-write filesystems.
71 * So instead, we put the I/Os into a reordering queue and issue them in a
72 * way that will most benefit physical disks (LBA-order).
73 *
74 * Queue management:
75 *
76 * Ideally, we would want to scan all metadata and queue up all block I/O
77 * prior to starting to issue it, because that allows us to do an optimal
78 * sorting job. This can however consume large amounts of memory. Therefore
 * we continuously monitor the size of the queues and constrain them to a
 * fraction of physmem (1/zfs_scan_mem_lim_fact, 5% by default). If the
 * queues grow larger than this
81 * limit, we clear out a few of the largest extents at the head of the queues
82 * to make room for more scanning. Hopefully, these extents will be fairly
83 * large and contiguous, allowing us to approach sequential I/O throughput
84 * even without a fully sorted tree.
85 *
86 * Metadata scanning takes place in dsl_scan_visit(), which is called from
87 * dsl_scan_sync() every spa_sync(). If we have either fully scanned all
88 * metadata on the pool, or we need to make room in memory because our
89 * queues are too large, dsl_scan_visit() is postponed and
90 * scan_io_queues_run() is called from dsl_scan_sync() instead. This implies
91 * that metadata scanning and queued I/O issuing are mutually exclusive. This
92 * allows us to provide maximum sequential I/O throughput for the majority of
 * I/Os issued, since sequential I/O performance is significantly degraded
 * when it is interleaved with random I/O.
95 *
96 * Implementation Notes
97 *
98 * One side effect of the queued scanning algorithm is that the scanning code
99 * needs to be notified whenever a block is freed. This is needed to allow
100 * the scanning code to remove these I/Os from the issuing queue. Additionally,
101 * we do not attempt to queue gang blocks to be issued sequentially since this
102 * is very hard to do and would have an extremely limited performance benefit.
103 * Instead, we simply issue gang I/Os as soon as we find them using the legacy
104 * algorithm.
105 *
106 * Backwards compatibility
107 *
108 * This new algorithm is backwards compatible with the legacy on-disk data
109 * structures (and therefore does not require a new feature flag).
110 * Periodically during scanning (see zfs_scan_checkpoint_intval), the scan
111 * will stop scanning metadata (in logical order) and wait for all outstanding
112 * sorted I/O to complete. Once this is done, we write out a checkpoint
113 * bookmark, indicating that we have scanned everything logically before it.
114 * If the pool is imported on a machine without the new sorting algorithm,
115 * the scan simply resumes from the last checkpoint using the legacy algorithm.
116 */
117
118 typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
119 const zbookmark_phys_t *);
120
121 static scan_cb_t dsl_scan_scrub_cb;
122
123 static int scan_ds_queue_compare(const void *a, const void *b);
124 static int scan_prefetch_queue_compare(const void *a, const void *b);
125 static void scan_ds_queue_clear(dsl_scan_t *scn);
126 static void scan_ds_prefetch_queue_clear(dsl_scan_t *scn);
127 static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj,
128 uint64_t *txg);
129 static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
130 static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
131 static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
132 static uint64_t dsl_scan_count_data_disks(spa_t *spa);
133 static void read_by_block_level(dsl_scan_t *scn, zbookmark_phys_t zb);
134
135 extern uint_t zfs_vdev_async_write_active_min_dirty_percent;
136 static int zfs_scan_blkstats = 0;
137
138 /*
139 * 'zpool status' uses bytes processed per pass to report throughput and
140 * estimate time remaining. We define a pass to start when the scanning
141 * phase completes for a sequential resilver. Optionally, this value
142 * may be used to reset the pass statistics every N txgs to provide an
143 * estimated completion time based on currently observed performance.
144 */
145 static uint_t zfs_scan_report_txgs = 0;
146
147 /*
148 * By default zfs will check to ensure it is not over the hard memory
 * limit before each txg. If finer-grained control of this is needed,
 * this value can be set to 1 to enable checking before scanning each
151 * block.
152 */
153 static int zfs_scan_strict_mem_lim = B_FALSE;
154
155 /*
 * Maximum number of in-flight scan bytes per leaf vdev. We attempt to
 * strike a balance here between keeping the vdev queues full of I/Os at
 * all times and not overflowing the queues, which would cause excessive
 * latency and long txg sync times. No matter what, we will not
160 * overload the drives with I/O, since that is protected by
161 * zfs_vdev_scrub_max_active.
162 */
163 static uint64_t zfs_scan_vdev_limit = 16 << 20;
164
165 static uint_t zfs_scan_issue_strategy = 0;
166
167 /* don't queue & sort zios, go direct */
168 static int zfs_scan_legacy = B_FALSE;
169 static uint64_t zfs_scan_max_ext_gap = 2 << 20; /* in bytes */
170
171 /*
172 * fill_weight is non-tunable at runtime, so we copy it at module init from
173 * zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
174 * break queue sorting.
175 */
176 static uint_t zfs_scan_fill_weight = 3;
177 static uint64_t fill_weight;
178
179 /* See dsl_scan_should_clear() for details on the memory limit tunables */
180 static const uint64_t zfs_scan_mem_lim_min = 16 << 20; /* bytes */
181 static const uint64_t zfs_scan_mem_lim_soft_max = 128 << 20; /* bytes */
182
183
184 /* fraction of physmem */
185 static uint_t zfs_scan_mem_lim_fact = 20;
186
187 /* fraction of mem lim above */
188 static uint_t zfs_scan_mem_lim_soft_fact = 20;
189
190 /* minimum milliseconds to scrub per txg */
191 static uint_t zfs_scrub_min_time_ms = 1000;
192
193 /* minimum milliseconds to obsolete per txg */
194 static uint_t zfs_obsolete_min_time_ms = 500;
195
196 /* minimum milliseconds to free per txg */
197 static uint_t zfs_free_min_time_ms = 1000;
198
199 /* minimum milliseconds to resilver per txg */
200 static uint_t zfs_resilver_min_time_ms = 3000;
201
202 static uint_t zfs_scan_checkpoint_intval = 7200; /* in seconds */
203 int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */
204 static int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
205 static int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
206 static const ddt_class_t zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
207 /* max number of blocks to free in a single TXG */
208 static uint64_t zfs_async_block_max_blocks = UINT64_MAX;
209 /* max number of dedup blocks to free in a single TXG */
210 static uint64_t zfs_max_async_dedup_frees = 100000;
211
212 /* set to disable resilver deferring */
213 static int zfs_resilver_disable_defer = B_FALSE;
214
215 /*
216 * We wait a few txgs after importing a pool to begin scanning so that
217 * the import / mounting code isn't held up by scrub / resilver IO.
218 * Unfortunately, it is a bit difficult to determine exactly how long
219 * this will take since userspace will trigger fs mounts asynchronously
220 * and the kernel will create zvol minors asynchronously. As a result,
221 * the value provided here is a bit arbitrary, but represents a
222 * reasonable estimate of how many txgs it will take to finish fully
 * importing a pool.
224 */
225 #define SCAN_IMPORT_WAIT_TXGS 5
226
227 #define DSL_SCAN_IS_SCRUB_RESILVER(scn) \
228 ((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
229 (scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)
230
231 /*
232 * Enable/disable the processing of the free_bpobj object.
233 */
234 static int zfs_free_bpobj_enabled = 1;
235
236 /* Error blocks to be scrubbed in one txg. */
237 static uint_t zfs_scrub_error_blocks_per_txg = 1 << 12;
238
239 /* the order has to match pool_scan_type */
240 static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
241 NULL,
242 dsl_scan_scrub_cb, /* POOL_SCAN_SCRUB */
243 dsl_scan_scrub_cb, /* POOL_SCAN_RESILVER */
244 };
245
246 /* In core node for the scn->scn_queue. Represents a dataset to be scanned */
247 typedef struct {
248 uint64_t sds_dsobj;
249 uint64_t sds_txg;
250 avl_node_t sds_node;
251 } scan_ds_t;
252
253 /*
254 * This controls what conditions are placed on dsl_scan_sync_state():
255 * SYNC_OPTIONAL) write out scn_phys iff scn_queues_pending == 0
256 * SYNC_MANDATORY) write out scn_phys always. scn_queues_pending must be 0.
257 * SYNC_CACHED) if scn_queues_pending == 0, write out scn_phys. Otherwise
258 * write out the scn_phys_cached version.
259 * See dsl_scan_sync_state for details.
260 */
261 typedef enum {
262 SYNC_OPTIONAL,
263 SYNC_MANDATORY,
264 SYNC_CACHED
265 } state_sync_type_t;
266
267 /*
268 * This struct represents the minimum information needed to reconstruct a
269 * zio for sequential scanning. This is useful because many of these will
270 * accumulate in the sequential IO queues before being issued, so saving
271 * memory matters here.
272 */
273 typedef struct scan_io {
274 /* fields from blkptr_t */
275 uint64_t sio_blk_prop;
276 uint64_t sio_phys_birth;
277 uint64_t sio_birth;
278 zio_cksum_t sio_cksum;
279 uint32_t sio_nr_dvas;
280
281 /* fields from zio_t */
282 uint32_t sio_flags;
283 zbookmark_phys_t sio_zb;
284
285 /* members for queue sorting */
286 union {
287 avl_node_t sio_addr_node; /* link into issuing queue */
288 list_node_t sio_list_node; /* link for issuing to disk */
289 } sio_nodes;
290
291 /*
292 * There may be up to SPA_DVAS_PER_BP DVAs here from the bp,
293 * depending on how many were in the original bp. Only the
294 * first DVA is really used for sorting and issuing purposes.
295 * The other DVAs (if provided) simply exist so that the zio
296 * layer can find additional copies to repair from in the
297 * event of an error. This array must go at the end of the
298 * struct to allow this for the variable number of elements.
299 */
300 dva_t sio_dva[];
301 } scan_io_t;
302
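/*
 * Accessors for the primary (first) DVA of a scan_io_t. Only the first DVA
 * is used for sorting and issuing; SIO_GET_MUSED() reports the memory
 * footprint of a sio, including its trailing DVA array.
 */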
303 #define SIO_SET_OFFSET(sio, x) DVA_SET_OFFSET(&(sio)->sio_dva[0], x)
304 #define SIO_SET_ASIZE(sio, x) DVA_SET_ASIZE(&(sio)->sio_dva[0], x)
305 #define SIO_GET_OFFSET(sio) DVA_GET_OFFSET(&(sio)->sio_dva[0])
306 #define SIO_GET_ASIZE(sio) DVA_GET_ASIZE(&(sio)->sio_dva[0])
307 #define SIO_GET_END_OFFSET(sio) \
308 (SIO_GET_OFFSET(sio) + SIO_GET_ASIZE(sio))
309 #define SIO_GET_MUSED(sio) \
310 (sizeof (scan_io_t) + ((sio)->sio_nr_dvas * sizeof (dva_t)))
311
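/*
 * Per-top-level-vdev queue of pending sorted scan I/Os. Extents are tracked
 * both by address (q_exts_by_addr) and by size (q_exts_by_size) so that the
 * largest, most contiguous extents can be found quickly when it is time to
 * issue I/O.
 */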
312 struct dsl_scan_io_queue {
313 dsl_scan_t *q_scn; /* associated dsl_scan_t */
314 vdev_t *q_vd; /* top-level vdev that this queue represents */
315 zio_t *q_zio; /* scn_zio_root child for waiting on IO */
316
317 /* trees used for sorting I/Os and extents of I/Os */
318 range_tree_t *q_exts_by_addr;
319 zfs_btree_t q_exts_by_size;
320 avl_tree_t q_sios_by_addr;
321 uint64_t q_sio_memused;
322 uint64_t q_last_ext_addr;
323
324 /* members for zio rate limiting */
325 uint64_t q_maxinflight_bytes;
326 uint64_t q_inflight_bytes;
327 kcondvar_t q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */
328
329 /* per txg statistics */
330 uint64_t q_total_seg_size_this_txg;
331 uint64_t q_segs_this_txg;
332 uint64_t q_total_zio_size_this_txg;
333 uint64_t q_zios_this_txg;
334 };
335
336 /* private data for dsl_scan_prefetch_cb() */
337 typedef struct scan_prefetch_ctx {
338 zfs_refcount_t spc_refcnt; /* refcount for memory management */
339 dsl_scan_t *spc_scn; /* dsl_scan_t for the pool */
340 boolean_t spc_root; /* is this prefetch for an objset? */
341 uint8_t spc_indblkshift; /* dn_indblkshift of current dnode */
342 uint16_t spc_datablkszsec; /* dn_idatablkszsec of current dnode */
343 } scan_prefetch_ctx_t;
344
345 /* private data for dsl_scan_prefetch() */
346 typedef struct scan_prefetch_issue_ctx {
347 avl_node_t spic_avl_node; /* link into scn->scn_prefetch_queue */
348 scan_prefetch_ctx_t *spic_spc; /* spc for the callback */
349 blkptr_t spic_bp; /* bp to prefetch */
350 zbookmark_phys_t spic_zb; /* bookmark to prefetch */
351 } scan_prefetch_issue_ctx_t;
352
353 static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
354 const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue);
355 static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue,
356 scan_io_t *sio);
357
358 static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd);
359 static void scan_io_queues_destroy(dsl_scan_t *scn);
360
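/*
 * One kmem cache per possible DVA count (1 through SPA_DVAS_PER_BP), so
 * each scan_io_t is allocated only as large as its trailing DVA array
 * requires.
 */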
361 static kmem_cache_t *sio_cache[SPA_DVAS_PER_BP];
362
363 /* sio->sio_nr_dvas must be set so we know which cache to free from */
364 static void
sio_free(scan_io_t *sio)
366 {
367 ASSERT3U(sio->sio_nr_dvas, >, 0);
368 ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
369
370 kmem_cache_free(sio_cache[sio->sio_nr_dvas - 1], sio);
371 }
372
373 /* It is up to the caller to set sio->sio_nr_dvas for freeing */
374 static scan_io_t *
sio_alloc(unsigned short nr_dvas)
376 {
377 ASSERT3U(nr_dvas, >, 0);
378 ASSERT3U(nr_dvas, <=, SPA_DVAS_PER_BP);
379
380 return (kmem_cache_alloc(sio_cache[nr_dvas - 1], KM_SLEEP));
381 }
382
383 void
scan_init(void)
385 {
386 /*
387 * This is used in ext_size_compare() to weight segments
388 * based on how sparse they are. This cannot be changed
389 * mid-scan and the tree comparison functions don't currently
390 * have a mechanism for passing additional context to the
391 * compare functions. Thus we store this value globally and
	 * we only allow it to be set at module initialization time.
393 */
394 fill_weight = zfs_scan_fill_weight;
395
396 for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
397 char name[36];
398
399 (void) snprintf(name, sizeof (name), "sio_cache_%d", i);
400 sio_cache[i] = kmem_cache_create(name,
401 (sizeof (scan_io_t) + ((i + 1) * sizeof (dva_t))),
402 0, NULL, NULL, NULL, NULL, NULL, 0);
403 }
404 }
405
406 void
scan_fini(void)
408 {
409 for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
410 kmem_cache_destroy(sio_cache[i]);
411 }
412 }
413
414 static inline boolean_t
dsl_scan_is_running(const dsl_scan_t *scn)
416 {
417 return (scn->scn_phys.scn_state == DSS_SCANNING);
418 }
419
420 boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
422 {
423 return (dsl_scan_is_running(dp->dp_scan) &&
424 dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
425 }
426
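/*
 * Conversions between the compact scan_io_t representation and a full
 * blkptr_t: sio2bp() reconstructs a blkptr for issuing I/O, while bp2sio()
 * condenses a blkptr, rotating the DVA at index dva_i into the first slot.
 */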
427 static inline void
sio2bp(const scan_io_t *sio, blkptr_t *bp)
429 {
430 memset(bp, 0, sizeof (*bp));
431 bp->blk_prop = sio->sio_blk_prop;
432 BP_SET_PHYSICAL_BIRTH(bp, sio->sio_phys_birth);
433 BP_SET_LOGICAL_BIRTH(bp, sio->sio_birth);
434 bp->blk_fill = 1; /* we always only work with data pointers */
435 bp->blk_cksum = sio->sio_cksum;
436
437 ASSERT3U(sio->sio_nr_dvas, >, 0);
438 ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
439
440 memcpy(bp->blk_dva, sio->sio_dva, sio->sio_nr_dvas * sizeof (dva_t));
441 }
442
443 static inline void
bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
445 {
446 sio->sio_blk_prop = bp->blk_prop;
447 sio->sio_phys_birth = BP_GET_PHYSICAL_BIRTH(bp);
448 sio->sio_birth = BP_GET_LOGICAL_BIRTH(bp);
449 sio->sio_cksum = bp->blk_cksum;
450 sio->sio_nr_dvas = BP_GET_NDVAS(bp);
451
452 /*
453 * Copy the DVAs to the sio. We need all copies of the block so
454 * that the self healing code can use the alternate copies if the
455 * first is corrupted. We want the DVA at index dva_i to be first
456 * in the sio since this is the primary one that we want to issue.
457 */
458 for (int i = 0, j = dva_i; i < sio->sio_nr_dvas; i++, j++) {
459 sio->sio_dva[i] = bp->blk_dva[j % sio->sio_nr_dvas];
460 }
461 }
462
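/*
 * Set up the in-core scan state at pool import time, loading any persisted
 * scan / error scrub state and the on-disk dataset queue from the MOS.
 */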
463 int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
465 {
466 int err;
467 dsl_scan_t *scn;
468 spa_t *spa = dp->dp_spa;
469 uint64_t f;
470
471 scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
472 scn->scn_dp = dp;
473
474 /*
475 * It's possible that we're resuming a scan after a reboot so
476 * make sure that the scan_async_destroying flag is initialized
477 * appropriately.
478 */
479 ASSERT(!scn->scn_async_destroying);
480 scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
481 SPA_FEATURE_ASYNC_DESTROY);
482
483 /*
484 * Calculate the max number of in-flight bytes for pool-wide
485 * scanning operations (minimum 1MB, maximum 1/4 of arc_c_max).
486 * Limits for the issuing phase are done per top-level vdev and
487 * are handled separately.
488 */
489 scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20,
490 zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa)));
491
492 avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
493 offsetof(scan_ds_t, sds_node));
494 mutex_init(&scn->scn_queue_lock, NULL, MUTEX_DEFAULT, NULL);
495 avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
496 sizeof (scan_prefetch_issue_ctx_t),
497 offsetof(scan_prefetch_issue_ctx_t, spic_avl_node));
498
499 err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
500 "scrub_func", sizeof (uint64_t), 1, &f);
501 if (err == 0) {
502 /*
503 * There was an old-style scrub in progress. Restart a
504 * new-style scrub from the beginning.
505 */
506 scn->scn_restart_txg = txg;
507 zfs_dbgmsg("old-style scrub was in progress for %s; "
508 "restarting new-style scrub in txg %llu",
509 spa->spa_name,
510 (longlong_t)scn->scn_restart_txg);
511
512 /*
513 * Load the queue obj from the old location so that it
514 * can be freed by dsl_scan_done().
515 */
516 (void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
517 "scrub_queue", sizeof (uint64_t), 1,
518 &scn->scn_phys.scn_queue_obj);
519 } else {
520 err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
521 DMU_POOL_ERRORSCRUB, sizeof (uint64_t),
522 ERRORSCRUB_PHYS_NUMINTS, &scn->errorscrub_phys);
523
524 if (err != 0 && err != ENOENT)
525 return (err);
526
527 err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
528 DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
529 &scn->scn_phys);
530
531 /*
		 * Detect if the pool contains the signature of #2094. If it
		 * does, properly update the scn->scn_phys structure and notify
534 * the administrator by setting an errata for the pool.
535 */
536 if (err == EOVERFLOW) {
537 uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];
538 VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24);
539 VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==,
540 (23 * sizeof (uint64_t)));
541
542 err = zap_lookup(dp->dp_meta_objset,
543 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
544 sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
545 if (err == 0) {
546 uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS];
547
548 if (overflow & ~DSL_SCAN_FLAGS_MASK ||
549 scn->scn_async_destroying) {
550 spa->spa_errata =
551 ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY;
552 return (EOVERFLOW);
553 }
554
555 memcpy(&scn->scn_phys, zaptmp,
556 SCAN_PHYS_NUMINTS * sizeof (uint64_t));
557 scn->scn_phys.scn_flags = overflow;
558
559 /* Required scrub already in progress. */
560 if (scn->scn_phys.scn_state == DSS_FINISHED ||
561 scn->scn_phys.scn_state == DSS_CANCELED)
562 spa->spa_errata =
563 ZPOOL_ERRATA_ZOL_2094_SCRUB;
564 }
565 }
566
567 if (err == ENOENT)
568 return (0);
569 else if (err)
570 return (err);
571
572 /*
573 * We might be restarting after a reboot, so jump the issued
574 * counter to how far we've scanned. We know we're consistent
575 * up to here.
576 */
577 scn->scn_issued_before_pass = scn->scn_phys.scn_examined -
578 scn->scn_phys.scn_skipped;
579
580 if (dsl_scan_is_running(scn) &&
581 spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
582 /*
583 * A new-type scrub was in progress on an old
584 * pool, and the pool was accessed by old
585 * software. Restart from the beginning, since
586 * the old software may have changed the pool in
587 * the meantime.
588 */
589 scn->scn_restart_txg = txg;
590 zfs_dbgmsg("new-style scrub for %s was modified "
591 "by old software; restarting in txg %llu",
592 spa->spa_name,
593 (longlong_t)scn->scn_restart_txg);
594 } else if (dsl_scan_resilvering(dp)) {
595 /*
596 * If a resilver is in progress and there are already
597 * errors, restart it instead of finishing this scan and
598 * then restarting it. If there haven't been any errors
599 * then remember that the incore DTL is valid.
600 */
601 if (scn->scn_phys.scn_errors > 0) {
602 scn->scn_restart_txg = txg;
603 zfs_dbgmsg("resilver can't excise DTL_MISSING "
604 "when finished; restarting on %s in txg "
605 "%llu",
606 spa->spa_name,
607 (u_longlong_t)scn->scn_restart_txg);
608 } else {
609 /* it's safe to excise DTL when finished */
610 spa->spa_scrub_started = B_TRUE;
611 }
612 }
613 }
614
615 memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));
616
617 /* reload the queue into the in-core state */
618 if (scn->scn_phys.scn_queue_obj != 0) {
619 zap_cursor_t zc;
620 zap_attribute_t za;
621
622 for (zap_cursor_init(&zc, dp->dp_meta_objset,
623 scn->scn_phys.scn_queue_obj);
624 zap_cursor_retrieve(&zc, &za) == 0;
625 (void) zap_cursor_advance(&zc)) {
626 scan_ds_queue_insert(scn,
627 zfs_strtonum(za.za_name, NULL),
628 za.za_first_integer);
629 }
630 zap_cursor_fini(&zc);
631 }
632
633 ddt_walk_init(spa, scn->scn_phys.scn_max_txg);
634
635 spa_scan_stat_init(spa);
636 vdev_scan_stat_init(spa->spa_root_vdev);
637
638 return (0);
639 }
640
641 void
dsl_scan_fini(dsl_pool_t *dp)
643 {
644 if (dp->dp_scan != NULL) {
645 dsl_scan_t *scn = dp->dp_scan;
646
647 if (scn->scn_taskq != NULL)
648 taskq_destroy(scn->scn_taskq);
649
650 scan_ds_queue_clear(scn);
651 avl_destroy(&scn->scn_queue);
652 mutex_destroy(&scn->scn_queue_lock);
653 scan_ds_prefetch_queue_clear(scn);
654 avl_destroy(&scn->scn_prefetch_queue);
655
656 kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
657 dp->dp_scan = NULL;
658 }
659 }
660
661 static boolean_t
dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
663 {
664 return (scn->scn_restart_txg != 0 &&
665 scn->scn_restart_txg <= tx->tx_txg);
666 }
667
668 boolean_t
dsl_scan_resilver_scheduled(dsl_pool_t *dp)
670 {
671 return ((dp->dp_scan && dp->dp_scan->scn_restart_txg != 0) ||
672 (spa_async_tasks(dp->dp_spa) & SPA_ASYNC_RESILVER));
673 }
674
675 boolean_t
dsl_scan_scrubbing(const dsl_pool_t *dp)
677 {
678 dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys;
679
680 return (scn_phys->scn_state == DSS_SCANNING &&
681 scn_phys->scn_func == POOL_SCAN_SCRUB);
682 }
683
684 boolean_t
dsl_errorscrubbing(const dsl_pool_t *dp)
686 {
687 dsl_errorscrub_phys_t *errorscrub_phys = &dp->dp_scan->errorscrub_phys;
688
689 return (errorscrub_phys->dep_state == DSS_ERRORSCRUBBING &&
690 errorscrub_phys->dep_func == POOL_SCAN_ERRORSCRUB);
691 }
692
693 boolean_t
dsl_errorscrub_is_paused(const dsl_scan_t *scn)
695 {
696 return (dsl_errorscrubbing(scn->scn_dp) &&
697 scn->errorscrub_phys.dep_paused_flags);
698 }
699
700 boolean_t
dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
702 {
703 return (dsl_scan_scrubbing(scn->scn_dp) &&
704 scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
705 }
706
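/*
 * Persist the current error scrub state, including the serialized ZAP
 * cursor, to the MOS.
 */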
707 static void
dsl_errorscrub_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
709 {
710 scn->errorscrub_phys.dep_cursor =
711 zap_cursor_serialize(&scn->errorscrub_cursor);
712
713 VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
714 DMU_POOL_DIRECTORY_OBJECT,
715 DMU_POOL_ERRORSCRUB, sizeof (uint64_t), ERRORSCRUB_PHYS_NUMINTS,
716 &scn->errorscrub_phys, tx));
717 }
718
719 static void
dsl_errorscrub_setup_sync(void *arg, dmu_tx_t *tx)
721 {
722 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
723 pool_scan_func_t *funcp = arg;
724 dsl_pool_t *dp = scn->scn_dp;
725 spa_t *spa = dp->dp_spa;
726
727 ASSERT(!dsl_scan_is_running(scn));
728 ASSERT(!dsl_errorscrubbing(scn->scn_dp));
729 ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
730
731 memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys));
732 scn->errorscrub_phys.dep_func = *funcp;
733 scn->errorscrub_phys.dep_state = DSS_ERRORSCRUBBING;
734 scn->errorscrub_phys.dep_start_time = gethrestime_sec();
735 scn->errorscrub_phys.dep_to_examine = spa_get_last_errlog_size(spa);
736 scn->errorscrub_phys.dep_examined = 0;
737 scn->errorscrub_phys.dep_errors = 0;
738 scn->errorscrub_phys.dep_cursor = 0;
739 zap_cursor_init_serialized(&scn->errorscrub_cursor,
740 spa->spa_meta_objset, spa->spa_errlog_last,
741 scn->errorscrub_phys.dep_cursor);
742
743 vdev_config_dirty(spa->spa_root_vdev);
744 spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_START);
745
746 dsl_errorscrub_sync_state(scn, tx);
747
748 spa_history_log_internal(spa, "error scrub setup", tx,
749 "func=%u mintxg=%u maxtxg=%llu",
750 *funcp, 0, (u_longlong_t)tx->tx_txg);
751 }
752
753 static int
dsl_errorscrub_setup_check(void *arg, dmu_tx_t *tx)
755 {
756 (void) arg;
757 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
758
759 if (dsl_scan_is_running(scn) || (dsl_errorscrubbing(scn->scn_dp))) {
760 return (SET_ERROR(EBUSY));
761 }
762
763 if (spa_get_last_errlog_size(scn->scn_dp->dp_spa) == 0) {
764 return (ECANCELED);
765 }
766 return (0);
767 }
768
769 /*
770 * Writes out a persistent dsl_scan_phys_t record to the pool directory.
771 * Because we can be running in the block sorting algorithm, we do not always
772 * want to write out the record, only when it is "safe" to do so. This safety
773 * condition is achieved by making sure that the sorting queues are empty
774 * (scn_queues_pending == 0). When this condition is not true, the sync'd state
775 * is inconsistent with how much actual scanning progress has been made. The
776 * kind of sync to be performed is specified by the sync_type argument. If the
777 * sync is optional, we only sync if the queues are empty. If the sync is
778 * mandatory, we do a hard ASSERT to make sure that the queues are empty. The
779 * third possible state is a "cached" sync. This is done in response to:
780 * 1) The dataset that was in the last sync'd dsl_scan_phys_t having been
781 * destroyed, so we wouldn't be able to restart scanning from it.
782 * 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been
783 * superseded by a newer snapshot.
784 * 3) The dataset that was in the last sync'd dsl_scan_phys_t having been
785 * swapped with its clone.
786 * In all cases, a cached sync simply rewrites the last record we've written,
787 * just slightly modified. For the modifications that are performed to the
788 * last written dsl_scan_phys_t, see dsl_scan_ds_destroyed,
789 * dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped.
790 */
791 static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
793 {
794 int i;
795 spa_t *spa = scn->scn_dp->dp_spa;
796
797 ASSERT(sync_type != SYNC_MANDATORY || scn->scn_queues_pending == 0);
798 if (scn->scn_queues_pending == 0) {
799 for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
800 vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
801 dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;
802
803 if (q == NULL)
804 continue;
805
806 mutex_enter(&vd->vdev_scan_io_queue_lock);
807 ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
808 ASSERT3P(zfs_btree_first(&q->q_exts_by_size, NULL), ==,
809 NULL);
810 ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL);
811 mutex_exit(&vd->vdev_scan_io_queue_lock);
812 }
813
814 if (scn->scn_phys.scn_queue_obj != 0)
815 scan_ds_queue_sync(scn, tx);
816 VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
817 DMU_POOL_DIRECTORY_OBJECT,
818 DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
819 &scn->scn_phys, tx));
820 memcpy(&scn->scn_phys_cached, &scn->scn_phys,
821 sizeof (scn->scn_phys));
822
823 if (scn->scn_checkpointing)
824 zfs_dbgmsg("finish scan checkpoint for %s",
825 spa->spa_name);
826
827 scn->scn_checkpointing = B_FALSE;
828 scn->scn_last_checkpoint = ddi_get_lbolt();
829 } else if (sync_type == SYNC_CACHED) {
830 VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
831 DMU_POOL_DIRECTORY_OBJECT,
832 DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
833 &scn->scn_phys_cached, tx));
834 }
835 }
836
837 int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
839 {
840 (void) arg;
841 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
842 vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
843
844 if (dsl_scan_is_running(scn) || vdev_rebuild_active(rvd) ||
845 dsl_errorscrubbing(scn->scn_dp))
846 return (SET_ERROR(EBUSY));
847
848 return (0);
849 }
850
851 void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
853 {
854 (void) arg;
855 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
856 pool_scan_func_t *funcp = arg;
857 dmu_object_type_t ot = 0;
858 dsl_pool_t *dp = scn->scn_dp;
859 spa_t *spa = dp->dp_spa;
860
861 ASSERT(!dsl_scan_is_running(scn));
862 ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
863 memset(&scn->scn_phys, 0, sizeof (scn->scn_phys));
864
865 /*
866 * If we are starting a fresh scrub, we erase the error scrub
867 * information from disk.
868 */
869 memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys));
870 dsl_errorscrub_sync_state(scn, tx);
871
872 scn->scn_phys.scn_func = *funcp;
873 scn->scn_phys.scn_state = DSS_SCANNING;
874 scn->scn_phys.scn_min_txg = 0;
875 scn->scn_phys.scn_max_txg = tx->tx_txg;
876 scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
877 scn->scn_phys.scn_start_time = gethrestime_sec();
878 scn->scn_phys.scn_errors = 0;
879 scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
880 scn->scn_issued_before_pass = 0;
881 scn->scn_restart_txg = 0;
882 scn->scn_done_txg = 0;
883 scn->scn_last_checkpoint = 0;
884 scn->scn_checkpointing = B_FALSE;
885 spa_scan_stat_init(spa);
886 vdev_scan_stat_init(spa->spa_root_vdev);
887
888 if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
889 scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;
890
891 /* rewrite all disk labels */
892 vdev_config_dirty(spa->spa_root_vdev);
893
894 if (vdev_resilver_needed(spa->spa_root_vdev,
895 &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
896 nvlist_t *aux = fnvlist_alloc();
897 fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
898 "healing");
899 spa_event_notify(spa, NULL, aux,
900 ESC_ZFS_RESILVER_START);
901 nvlist_free(aux);
902 } else {
903 spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START);
904 }
905
906 spa->spa_scrub_started = B_TRUE;
907 /*
908 * If this is an incremental scrub, limit the DDT scrub phase
909 * to just the auto-ditto class (for correctness); the rest
910 * of the scrub should go faster using top-down pruning.
911 */
912 if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
913 scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;
914
915 /*
		 * When starting a resilver, clear any existing rebuild state.
		 * This is required to prevent stale rebuild status from
		 * being reported when a rebuild is run, then a resilver, and
		 * finally a scrub, in which case only the scrub status
		 * should be reported by 'zpool status'.
921 */
922 if (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) {
923 vdev_t *rvd = spa->spa_root_vdev;
924 for (uint64_t i = 0; i < rvd->vdev_children; i++) {
925 vdev_t *vd = rvd->vdev_child[i];
926 vdev_rebuild_clear_sync(
927 (void *)(uintptr_t)vd->vdev_id, tx);
928 }
929 }
930 }
931
932 /* back to the generic stuff */
933
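	/*
	 * Allocate or tear down the pool-wide block statistics, depending
	 * on whether zfs_scan_blkstats is enabled.
	 */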
934 if (zfs_scan_blkstats) {
935 if (dp->dp_blkstats == NULL) {
936 dp->dp_blkstats =
937 vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
938 }
939 memset(&dp->dp_blkstats->zab_type, 0,
940 sizeof (dp->dp_blkstats->zab_type));
941 } else {
942 if (dp->dp_blkstats) {
943 vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
944 dp->dp_blkstats = NULL;
945 }
946 }
947
948 if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
949 ot = DMU_OT_ZAP_OTHER;
950
951 scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
952 ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);
953
954 memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));
955
956 ddt_walk_init(spa, scn->scn_phys.scn_max_txg);
957
958 dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
959
960 spa_history_log_internal(spa, "scan setup", tx,
961 "func=%u mintxg=%llu maxtxg=%llu",
962 *funcp, (u_longlong_t)scn->scn_phys.scn_min_txg,
963 (u_longlong_t)scn->scn_phys.scn_max_txg);
964 }
965
966 /*
967 * Called by ZFS_IOC_POOL_SCRUB and ZFS_IOC_POOL_SCAN ioctl to start a scrub,
968 * error scrub or resilver. Can also be called to resume a paused scrub or
969 * error scrub.
970 */
971 int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
973 {
974 spa_t *spa = dp->dp_spa;
975 dsl_scan_t *scn = dp->dp_scan;
976
977 /*
978 * Purge all vdev caches and probe all devices. We do this here
979 * rather than in sync context because this requires a writer lock
980 * on the spa_config lock, which we can't do from sync context. The
981 * spa_scrub_reopen flag indicates that vdev_open() should not
982 * attempt to start another scrub.
983 */
984 spa_vdev_state_enter(spa, SCL_NONE);
985 spa->spa_scrub_reopen = B_TRUE;
986 vdev_reopen(spa->spa_root_vdev);
987 spa->spa_scrub_reopen = B_FALSE;
988 (void) spa_vdev_state_exit(spa, NULL, 0);
989
990 if (func == POOL_SCAN_RESILVER) {
991 dsl_scan_restart_resilver(spa->spa_dsl_pool, 0);
992 return (0);
993 }
994
995 if (func == POOL_SCAN_ERRORSCRUB) {
996 if (dsl_errorscrub_is_paused(dp->dp_scan)) {
997 /*
998 * got error scrub start cmd, resume paused error scrub.
999 */
1000 int err = dsl_scrub_set_pause_resume(scn->scn_dp,
1001 POOL_SCRUB_NORMAL);
1002 if (err == 0) {
1003 spa_event_notify(spa, NULL, NULL,
1004 ESC_ZFS_ERRORSCRUB_RESUME);
1005 return (ECANCELED);
1006 }
1007 return (SET_ERROR(err));
1008 }
1009
1010 return (dsl_sync_task(spa_name(dp->dp_spa),
1011 dsl_errorscrub_setup_check, dsl_errorscrub_setup_sync,
1012 &func, 0, ZFS_SPACE_CHECK_RESERVED));
1013 }
1014
1015 if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
1016 /* got scrub start cmd, resume paused scrub */
1017 int err = dsl_scrub_set_pause_resume(scn->scn_dp,
1018 POOL_SCRUB_NORMAL);
1019 if (err == 0) {
1020 spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
1021 return (SET_ERROR(ECANCELED));
1022 }
1023 return (SET_ERROR(err));
1024 }
1025
1026 return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
1027 dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
1028 }
1029
1030 static void
dsl_errorscrub_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
1032 {
1033 dsl_pool_t *dp = scn->scn_dp;
1034 spa_t *spa = dp->dp_spa;
1035
1036 if (complete) {
1037 spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_FINISH);
1038 spa_history_log_internal(spa, "error scrub done", tx,
1039 "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
1040 } else {
1041 spa_history_log_internal(spa, "error scrub canceled", tx,
1042 "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
1043 }
1044
1045 scn->errorscrub_phys.dep_state = complete ? DSS_FINISHED : DSS_CANCELED;
1046 spa->spa_scrub_active = B_FALSE;
1047 spa_errlog_rotate(spa);
1048 scn->errorscrub_phys.dep_end_time = gethrestime_sec();
1049 zap_cursor_fini(&scn->errorscrub_cursor);
1050
1051 if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
1052 spa->spa_errata = 0;
1053
1054 ASSERT(!dsl_errorscrubbing(scn->scn_dp));
1055 }
1056
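/*
 * Tear down a scrub or resilver once it completes or is canceled: free the
 * on-disk queue object, reassess DTLs, and log and notify the outcome.
 */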
1057 static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
1059 {
1060 static const char *old_names[] = {
1061 "scrub_bookmark",
1062 "scrub_ddt_bookmark",
1063 "scrub_ddt_class_max",
1064 "scrub_queue",
1065 "scrub_min_txg",
1066 "scrub_max_txg",
1067 "scrub_func",
1068 "scrub_errors",
1069 NULL
1070 };
1071
1072 dsl_pool_t *dp = scn->scn_dp;
1073 spa_t *spa = dp->dp_spa;
1074 int i;
1075
1076 /* Remove any remnants of an old-style scrub. */
1077 for (i = 0; old_names[i]; i++) {
1078 (void) zap_remove(dp->dp_meta_objset,
1079 DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
1080 }
1081
1082 if (scn->scn_phys.scn_queue_obj != 0) {
1083 VERIFY0(dmu_object_free(dp->dp_meta_objset,
1084 scn->scn_phys.scn_queue_obj, tx));
1085 scn->scn_phys.scn_queue_obj = 0;
1086 }
1087 scan_ds_queue_clear(scn);
1088 scan_ds_prefetch_queue_clear(scn);
1089
1090 scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
1091
1092 /*
1093 * If we were "restarted" from a stopped state, don't bother
1094 * with anything else.
1095 */
1096 if (!dsl_scan_is_running(scn)) {
1097 ASSERT(!scn->scn_is_sorted);
1098 return;
1099 }
1100
1101 if (scn->scn_is_sorted) {
1102 scan_io_queues_destroy(scn);
1103 scn->scn_is_sorted = B_FALSE;
1104
1105 if (scn->scn_taskq != NULL) {
1106 taskq_destroy(scn->scn_taskq);
1107 scn->scn_taskq = NULL;
1108 }
1109 }
1110
1111 scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED;
1112
1113 spa_notify_waiters(spa);
1114
1115 if (dsl_scan_restarting(scn, tx))
1116 spa_history_log_internal(spa, "scan aborted, restarting", tx,
1117 "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
1118 else if (!complete)
1119 spa_history_log_internal(spa, "scan cancelled", tx,
1120 "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
1121 else
1122 spa_history_log_internal(spa, "scan done", tx,
1123 "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
1124
1125 if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
1126 spa->spa_scrub_active = B_FALSE;
1127
1128 /*
1129 * If the scrub/resilver completed, update all DTLs to
1130 * reflect this. Whether it succeeded or not, vacate
1131 * all temporary scrub DTLs.
1132 *
1133 * As the scrub does not currently support traversing
1134 * data that have been freed but are part of a checkpoint,
1135 * we don't mark the scrub as done in the DTLs as faults
1136 * may still exist in those vdevs.
1137 */
1138 if (complete &&
1139 !spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
1140 vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
1141 scn->scn_phys.scn_max_txg, B_TRUE, B_FALSE);
1142
1143 if (scn->scn_phys.scn_min_txg) {
1144 nvlist_t *aux = fnvlist_alloc();
1145 fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
1146 "healing");
1147 spa_event_notify(spa, NULL, aux,
1148 ESC_ZFS_RESILVER_FINISH);
1149 nvlist_free(aux);
1150 } else {
1151 spa_event_notify(spa, NULL, NULL,
1152 ESC_ZFS_SCRUB_FINISH);
1153 }
1154 } else {
1155 vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
1156 0, B_TRUE, B_FALSE);
1157 }
1158 spa_errlog_rotate(spa);
1159
1160 /*
1161 * Don't clear flag until after vdev_dtl_reassess to ensure that
1162 * DTL_MISSING will get updated when possible.
1163 */
1164 spa->spa_scrub_started = B_FALSE;
1165
1166 /*
1167 * We may have finished replacing a device.
1168 * Let the async thread assess this and handle the detach.
1169 */
1170 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
1171
1172 /*
1173 * Clear any resilver_deferred flags in the config.
1174 * If there are drives that need resilvering, kick
1175 * off an asynchronous request to start resilver.
1176 * vdev_clear_resilver_deferred() may update the config
1177 * before the resilver can restart. In the event of
1178 * a crash during this period, the spa loading code
1179 * will find the drives that need to be resilvered
1180 * and start the resilver then.
1181 */
1182 if (spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER) &&
1183 vdev_clear_resilver_deferred(spa->spa_root_vdev, tx)) {
1184 spa_history_log_internal(spa,
1185 "starting deferred resilver", tx, "errors=%llu",
1186 (u_longlong_t)spa_approx_errlog_size(spa));
1187 spa_async_request(spa, SPA_ASYNC_RESILVER);
1188 }
1189
1190 /* Clear recent error events (i.e. duplicate events tracking) */
1191 if (complete)
1192 zfs_ereport_clear(spa, NULL);
1193 }
1194
1195 scn->scn_phys.scn_end_time = gethrestime_sec();
1196
1197 if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
1198 spa->spa_errata = 0;
1199
1200 ASSERT(!dsl_scan_is_running(scn));
1201 }
1202
1203 static int
dsl_errorscrub_pause_resume_check(void *arg, dmu_tx_t *tx)
1205 {
1206 pool_scrub_cmd_t *cmd = arg;
1207 dsl_pool_t *dp = dmu_tx_pool(tx);
1208 dsl_scan_t *scn = dp->dp_scan;
1209
1210 if (*cmd == POOL_SCRUB_PAUSE) {
1211 /*
		 * can't pause an error scrub when there is no error scrub
		 * in progress.
1214 */
1215 if (!dsl_errorscrubbing(dp))
1216 return (SET_ERROR(ENOENT));
1217
1218 /* can't pause a paused error scrub */
1219 if (dsl_errorscrub_is_paused(scn))
1220 return (SET_ERROR(EBUSY));
1221 } else if (*cmd != POOL_SCRUB_NORMAL) {
1222 return (SET_ERROR(ENOTSUP));
1223 }
1224
1225 return (0);
1226 }
1227
1228 static void
dsl_errorscrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
1230 {
1231 pool_scrub_cmd_t *cmd = arg;
1232 dsl_pool_t *dp = dmu_tx_pool(tx);
1233 spa_t *spa = dp->dp_spa;
1234 dsl_scan_t *scn = dp->dp_scan;
1235
1236 if (*cmd == POOL_SCRUB_PAUSE) {
1237 spa->spa_scan_pass_errorscrub_pause = gethrestime_sec();
1238 scn->errorscrub_phys.dep_paused_flags = B_TRUE;
1239 dsl_errorscrub_sync_state(scn, tx);
1240 spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_PAUSED);
1241 } else {
1242 ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
1243 if (dsl_errorscrub_is_paused(scn)) {
1244 /*
1245 * We need to keep track of how much time we spend
1246 * paused per pass so that we can adjust the error scrub
1247 * rate shown in the output of 'zpool status'.
1248 */
1249 spa->spa_scan_pass_errorscrub_spent_paused +=
1250 gethrestime_sec() -
1251 spa->spa_scan_pass_errorscrub_pause;
1252
1253 spa->spa_scan_pass_errorscrub_pause = 0;
1254 scn->errorscrub_phys.dep_paused_flags = B_FALSE;
1255
1256 zap_cursor_init_serialized(
1257 &scn->errorscrub_cursor,
1258 spa->spa_meta_objset, spa->spa_errlog_last,
1259 scn->errorscrub_phys.dep_cursor);
1260
1261 dsl_errorscrub_sync_state(scn, tx);
1262 }
1263 }
1264 }
1265
1266 static int
dsl_errorscrub_cancel_check(void *arg, dmu_tx_t *tx)
1268 {
1269 (void) arg;
1270 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	/* can't cancel an error scrub when there is none in progress */
1272 if (!dsl_errorscrubbing(scn->scn_dp))
1273 return (SET_ERROR(ENOENT));
1274 return (0);
1275 }
1276
1277 static void
dsl_errorscrub_cancel_sync(void *arg, dmu_tx_t *tx)
1279 {
1280 (void) arg;
1281 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
1282
1283 dsl_errorscrub_done(scn, B_FALSE, tx);
1284 dsl_errorscrub_sync_state(scn, tx);
1285 spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL,
1286 ESC_ZFS_ERRORSCRUB_ABORT);
1287 }
1288
1289 static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
1291 {
1292 (void) arg;
1293 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
1294
1295 if (!dsl_scan_is_running(scn))
1296 return (SET_ERROR(ENOENT));
1297 return (0);
1298 }
1299
1300 static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
1302 {
1303 (void) arg;
1304 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
1305
1306 dsl_scan_done(scn, B_FALSE, tx);
1307 dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
1308 spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT);
1309 }
1310
1311 int
dsl_scan_cancel(dsl_pool_t *dp)
1313 {
1314 if (dsl_errorscrubbing(dp)) {
1315 return (dsl_sync_task(spa_name(dp->dp_spa),
1316 dsl_errorscrub_cancel_check, dsl_errorscrub_cancel_sync,
1317 NULL, 3, ZFS_SPACE_CHECK_RESERVED));
1318 }
1319 return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
1320 dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
1321 }
1322
1323 static int
dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx)
1325 {
1326 pool_scrub_cmd_t *cmd = arg;
1327 dsl_pool_t *dp = dmu_tx_pool(tx);
1328 dsl_scan_t *scn = dp->dp_scan;
1329
1330 if (*cmd == POOL_SCRUB_PAUSE) {
1331 /* can't pause a scrub when there is no in-progress scrub */
1332 if (!dsl_scan_scrubbing(dp))
1333 return (SET_ERROR(ENOENT));
1334
1335 /* can't pause a paused scrub */
1336 if (dsl_scan_is_paused_scrub(scn))
1337 return (SET_ERROR(EBUSY));
1338 } else if (*cmd != POOL_SCRUB_NORMAL) {
1339 return (SET_ERROR(ENOTSUP));
1340 }
1341
1342 return (0);
1343 }
1344
1345 static void
dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
1347 {
1348 pool_scrub_cmd_t *cmd = arg;
1349 dsl_pool_t *dp = dmu_tx_pool(tx);
1350 spa_t *spa = dp->dp_spa;
1351 dsl_scan_t *scn = dp->dp_scan;
1352
1353 if (*cmd == POOL_SCRUB_PAUSE) {
1354 /* can't pause a scrub when there is no in-progress scrub */
1355 spa->spa_scan_pass_scrub_pause = gethrestime_sec();
1356 scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED;
1357 scn->scn_phys_cached.scn_flags |= DSF_SCRUB_PAUSED;
1358 dsl_scan_sync_state(scn, tx, SYNC_CACHED);
1359 spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
1360 spa_notify_waiters(spa);
1361 } else {
1362 ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
1363 if (dsl_scan_is_paused_scrub(scn)) {
1364 /*
1365 * We need to keep track of how much time we spend
1366 * paused per pass so that we can adjust the scrub rate
1367 * shown in the output of 'zpool status'
1368 */
1369 spa->spa_scan_pass_scrub_spent_paused +=
1370 gethrestime_sec() - spa->spa_scan_pass_scrub_pause;
1371 spa->spa_scan_pass_scrub_pause = 0;
1372 scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
1373 scn->scn_phys_cached.scn_flags &= ~DSF_SCRUB_PAUSED;
1374 dsl_scan_sync_state(scn, tx, SYNC_CACHED);
1375 }
1376 }
1377 }
1378
1379 /*
1380 * Set scrub pause/resume state if it makes sense to do so
1381 */
1382 int
dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd)
1384 {
1385 if (dsl_errorscrubbing(dp)) {
1386 return (dsl_sync_task(spa_name(dp->dp_spa),
1387 dsl_errorscrub_pause_resume_check,
1388 dsl_errorscrub_pause_resume_sync, &cmd, 3,
1389 ZFS_SPACE_CHECK_RESERVED));
1390 }
1391 return (dsl_sync_task(spa_name(dp->dp_spa),
1392 dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3,
1393 ZFS_SPACE_CHECK_RESERVED));
1394 }
1395
1396
1397 /* start a new scan, or restart an existing one. */
1398 void
dsl_scan_restart_resilver(dsl_pool_t *dp, uint64_t txg)
1400 {
1401 if (txg == 0) {
1402 dmu_tx_t *tx;
1403 tx = dmu_tx_create_dd(dp->dp_mos_dir);
1404 VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));
1405
1406 txg = dmu_tx_get_txg(tx);
1407 dp->dp_scan->scn_restart_txg = txg;
1408 dmu_tx_commit(tx);
1409 } else {
1410 dp->dp_scan->scn_restart_txg = txg;
1411 }
1412 zfs_dbgmsg("restarting resilver for %s at txg=%llu",
1413 dp->dp_spa->spa_name, (longlong_t)txg);
1414 }
1415
1416 void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
1418 {
1419 zio_free(dp->dp_spa, txg, bp);
1420 }
1421
1422 void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
1424 {
1425 ASSERT(dsl_pool_sync_context(dp));
1426 zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
1427 }
1428
1429 static int
scan_ds_queue_compare(const void *a, const void *b)
1431 {
1432 const scan_ds_t *sds_a = a, *sds_b = b;
1433
1434 if (sds_a->sds_dsobj < sds_b->sds_dsobj)
1435 return (-1);
1436 if (sds_a->sds_dsobj == sds_b->sds_dsobj)
1437 return (0);
1438 return (1);
1439 }
1440
1441 static void
scan_ds_queue_clear(dsl_scan_t *scn)
1443 {
1444 void *cookie = NULL;
1445 scan_ds_t *sds;
1446 while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) {
1447 kmem_free(sds, sizeof (*sds));
1448 }
1449 }
1450
1451 static boolean_t
scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg)
1453 {
1454 scan_ds_t srch, *sds;
1455
1456 srch.sds_dsobj = dsobj;
1457 sds = avl_find(&scn->scn_queue, &srch, NULL);
1458 if (sds != NULL && txg != NULL)
1459 *txg = sds->sds_txg;
1460 return (sds != NULL);
1461 }
1462
1463 static void
scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg)
1465 {
1466 scan_ds_t *sds;
1467 avl_index_t where;
1468
1469 sds = kmem_zalloc(sizeof (*sds), KM_SLEEP);
1470 sds->sds_dsobj = dsobj;
1471 sds->sds_txg = txg;
1472
1473 VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL);
1474 avl_insert(&scn->scn_queue, sds, where);
1475 }
1476
1477 static void
scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj)
1479 {
1480 scan_ds_t srch, *sds;
1481
1482 srch.sds_dsobj = dsobj;
1483
1484 sds = avl_find(&scn->scn_queue, &srch, NULL);
1485 VERIFY(sds != NULL);
1486 avl_remove(&scn->scn_queue, sds);
1487 kmem_free(sds, sizeof (*sds));
1488 }
1489
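/*
 * Rewrite the on-disk dataset queue ZAP from the in-core AVL tree so that
 * the persisted queue matches what the scan would resume from.
 */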
1490 static void
scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx)
1492 {
1493 dsl_pool_t *dp = scn->scn_dp;
1494 spa_t *spa = dp->dp_spa;
1495 dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ?
1496 DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER;
1497
1498 ASSERT0(scn->scn_queues_pending);
1499 ASSERT(scn->scn_phys.scn_queue_obj != 0);
1500
1501 VERIFY0(dmu_object_free(dp->dp_meta_objset,
1502 scn->scn_phys.scn_queue_obj, tx));
1503 scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot,
1504 DMU_OT_NONE, 0, tx);
1505 for (scan_ds_t *sds = avl_first(&scn->scn_queue);
1506 sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) {
1507 VERIFY0(zap_add_int_key(dp->dp_meta_objset,
1508 scn->scn_phys.scn_queue_obj, sds->sds_dsobj,
1509 sds->sds_txg, tx));
1510 }
1511 }
1512
1513 /*
1514 * Computes the memory limit state that we're currently in. A sorted scan
1515 * needs quite a bit of memory to hold the sorting queue, so we need to
1516 * reasonably constrain the size so it doesn't impact overall system
1517 * performance. We compute two limits:
1518 * 1) Hard memory limit: if the amount of memory used by the sorting
1519 * queues on a pool gets above this value, we stop the metadata
1520 * scanning portion and start issuing the queued up and sorted
1521 * I/Os to reduce memory usage.
1522 * This limit is calculated as a fraction of physmem (by default 5%).
1523 * We constrain the lower bound of the hard limit to an absolute
1524 * minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain
1525 * the upper bound to 5% of the total pool size - no chance we'll
1526 * ever need that much memory, but just to keep the value in check.
1527 * 2) Soft memory limit: once we hit the hard memory limit, we start
1528 * issuing I/O to reduce queue memory usage, but we don't want to
1529 * completely empty out the queues, since we might be able to find I/Os
1530 * that will fill in the gaps of our non-sequential IOs at some point
 *	in the future. So we keep issuing I/Os until the amount of memory
 *	used drops below the soft limit, at which point we stop issuing I/O
 *	and start scanning metadata again.
1534 *
1535 * This limit is calculated by subtracting a fraction of the hard
1536 * limit from the hard limit. By default this fraction is 5%, so
1537 * the soft limit is 95% of the hard limit. We cap the size of the
1538 * difference between the hard and soft limits at an absolute
1539 * maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is
1540 * sufficient to not cause too frequent switching between the
1541 * metadata scan and I/O issue (even at 2k recordsize, 128 MiB's
1542 * worth of queues is about 1.2 GiB of on-pool data, so scanning
1543 * that should take at least a decent fraction of a second).
1544 */
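/*
 * For example, with the default tunables on a system with 64 GiB of RAM
 * (and assuming the pool has allocated well over 64 GiB), the hard limit
 * works out to 64 GiB / zfs_scan_mem_lim_fact (20) = ~3.2 GiB, and the
 * soft limit to 3.2 GiB - MIN(3.2 GiB / 20, 128 MiB) = ~3.07 GiB.
 */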
1545 static boolean_t
dsl_scan_should_clear(dsl_scan_t *scn)
1547 {
1548 spa_t *spa = scn->scn_dp->dp_spa;
1549 vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
1550 uint64_t alloc, mlim_hard, mlim_soft, mused;
1551
1552 alloc = metaslab_class_get_alloc(spa_normal_class(spa));
1553 alloc += metaslab_class_get_alloc(spa_special_class(spa));
1554 alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
1555
1556 mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE,
1557 zfs_scan_mem_lim_min);
1558 mlim_hard = MIN(mlim_hard, alloc / 20);
1559 mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
1560 zfs_scan_mem_lim_soft_max);
1561 mused = 0;
1562 for (uint64_t i = 0; i < rvd->vdev_children; i++) {
1563 vdev_t *tvd = rvd->vdev_child[i];
1564 dsl_scan_io_queue_t *queue;
1565
1566 mutex_enter(&tvd->vdev_scan_io_queue_lock);
1567 queue = tvd->vdev_scan_io_queue;
1568 if (queue != NULL) {
1569 /*
1570 * # of extents in exts_by_addr = # in exts_by_size.
1571 * B-tree efficiency is ~75%, but can be as low as 50%.
1572 */
1573 mused += zfs_btree_numnodes(&queue->q_exts_by_size) *
1574 ((sizeof (range_seg_gap_t) + sizeof (uint64_t)) *
1575 3 / 2) + queue->q_sio_memused;
1576 }
1577 mutex_exit(&tvd->vdev_scan_io_queue_lock);
1578 }
1579
1580 dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused);
1581
1582 if (mused == 0)
1583 ASSERT0(scn->scn_queues_pending);
1584
1585 /*
1586 * If we are above our hard limit, we need to clear out memory.
1587 * If we are below our soft limit, we need to accumulate sequential IOs.
1588 * Otherwise, we should keep doing whatever we are currently doing.
1589 */
1590 if (mused >= mlim_hard)
1591 return (B_TRUE);
1592 else if (mused < mlim_soft)
1593 return (B_FALSE);
1594 else
1595 return (scn->scn_clearing);
1596 }
1597
1598 static boolean_t
1599 dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
1600 {
1601 /* we never skip user/group accounting objects */
1602 if (zb && (int64_t)zb->zb_object < 0)
1603 return (B_FALSE);
1604
1605 if (scn->scn_suspending)
1606 return (B_TRUE); /* we're already suspending */
1607
1608 if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
1609 return (B_FALSE); /* we're resuming */
1610
1611 /* We only know how to resume from level-0 and objset blocks. */
1612 if (zb && (zb->zb_level != 0 && zb->zb_level != ZB_ROOT_LEVEL))
1613 return (B_FALSE);
1614
1615 /*
1616 * We suspend if:
1617 * - we have scanned for at least the minimum time (default 1 sec
1618 * for scrub, 3 sec for resilver), and either we have sufficient
1619 * dirty data that we are starting to write more quickly
1620 * (default 30%), someone is explicitly waiting for this txg
1621 * to complete, or we have used up all of the time in the txg
1622 * timeout (default 5 sec).
1623 * or
1624 * - the spa is shutting down because this pool is being exported
1625 * or the machine is rebooting.
1626 * or
1627 * - the scan queue has reached its memory use limit
1628 */
1629 uint64_t curr_time_ns = gethrtime();
1630 uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
1631 uint64_t sync_time_ns = curr_time_ns -
1632 scn->scn_dp->dp_spa->spa_sync_starttime;
1633 uint64_t dirty_min_bytes = zfs_dirty_data_max *
1634 zfs_vdev_async_write_active_min_dirty_percent / 100;
1635 uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
1636 zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
1637
1638 if ((NSEC2MSEC(scan_time_ns) > mintime &&
1639 (scn->scn_dp->dp_dirty_total >= dirty_min_bytes ||
1640 txg_sync_waiting(scn->scn_dp) ||
1641 NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
1642 spa_shutting_down(scn->scn_dp->dp_spa) ||
1643 (zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn)) ||
1644 !ddt_walk_ready(scn->scn_dp->dp_spa)) {
1645 if (zb && zb->zb_level == ZB_ROOT_LEVEL) {
1646 dprintf("suspending at first available bookmark "
1647 "%llx/%llx/%llx/%llx\n",
1648 (longlong_t)zb->zb_objset,
1649 (longlong_t)zb->zb_object,
1650 (longlong_t)zb->zb_level,
1651 (longlong_t)zb->zb_blkid);
1652 SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
1653 zb->zb_objset, 0, 0, 0);
1654 } else if (zb != NULL) {
1655 dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n",
1656 (longlong_t)zb->zb_objset,
1657 (longlong_t)zb->zb_object,
1658 (longlong_t)zb->zb_level,
1659 (longlong_t)zb->zb_blkid);
1660 scn->scn_phys.scn_bookmark = *zb;
1661 } else {
1662 #ifdef ZFS_DEBUG
1663 dsl_scan_phys_t *scnp = &scn->scn_phys;
1664 dprintf("suspending at DDT bookmark "
1665 "%llx/%llx/%llx/%llx\n",
1666 (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
1667 (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
1668 (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
1669 (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
1670 #endif
1671 }
1672 scn->scn_suspending = B_TRUE;
1673 return (B_TRUE);
1674 }
1675 return (B_FALSE);
1676 }
1677
1678 static boolean_t
1679 dsl_error_scrub_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
1680 {
1681 /*
1682 * We suspend if:
1683 * - we have scrubbed for at least the minimum time (default 1 sec
1684 * for error scrub), someone is explicitly waiting for this txg
1685 * to complete, or we have used up all of the time in the txg
1686 * timeout (default 5 sec).
1687 * or
1688 * - the spa is shutting down because this pool is being exported
1689 * or the machine is rebooting.
1690 */
1691 uint64_t curr_time_ns = gethrtime();
1692 uint64_t error_scrub_time_ns = curr_time_ns - scn->scn_sync_start_time;
1693 uint64_t sync_time_ns = curr_time_ns -
1694 scn->scn_dp->dp_spa->spa_sync_starttime;
1695 int mintime = zfs_scrub_min_time_ms;
1696
1697 if ((NSEC2MSEC(error_scrub_time_ns) > mintime &&
1698 (txg_sync_waiting(scn->scn_dp) ||
1699 NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
1700 spa_shutting_down(scn->scn_dp->dp_spa)) {
1701 if (zb) {
1702 dprintf("error scrub suspending at bookmark "
1703 "%llx/%llx/%llx/%llx\n",
1704 (longlong_t)zb->zb_objset,
1705 (longlong_t)zb->zb_object,
1706 (longlong_t)zb->zb_level,
1707 (longlong_t)zb->zb_blkid);
1708 }
1709 return (B_TRUE);
1710 }
1711 return (B_FALSE);
1712 }
1713
1714 typedef struct zil_scan_arg {
1715 dsl_pool_t *zsa_dp;
1716 zil_header_t *zsa_zh;
1717 } zil_scan_arg_t;
1718
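/*
 * zil_parse() callback invoked for each ZIL block: issue a scan of the
 * block unless it is a hole, predates the current scan range, or belongs
 * to an unclaimed log and was born at or after spa_min_claim_txg (see the
 * "stubby" comment below).
 */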
1719 static int
1720 dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
1721 uint64_t claim_txg)
1722 {
1723 (void) zilog;
1724 zil_scan_arg_t *zsa = arg;
1725 dsl_pool_t *dp = zsa->zsa_dp;
1726 dsl_scan_t *scn = dp->dp_scan;
1727 zil_header_t *zh = zsa->zsa_zh;
1728 zbookmark_phys_t zb;
1729
1730 ASSERT(!BP_IS_REDACTED(bp));
1731 if (BP_IS_HOLE(bp) ||
1732 BP_GET_LOGICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg)
1733 return (0);
1734
1735 /*
1736 * One block ("stubby") can be allocated a long time ago; we
1737 * want to visit that one because it has been allocated
1738 * (on-disk) even if it hasn't been claimed (even though for
1739 * scrub there's nothing to do to it).
1740 */
1741 if (claim_txg == 0 &&
1742 BP_GET_LOGICAL_BIRTH(bp) >= spa_min_claim_txg(dp->dp_spa))
1743 return (0);
1744
1745 SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
1746 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
1747
1748 VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
1749 return (0);
1750 }
1751
1752 static int
1753 dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
1754 uint64_t claim_txg)
1755 {
1756 (void) zilog;
1757 if (lrc->lrc_txtype == TX_WRITE) {
1758 zil_scan_arg_t *zsa = arg;
1759 dsl_pool_t *dp = zsa->zsa_dp;
1760 dsl_scan_t *scn = dp->dp_scan;
1761 zil_header_t *zh = zsa->zsa_zh;
1762 const lr_write_t *lr = (const lr_write_t *)lrc;
1763 const blkptr_t *bp = &lr->lr_blkptr;
1764 zbookmark_phys_t zb;
1765
1766 ASSERT(!BP_IS_REDACTED(bp));
1767 if (BP_IS_HOLE(bp) ||
1768 BP_GET_LOGICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg)
1769 return (0);
1770
1771 /*
1772 * birth can be < claim_txg if this record's txg is
1773 * already txg sync'ed (but this log block contains
1774 * other records that are not synced)
1775 */
1776 if (claim_txg == 0 || BP_GET_LOGICAL_BIRTH(bp) < claim_txg)
1777 return (0);
1778
1779 ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
1780 SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
1781 lr->lr_foid, ZB_ZIL_LEVEL,
1782 lr->lr_offset / BP_GET_LSIZE(bp));
1783
1784 VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
1785 }
1786 return (0);
1787 }
1788
1789 static void
1790 dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
1791 {
1792 uint64_t claim_txg = zh->zh_claim_txg;
1793 zil_scan_arg_t zsa = { dp, zh };
1794 zilog_t *zilog;
1795
1796 ASSERT(spa_writeable(dp->dp_spa));
1797
1798 /*
1799 * We only want to visit blocks that have been claimed but not yet
1800 * replayed (or, in read-only mode, blocks that *would* be claimed).
1801 */
1802 if (claim_txg == 0)
1803 return;
1804
1805 zilog = zil_alloc(dp->dp_meta_objset, zh);
1806
1807 (void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
1808 claim_txg, B_FALSE);
1809
1810 zil_free(zilog);
1811 }
1812
1813 /*
1814 * We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea
1815 * here is to sort the AVL tree by the order each block will be needed.
1816 */
1817 static int
1818 scan_prefetch_queue_compare(const void *a, const void *b)
1819 {
1820 const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b;
1821 const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc;
1822 const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc;
1823
1824 return (zbookmark_compare(spc_a->spc_datablkszsec,
1825 spc_a->spc_indblkshift, spc_b->spc_datablkszsec,
1826 spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb));
1827 }
1828
1829 static void
1830 scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, const void *tag)
1831 {
1832 if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) {
1833 zfs_refcount_destroy(&spc->spc_refcnt);
1834 kmem_free(spc, sizeof (scan_prefetch_ctx_t));
1835 }
1836 }
1837
1838 static scan_prefetch_ctx_t *
1839 scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, const void *tag)
1840 {
1841 scan_prefetch_ctx_t *spc;
1842
1843 spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP);
1844 zfs_refcount_create(&spc->spc_refcnt);
1845 zfs_refcount_add(&spc->spc_refcnt, tag);
1846 spc->spc_scn = scn;
1847 if (dnp != NULL) {
1848 spc->spc_datablkszsec = dnp->dn_datablkszsec;
1849 spc->spc_indblkshift = dnp->dn_indblkshift;
1850 spc->spc_root = B_FALSE;
1851 } else {
1852 spc->spc_datablkszsec = 0;
1853 spc->spc_indblkshift = 0;
1854 spc->spc_root = B_TRUE;
1855 }
1856
1857 return (spc);
1858 }
1859
1860 static void
1861 scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, const void *tag)
1862 {
1863 zfs_refcount_add(&spc->spc_refcnt, tag);
1864 }
1865
1866 static void
1867 scan_ds_prefetch_queue_clear(dsl_scan_t *scn)
1868 {
1869 spa_t *spa = scn->scn_dp->dp_spa;
1870 void *cookie = NULL;
1871 scan_prefetch_issue_ctx_t *spic = NULL;
1872
1873 mutex_enter(&spa->spa_scrub_lock);
1874 while ((spic = avl_destroy_nodes(&scn->scn_prefetch_queue,
1875 &cookie)) != NULL) {
1876 scan_prefetch_ctx_rele(spic->spic_spc, scn);
1877 kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
1878 }
1879 mutex_exit(&spa->spa_scrub_lock);
1880 }
1881
1882 static boolean_t
1883 dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc,
1884 const zbookmark_phys_t *zb)
1885 {
1886 zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark;
1887 dnode_phys_t tmp_dnp;
1888 dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp;
1889
1890 if (zb->zb_objset != last_zb->zb_objset)
1891 return (B_TRUE);
1892 if ((int64_t)zb->zb_object < 0)
1893 return (B_FALSE);
1894
1895 tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec;
1896 tmp_dnp.dn_indblkshift = spc->spc_indblkshift;
1897
1898 if (zbookmark_subtree_completed(dnp, zb, last_zb))
1899 return (B_TRUE);
1900
1901 return (B_FALSE);
1902 }
1903
1904 static void
1905 dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb)
1906 {
1907 avl_index_t idx;
1908 dsl_scan_t *scn = spc->spc_scn;
1909 spa_t *spa = scn->scn_dp->dp_spa;
1910 scan_prefetch_issue_ctx_t *spic;
1911
1912 if (zfs_no_scrub_prefetch || BP_IS_REDACTED(bp))
1913 return;
1914
1915 if (BP_IS_HOLE(bp) ||
1916 BP_GET_LOGICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg ||
1917 (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE &&
1918 BP_GET_TYPE(bp) != DMU_OT_OBJSET))
1919 return;
1920
1921 if (dsl_scan_check_prefetch_resume(spc, zb))
1922 return;
1923
1924 scan_prefetch_ctx_add_ref(spc, scn);
1925 spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP);
1926 spic->spic_spc = spc;
1927 spic->spic_bp = *bp;
1928 spic->spic_zb = *zb;
1929
1930 /*
1931 * Add the IO to the queue of blocks to prefetch. This allows us to
1932 * prioritize blocks that we will need first for the main traversal
1933 * thread.
1934 */
1935 mutex_enter(&spa->spa_scrub_lock);
1936 if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) {
1937 /* this block is already queued for prefetch */
1938 kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
1939 scan_prefetch_ctx_rele(spc, scn);
1940 mutex_exit(&spa->spa_scrub_lock);
1941 return;
1942 }
1943
1944 avl_insert(&scn->scn_prefetch_queue, spic, idx);
1945 cv_broadcast(&spa->spa_scrub_io_cv);
1946 mutex_exit(&spa->spa_scrub_lock);
1947 }
1948
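/*
 * Queue prefetches for every block pointer of a dnode, plus its spill
 * block if DNODE_FLAG_SPILL_BLKPTR is set.
 */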
1949 static void
1950 dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp,
1951 uint64_t objset, uint64_t object)
1952 {
1953 int i;
1954 zbookmark_phys_t zb;
1955 scan_prefetch_ctx_t *spc;
1956
1957 if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
1958 return;
1959
1960 SET_BOOKMARK(&zb, objset, object, 0, 0);
1961
1962 spc = scan_prefetch_ctx_create(scn, dnp, FTAG);
1963
1964 for (i = 0; i < dnp->dn_nblkptr; i++) {
1965 zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]);
1966 zb.zb_blkid = i;
1967 dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb);
1968 }
1969
1970 if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
1971 zb.zb_level = 0;
1972 zb.zb_blkid = DMU_SPILL_BLKID;
1973 dsl_scan_prefetch(spc, DN_SPILL_BLKPTR(dnp), &zb);
1974 }
1975
1976 scan_prefetch_ctx_rele(spc, FTAG);
1977 }
1978
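/*
 * arc_read() completion callback for prefetch I/Os. Accounts the finished
 * read against spa_scrub_inflight, then queues prefetches for the block's
 * children: child block pointers for indirect blocks, dnodes for dnode
 * blocks, and the meta/special dnodes for objset blocks.
 */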
1979 static void
1980 dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
1981 arc_buf_t *buf, void *private)
1982 {
1983 (void) zio;
1984 scan_prefetch_ctx_t *spc = private;
1985 dsl_scan_t *scn = spc->spc_scn;
1986 spa_t *spa = scn->scn_dp->dp_spa;
1987
1988 /* broadcast that the IO has completed for rate limiting purposes */
1989 mutex_enter(&spa->spa_scrub_lock);
1990 ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
1991 spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
1992 cv_broadcast(&spa->spa_scrub_io_cv);
1993 mutex_exit(&spa->spa_scrub_lock);
1994
1995 /* if there was an error or we are done prefetching, just cleanup */
1996 if (buf == NULL || scn->scn_prefetch_stop)
1997 goto out;
1998
1999 if (BP_GET_LEVEL(bp) > 0) {
2000 int i;
2001 blkptr_t *cbp;
2002 int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
2003 zbookmark_phys_t czb;
2004
2005 for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
2006 SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
2007 zb->zb_level - 1, zb->zb_blkid * epb + i);
2008 dsl_scan_prefetch(spc, cbp, &czb);
2009 }
2010 } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
2011 dnode_phys_t *cdnp;
2012 int i;
2013 int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
2014
2015 for (i = 0, cdnp = buf->b_data; i < epb;
2016 i += cdnp->dn_extra_slots + 1,
2017 cdnp += cdnp->dn_extra_slots + 1) {
2018 dsl_scan_prefetch_dnode(scn, cdnp,
2019 zb->zb_objset, zb->zb_blkid * epb + i);
2020 }
2021 } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
2022 objset_phys_t *osp = buf->b_data;
2023
2024 dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode,
2025 zb->zb_objset, DMU_META_DNODE_OBJECT);
2026
2027 if (OBJSET_BUF_HAS_USERUSED(buf)) {
2028 if (OBJSET_BUF_HAS_PROJECTUSED(buf)) {
2029 dsl_scan_prefetch_dnode(scn,
2030 &osp->os_projectused_dnode, zb->zb_objset,
2031 DMU_PROJECTUSED_OBJECT);
2032 }
2033 dsl_scan_prefetch_dnode(scn,
2034 &osp->os_groupused_dnode, zb->zb_objset,
2035 DMU_GROUPUSED_OBJECT);
2036 dsl_scan_prefetch_dnode(scn,
2037 &osp->os_userused_dnode, zb->zb_objset,
2038 DMU_USERUSED_OBJECT);
2039 }
2040 }
2041
2042 out:
2043 if (buf != NULL)
2044 arc_buf_destroy(buf, private);
2045 scan_prefetch_ctx_rele(spc, scn);
2046 }
2047
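/*
 * Dedicated prefetch thread. Pulls the lowest-bookmark entry off
 * scn_prefetch_queue, issues it as an asynchronous ARC prefetch (throttled
 * by scn_maxinflight_bytes), and repeats until told to stop, at which point
 * any remaining queued prefetches are released without being issued.
 */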
2048 static void
2049 dsl_scan_prefetch_thread(void *arg)
2050 {
2051 dsl_scan_t *scn = arg;
2052 spa_t *spa = scn->scn_dp->dp_spa;
2053 scan_prefetch_issue_ctx_t *spic;
2054
2055 /* loop until we are told to stop */
2056 while (!scn->scn_prefetch_stop) {
2057 arc_flags_t flags = ARC_FLAG_NOWAIT |
2058 ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH;
2059 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
2060
2061 mutex_enter(&spa->spa_scrub_lock);
2062
2063 /*
2064 * Wait until we have an IO to issue and are not above our
2065 * maximum in flight limit.
2066 */
2067 while (!scn->scn_prefetch_stop &&
2068 (avl_numnodes(&scn->scn_prefetch_queue) == 0 ||
2069 spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) {
2070 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
2071 }
2072
2073 /* recheck if we should stop since we waited for the cv */
2074 if (scn->scn_prefetch_stop) {
2075 mutex_exit(&spa->spa_scrub_lock);
2076 break;
2077 }
2078
2079 /* remove the prefetch IO from the tree */
2080 spic = avl_first(&scn->scn_prefetch_queue);
2081 spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp);
2082 avl_remove(&scn->scn_prefetch_queue, spic);
2083
2084 mutex_exit(&spa->spa_scrub_lock);
2085
2086 if (BP_IS_PROTECTED(&spic->spic_bp)) {
2087 ASSERT(BP_GET_TYPE(&spic->spic_bp) == DMU_OT_DNODE ||
2088 BP_GET_TYPE(&spic->spic_bp) == DMU_OT_OBJSET);
2089 ASSERT3U(BP_GET_LEVEL(&spic->spic_bp), ==, 0);
2090 zio_flags |= ZIO_FLAG_RAW;
2091 }
2092
2093 /* We don't need data L1 buffer since we do not prefetch L0. */
2094 blkptr_t *bp = &spic->spic_bp;
2095 if (BP_GET_LEVEL(bp) == 1 && BP_GET_TYPE(bp) != DMU_OT_DNODE &&
2096 BP_GET_TYPE(bp) != DMU_OT_OBJSET)
2097 flags |= ARC_FLAG_NO_BUF;
2098
2099 /* issue the prefetch asynchronously */
2100 (void) arc_read(scn->scn_zio_root, spa, bp,
2101 dsl_scan_prefetch_cb, spic->spic_spc, ZIO_PRIORITY_SCRUB,
2102 zio_flags, &flags, &spic->spic_zb);
2103
2104 kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
2105 }
2106
2107 ASSERT(scn->scn_prefetch_stop);
2108
2109 /* free any prefetches we didn't get to complete */
2110 mutex_enter(&spa->spa_scrub_lock);
2111 while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) {
2112 avl_remove(&scn->scn_prefetch_queue, spic);
2113 scan_prefetch_ctx_rele(spic->spic_spc, scn);
2114 kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
2115 }
2116 ASSERT0(avl_numnodes(&scn->scn_prefetch_queue));
2117 mutex_exit(&spa->spa_scrub_lock);
2118 }
2119
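/*
 * While resuming from a saved bookmark, returns B_TRUE if this block (and
 * everything below it) was already visited in a prior txg sync and can be
 * skipped. Clears the bookmark once we reach or pass the resume point.
 */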
2120 static boolean_t
2121 dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
2122 const zbookmark_phys_t *zb)
2123 {
2124 /*
2125 * We never skip over user/group accounting objects (obj<0)
2126 */
2127 if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
2128 (int64_t)zb->zb_object >= 0) {
2129 /*
2130 * If we already visited this bp & everything below (in
2131 * a prior txg sync), don't bother doing it again.
2132 */
2133 if (zbookmark_subtree_completed(dnp, zb,
2134 &scn->scn_phys.scn_bookmark))
2135 return (B_TRUE);
2136
2137 /*
2138 * If we found the block we're trying to resume from, or
2139 * we went past it, zero it out to indicate that it's OK
2140 * to start checking for suspending again.
2141 */
2142 if (zbookmark_subtree_tbd(dnp, zb,
2143 &scn->scn_phys.scn_bookmark)) {
2144 dprintf("resuming at %llx/%llx/%llx/%llx\n",
2145 (longlong_t)zb->zb_objset,
2146 (longlong_t)zb->zb_object,
2147 (longlong_t)zb->zb_level,
2148 (longlong_t)zb->zb_blkid);
2149 memset(&scn->scn_phys.scn_bookmark, 0, sizeof (*zb));
2150 }
2151 }
2152 return (B_FALSE);
2153 }
2154
2155 static void dsl_scan_visitbp(const blkptr_t *bp, const zbookmark_phys_t *zb,
2156 dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
2157 dmu_objset_type_t ostype, dmu_tx_t *tx);
2158 inline __attribute__((always_inline)) static void dsl_scan_visitdnode(
2159 dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype,
2160 dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);
2161
2162 /*
2163 * Read bp if it is metadata (an indirect, dnode, or objset block) and
2164 * recursively visit its children. Return nonzero on i/o error.
2165 */
2166 inline __attribute__((always_inline)) static int
2167 dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
2168 dnode_phys_t *dnp, const blkptr_t *bp,
2169 const zbookmark_phys_t *zb, dmu_tx_t *tx)
2170 {
2171 dsl_pool_t *dp = scn->scn_dp;
2172 spa_t *spa = dp->dp_spa;
2173 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
2174 int err;
2175
2176 ASSERT(!BP_IS_REDACTED(bp));
2177
2178 /*
2179 * There is an unlikely case of encountering dnodes with contradicting
2180 * dn_bonuslen and DNODE_FLAG_SPILL_BLKPTR flag in files created or
2181 * modified before commit 4254acb was merged. As it is not possible
2182 * to know which of the two is correct, report an error.
2183 */
2184 if (dnp != NULL &&
2185 dnp->dn_bonuslen > DN_MAX_BONUS_LEN(dnp)) {
2186 scn->scn_phys.scn_errors++;
2187 spa_log_error(spa, zb, BP_GET_LOGICAL_BIRTH(bp));
2188 return (SET_ERROR(EINVAL));
2189 }
2190
2191 if (BP_GET_LEVEL(bp) > 0) {
2192 arc_flags_t flags = ARC_FLAG_WAIT;
2193 int i;
2194 blkptr_t *cbp;
2195 int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
2196 arc_buf_t *buf;
2197
2198 err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
2199 ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
2200 if (err) {
2201 scn->scn_phys.scn_errors++;
2202 return (err);
2203 }
2204 for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
2205 zbookmark_phys_t czb;
2206
2207 SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
2208 zb->zb_level - 1,
2209 zb->zb_blkid * epb + i);
2210 dsl_scan_visitbp(cbp, &czb, dnp,
2211 ds, scn, ostype, tx);
2212 }
2213 arc_buf_destroy(buf, &buf);
2214 } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
2215 arc_flags_t flags = ARC_FLAG_WAIT;
2216 dnode_phys_t *cdnp;
2217 int i;
2218 int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
2219 arc_buf_t *buf;
2220
2221 if (BP_IS_PROTECTED(bp)) {
2222 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
2223 zio_flags |= ZIO_FLAG_RAW;
2224 }
2225
2226 err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
2227 ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
2228 if (err) {
2229 scn->scn_phys.scn_errors++;
2230 return (err);
2231 }
2232 for (i = 0, cdnp = buf->b_data; i < epb;
2233 i += cdnp->dn_extra_slots + 1,
2234 cdnp += cdnp->dn_extra_slots + 1) {
2235 dsl_scan_visitdnode(scn, ds, ostype,
2236 cdnp, zb->zb_blkid * epb + i, tx);
2237 }
2238
2239 arc_buf_destroy(buf, &buf);
2240 } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
2241 arc_flags_t flags = ARC_FLAG_WAIT;
2242 objset_phys_t *osp;
2243 arc_buf_t *buf;
2244
2245 err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
2246 ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
2247 if (err) {
2248 scn->scn_phys.scn_errors++;
2249 return (err);
2250 }
2251
2252 osp = buf->b_data;
2253
2254 dsl_scan_visitdnode(scn, ds, osp->os_type,
2255 &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);
2256
2257 if (OBJSET_BUF_HAS_USERUSED(buf)) {
2258 /*
2259 * We also always visit user/group/project accounting
2260 * objects, and never skip them, even if we are
2261 * suspending. This is necessary so that the
2262 * space deltas from this txg get integrated.
2263 */
2264 if (OBJSET_BUF_HAS_PROJECTUSED(buf))
2265 dsl_scan_visitdnode(scn, ds, osp->os_type,
2266 &osp->os_projectused_dnode,
2267 DMU_PROJECTUSED_OBJECT, tx);
2268 dsl_scan_visitdnode(scn, ds, osp->os_type,
2269 &osp->os_groupused_dnode,
2270 DMU_GROUPUSED_OBJECT, tx);
2271 dsl_scan_visitdnode(scn, ds, osp->os_type,
2272 &osp->os_userused_dnode,
2273 DMU_USERUSED_OBJECT, tx);
2274 }
2275 arc_buf_destroy(buf, &buf);
2276 } else if (!zfs_blkptr_verify(spa, bp,
2277 BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
2278 /*
2279 * Sanity check the block pointer contents, this is handled
2280 * by arc_read() for the cases above.
2281 */
2282 scn->scn_phys.scn_errors++;
2283 spa_log_error(spa, zb, BP_GET_LOGICAL_BIRTH(bp));
2284 return (SET_ERROR(EINVAL));
2285 }
2286
2287 return (0);
2288 }
2289
2290 inline __attribute__((always_inline)) static void
2291 dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
2292 dmu_objset_type_t ostype, dnode_phys_t *dnp,
2293 uint64_t object, dmu_tx_t *tx)
2294 {
2295 int j;
2296
2297 for (j = 0; j < dnp->dn_nblkptr; j++) {
2298 zbookmark_phys_t czb;
2299
2300 SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
2301 dnp->dn_nlevels - 1, j);
2302 dsl_scan_visitbp(&dnp->dn_blkptr[j],
2303 &czb, dnp, ds, scn, ostype, tx);
2304 }
2305
2306 if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
2307 zbookmark_phys_t czb;
2308 SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
2309 0, DMU_SPILL_BLKID);
2310 dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp),
2311 &czb, dnp, ds, scn, ostype, tx);
2312 }
2313 }
2314
2315 /*
2316 * The arguments are in this order because mdb can only print the
2317 * first 5; we want them to be useful.
2318 */
2319 static void
2320 dsl_scan_visitbp(const blkptr_t *bp, const zbookmark_phys_t *zb,
2321 dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
2322 dmu_objset_type_t ostype, dmu_tx_t *tx)
2323 {
2324 dsl_pool_t *dp = scn->scn_dp;
2325
2326 if (dsl_scan_check_suspend(scn, zb))
2327 return;
2328
2329 if (dsl_scan_check_resume(scn, dnp, zb))
2330 return;
2331
2332 scn->scn_visited_this_txg++;
2333
2334 if (BP_IS_HOLE(bp)) {
2335 scn->scn_holes_this_txg++;
2336 return;
2337 }
2338
2339 if (BP_IS_REDACTED(bp)) {
2340 ASSERT(dsl_dataset_feature_is_active(ds,
2341 SPA_FEATURE_REDACTED_DATASETS));
2342 return;
2343 }
2344
2345 /*
2346 * Check if this block contradicts any filesystem flags.
2347 */
2348 spa_feature_t f = SPA_FEATURE_LARGE_BLOCKS;
2349 if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE)
2350 ASSERT(dsl_dataset_feature_is_active(ds, f));
2351
2352 f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
2353 if (f != SPA_FEATURE_NONE)
2354 ASSERT(dsl_dataset_feature_is_active(ds, f));
2355
2356 f = zio_compress_to_feature(BP_GET_COMPRESS(bp));
2357 if (f != SPA_FEATURE_NONE)
2358 ASSERT(dsl_dataset_feature_is_active(ds, f));
2359
2360 if (BP_GET_LOGICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg) {
2361 scn->scn_lt_min_this_txg++;
2362 return;
2363 }
2364
2365 if (dsl_scan_recurse(scn, ds, ostype, dnp, bp, zb, tx) != 0)
2366 return;
2367
2368 /*
2369 * If dsl_scan_ddt() has already visited this block, it will have
2370 * already done any translations or scrubbing, so don't call the
2371 * callback again.
2372 */
2373 if (ddt_class_contains(dp->dp_spa,
2374 scn->scn_phys.scn_ddt_class_max, bp)) {
2375 scn->scn_ddt_contained_this_txg++;
2376 return;
2377 }
2378
2379 /*
2380 * If this block is from the future (after cur_max_txg), then we
2381 * are doing this on behalf of a deleted snapshot, and we will
2382 * revisit the future block on the next pass of this dataset.
2383 * Don't scan it now unless we need to because something
2384 * under it was modified.
2385 */
2386 if (BP_GET_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) {
2387 scn->scn_gt_max_this_txg++;
2388 return;
2389 }
2390
2391 scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
2392 }
2393
2394 static void
2395 dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
2396 dmu_tx_t *tx)
2397 {
2398 zbookmark_phys_t zb;
2399 scan_prefetch_ctx_t *spc;
2400
2401 SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
2402 ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
2403
2404 if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) {
2405 SET_BOOKMARK(&scn->scn_prefetch_bookmark,
2406 zb.zb_objset, 0, 0, 0);
2407 } else {
2408 scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark;
2409 }
2410
2411 scn->scn_objsets_visited_this_txg++;
2412
2413 spc = scan_prefetch_ctx_create(scn, NULL, FTAG);
2414 dsl_scan_prefetch(spc, bp, &zb);
2415 scan_prefetch_ctx_rele(spc, FTAG);
2416
2417 dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx);
2418
2419 dprintf_ds(ds, "finished scan%s", "");
2420 }
2421
2422 static void
2423 ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys)
2424 {
2425 if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) {
2426 if (ds->ds_is_snapshot) {
2427 /*
2428 * Note:
2429 * - scn_cur_{min,max}_txg stays the same.
2430 * - Setting the flag is not really necessary if
2431 * scn_cur_max_txg == scn_max_txg, because there
2432 * is nothing after this snapshot that we care
2433 * about. However, we set it anyway and then
2434 * ignore it when we retraverse it in
2435 * dsl_scan_visitds().
2436 */
2437 scn_phys->scn_bookmark.zb_objset =
2438 dsl_dataset_phys(ds)->ds_next_snap_obj;
2439 zfs_dbgmsg("destroying ds %llu on %s; currently "
2440 "traversing; reset zb_objset to %llu",
2441 (u_longlong_t)ds->ds_object,
2442 ds->ds_dir->dd_pool->dp_spa->spa_name,
2443 (u_longlong_t)dsl_dataset_phys(ds)->
2444 ds_next_snap_obj);
2445 scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN;
2446 } else {
2447 SET_BOOKMARK(&scn_phys->scn_bookmark,
2448 ZB_DESTROYED_OBJSET, 0, 0, 0);
2449 zfs_dbgmsg("destroying ds %llu on %s; currently "
2450 "traversing; reset bookmark to -1,0,0,0",
2451 (u_longlong_t)ds->ds_object,
2452 ds->ds_dir->dd_pool->dp_spa->spa_name);
2453 }
2454 }
2455 }
2456
2457 /*
2458 * Invoked when a dataset is destroyed. We need to make sure that:
2459 *
2460 * 1) If it is the dataset that is currently being scanned, we write
2461 * a new dsl_scan_phys_t and mark the objset reference in it
2462 * as destroyed.
2463 * 2) Remove it from the work queue, if it was present.
2464 *
2465 * If the dataset was actually a snapshot, instead of marking the dataset
2466 * as destroyed, we instead substitute the next snapshot in line.
2467 */
2468 void
2469 dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
2470 {
2471 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2472 dsl_scan_t *scn = dp->dp_scan;
2473 uint64_t mintxg;
2474
2475 if (!dsl_scan_is_running(scn))
2476 return;
2477
2478 ds_destroyed_scn_phys(ds, &scn->scn_phys);
2479 ds_destroyed_scn_phys(ds, &scn->scn_phys_cached);
2480
2481 if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
2482 scan_ds_queue_remove(scn, ds->ds_object);
2483 if (ds->ds_is_snapshot)
2484 scan_ds_queue_insert(scn,
2485 dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg);
2486 }
2487
2488 if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
2489 ds->ds_object, &mintxg) == 0) {
2490 ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
2491 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2492 scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
2493 if (ds->ds_is_snapshot) {
2494 /*
2495 * We keep the same mintxg; it could be >
2496 * ds_creation_txg if the previous snapshot was
2497 * deleted too.
2498 */
2499 VERIFY(zap_add_int_key(dp->dp_meta_objset,
2500 scn->scn_phys.scn_queue_obj,
2501 dsl_dataset_phys(ds)->ds_next_snap_obj,
2502 mintxg, tx) == 0);
2503 zfs_dbgmsg("destroying ds %llu on %s; in queue; "
2504 "replacing with %llu",
2505 (u_longlong_t)ds->ds_object,
2506 dp->dp_spa->spa_name,
2507 (u_longlong_t)dsl_dataset_phys(ds)->
2508 ds_next_snap_obj);
2509 } else {
2510 zfs_dbgmsg("destroying ds %llu on %s; in queue; "
2511 "removing",
2512 (u_longlong_t)ds->ds_object,
2513 dp->dp_spa->spa_name);
2514 }
2515 }
2516
2517 /*
2518 * dsl_scan_sync() should be called after this, and should sync
2519 * out our changed state, but just to be safe, do it here.
2520 */
2521 dsl_scan_sync_state(scn, tx, SYNC_CACHED);
2522 }
2523
2524 static void
2525 ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark)
2526 {
2527 if (scn_bookmark->zb_objset == ds->ds_object) {
2528 scn_bookmark->zb_objset =
2529 dsl_dataset_phys(ds)->ds_prev_snap_obj;
2530 zfs_dbgmsg("snapshotting ds %llu on %s; currently traversing; "
2531 "reset zb_objset to %llu",
2532 (u_longlong_t)ds->ds_object,
2533 ds->ds_dir->dd_pool->dp_spa->spa_name,
2534 (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
2535 }
2536 }
2537
2538 /*
2539 * Called when a dataset is snapshotted. If we were currently traversing
2540 * this snapshot, we reset our bookmark to point at the newly created
2541 * snapshot. We also modify our work queue to remove the old snapshot and
2542 * replace it with the new one.
2543 */
2544 void
2545 dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
2546 {
2547 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2548 dsl_scan_t *scn = dp->dp_scan;
2549 uint64_t mintxg;
2550
2551 if (!dsl_scan_is_running(scn))
2552 return;
2553
2554 ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);
2555
2556 ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark);
2557 ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark);
2558
2559 if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
2560 scan_ds_queue_remove(scn, ds->ds_object);
2561 scan_ds_queue_insert(scn,
2562 dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg);
2563 }
2564
2565 if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
2566 ds->ds_object, &mintxg) == 0) {
2567 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2568 scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
2569 VERIFY(zap_add_int_key(dp->dp_meta_objset,
2570 scn->scn_phys.scn_queue_obj,
2571 dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
2572 zfs_dbgmsg("snapshotting ds %llu on %s; in queue; "
2573 "replacing with %llu",
2574 (u_longlong_t)ds->ds_object,
2575 dp->dp_spa->spa_name,
2576 (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
2577 }
2578
2579 dsl_scan_sync_state(scn, tx, SYNC_CACHED);
2580 }
2581
2582 static void
2583 ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2,
2584 zbookmark_phys_t *scn_bookmark)
2585 {
2586 if (scn_bookmark->zb_objset == ds1->ds_object) {
2587 scn_bookmark->zb_objset = ds2->ds_object;
2588 zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; "
2589 "reset zb_objset to %llu",
2590 (u_longlong_t)ds1->ds_object,
2591 ds1->ds_dir->dd_pool->dp_spa->spa_name,
2592 (u_longlong_t)ds2->ds_object);
2593 } else if (scn_bookmark->zb_objset == ds2->ds_object) {
2594 scn_bookmark->zb_objset = ds1->ds_object;
2595 zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; "
2596 "reset zb_objset to %llu",
2597 (u_longlong_t)ds2->ds_object,
2598 ds2->ds_dir->dd_pool->dp_spa->spa_name,
2599 (u_longlong_t)ds1->ds_object);
2600 }
2601 }
2602
2603 /*
2604 * Called when an origin dataset and its clone are swapped. If we were
2605 * currently traversing the dataset, we need to switch to traversing the
2606 * newly promoted clone.
2607 */
2608 void
2609 dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
2610 {
2611 dsl_pool_t *dp = ds1->ds_dir->dd_pool;
2612 dsl_scan_t *scn = dp->dp_scan;
2613 uint64_t mintxg1, mintxg2;
2614 boolean_t ds1_queued, ds2_queued;
2615
2616 if (!dsl_scan_is_running(scn))
2617 return;
2618
2619 ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark);
2620 ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark);
2621
2622 /*
2623 * Handle the in-memory scan queue.
2624 */
2625 ds1_queued = scan_ds_queue_contains(scn, ds1->ds_object, &mintxg1);
2626 ds2_queued = scan_ds_queue_contains(scn, ds2->ds_object, &mintxg2);
2627
2628 /* Sanity checking. */
2629 if (ds1_queued) {
2630 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
2631 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
2632 }
2633 if (ds2_queued) {
2634 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
2635 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
2636 }
2637
2638 if (ds1_queued && ds2_queued) {
2639 /*
2640 * If both are queued, we don't need to do anything.
2641 * The swapping code below would not handle this case correctly,
2642 * since we can't insert ds2 if it is already there. That's
2643 * because scan_ds_queue_insert() prohibits a duplicate insert
2644 * and panics.
2645 */
2646 } else if (ds1_queued) {
2647 scan_ds_queue_remove(scn, ds1->ds_object);
2648 scan_ds_queue_insert(scn, ds2->ds_object, mintxg1);
2649 } else if (ds2_queued) {
2650 scan_ds_queue_remove(scn, ds2->ds_object);
2651 scan_ds_queue_insert(scn, ds1->ds_object, mintxg2);
2652 }
2653
2654 /*
2655 * Handle the on-disk scan queue.
2656 * The on-disk state is an out-of-date version of the in-memory state,
2657 * so the in-memory and on-disk values for ds1_queued and ds2_queued may
2658 * be different. Therefore we need to apply the swap logic to the
2659 * on-disk state independently of the in-memory state.
2660 */
2661 ds1_queued = zap_lookup_int_key(dp->dp_meta_objset,
2662 scn->scn_phys.scn_queue_obj, ds1->ds_object, &mintxg1) == 0;
2663 ds2_queued = zap_lookup_int_key(dp->dp_meta_objset,
2664 scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg2) == 0;
2665
2666 /* Sanity checking. */
2667 if (ds1_queued) {
2668 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
2669 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
2670 }
2671 if (ds2_queued) {
2672 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
2673 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
2674 }
2675
2676 if (ds1_queued && ds2_queued) {
2677 /*
2678 * If both are queued, we don't need to do anything.
2679 * Alternatively, we could check for EEXIST from
2680 * zap_add_int_key() and back out to the original state, but
2681 * that would be more work than checking for this case upfront.
2682 */
2683 } else if (ds1_queued) {
2684 VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
2685 scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
2686 VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
2687 scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg1, tx));
2688 zfs_dbgmsg("clone_swap ds %llu on %s; in queue; "
2689 "replacing with %llu",
2690 (u_longlong_t)ds1->ds_object,
2691 dp->dp_spa->spa_name,
2692 (u_longlong_t)ds2->ds_object);
2693 } else if (ds2_queued) {
2694 VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
2695 scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
2696 VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
2697 scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg2, tx));
2698 zfs_dbgmsg("clone_swap ds %llu on %s; in queue; "
2699 "replacing with %llu",
2700 (u_longlong_t)ds2->ds_object,
2701 dp->dp_spa->spa_name,
2702 (u_longlong_t)ds1->ds_object);
2703 }
2704
2705 dsl_scan_sync_state(scn, tx, SYNC_CACHED);
2706 }
2707
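/*
 * dmu_objset_find_dp() callback used to enqueue the clones of a snapshot:
 * for each head dataset whose origin is originobj, walk back to the first
 * dataset after the origin snapshot and add it to the in-memory scan queue
 * (keyed by its ds_prev_snap_txg).
 */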
2708 static int
2709 enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
2710 {
2711 uint64_t originobj = *(uint64_t *)arg;
2712 dsl_dataset_t *ds;
2713 int err;
2714 dsl_scan_t *scn = dp->dp_scan;
2715
2716 if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj)
2717 return (0);
2718
2719 err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
2720 if (err)
2721 return (err);
2722
2723 while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) {
2724 dsl_dataset_t *prev;
2725 err = dsl_dataset_hold_obj(dp,
2726 dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
2727
2728 dsl_dataset_rele(ds, FTAG);
2729 if (err)
2730 return (err);
2731 ds = prev;
2732 }
2733 mutex_enter(&scn->scn_queue_lock);
2734 scan_ds_queue_insert(scn, ds->ds_object,
2735 dsl_dataset_phys(ds)->ds_prev_snap_txg);
2736 mutex_exit(&scn->scn_queue_lock);
2737 dsl_dataset_rele(ds, FTAG);
2738 return (0);
2739 }
2740
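/*
 * Scan a single dataset: traverse its ZIL (for head datasets), then its
 * root block pointer. If the pass completes without suspending, enqueue
 * the next snapshot and, when this snapshot has clones, the clones as well.
 */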
2741 static void
2742 dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
2743 {
2744 dsl_pool_t *dp = scn->scn_dp;
2745 dsl_dataset_t *ds;
2746
2747 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
2748
2749 if (scn->scn_phys.scn_cur_min_txg >=
2750 scn->scn_phys.scn_max_txg) {
2751 /*
2752 * This can happen if this snapshot was created after the
2753 * scan started, and we already completed a previous snapshot
2754 * that was created after the scan started. This snapshot
2755 * only references blocks with:
2756 *
2757 * birth < our ds_creation_txg
2758 * cur_min_txg is no less than ds_creation_txg.
2759 * We have already visited these blocks.
2760 * or
2761 * birth > scn_max_txg
2762 * The scan requested not to visit these blocks.
2763 *
2764 * Subsequent snapshots (and clones) can reference our
2765 * blocks, or blocks with even higher birth times.
2766 * Therefore we do not need to visit them either,
2767 * so we do not add them to the work queue.
2768 *
2769 * Note that checking for cur_min_txg >= cur_max_txg
2770 * is not sufficient, because in that case we may need to
2771 * visit subsequent snapshots. This happens when min_txg > 0,
2772 * which raises cur_min_txg. In this case we will visit
2773 * this dataset but skip all of its blocks, because the
2774 * rootbp's birth time is < cur_min_txg. Then we will
2775 * add the next snapshots/clones to the work queue.
2776 */
2777 char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
2778 dsl_dataset_name(ds, dsname);
2779 zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
2780 "cur_min_txg (%llu) >= max_txg (%llu)",
2781 (longlong_t)dsobj, dsname,
2782 (longlong_t)scn->scn_phys.scn_cur_min_txg,
2783 (longlong_t)scn->scn_phys.scn_max_txg);
2784 kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
2785
2786 goto out;
2787 }
2788
2789 /*
2790 * Only the ZIL in the head (non-snapshot) is valid. Even though
2791 * snapshots can have ZIL block pointers (which may be the same
2792 * BP as in the head), they must be ignored. In addition, $ORIGIN
2793 * doesn't have an objset (i.e. its ds_bp is a hole) so we don't
2794 * need to look for a ZIL in it either. So we traverse the ZIL here,
2795 * rather than in scan_recurse(), because the regular snapshot
2796 * block-sharing rules don't apply to it.
2797 */
2798 if (!dsl_dataset_is_snapshot(ds) &&
2799 (dp->dp_origin_snap == NULL ||
2800 ds->ds_dir != dp->dp_origin_snap->ds_dir)) {
2801 objset_t *os;
2802 if (dmu_objset_from_ds(ds, &os) != 0) {
2803 goto out;
2804 }
2805 dsl_scan_zil(dp, &os->os_zil_header);
2806 }
2807
2808 /*
2809 * Iterate over the bps in this ds.
2810 */
2811 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2812 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
2813 dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
2814 rrw_exit(&ds->ds_bp_rwlock, FTAG);
2815
2816 char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
2817 dsl_dataset_name(ds, dsname);
2818 zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
2819 "suspending=%u",
2820 (longlong_t)dsobj, dsname,
2821 (longlong_t)scn->scn_phys.scn_cur_min_txg,
2822 (longlong_t)scn->scn_phys.scn_cur_max_txg,
2823 (int)scn->scn_suspending);
2824 kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
2825
2826 if (scn->scn_suspending)
2827 goto out;
2828
2829 /*
2830 * We've finished this pass over this dataset.
2831 */
2832
2833 /*
2834 * If we did not completely visit this dataset, do another pass.
2835 */
2836 if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
2837 zfs_dbgmsg("incomplete pass on %s; visiting again",
2838 dp->dp_spa->spa_name);
2839 scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
2840 scan_ds_queue_insert(scn, ds->ds_object,
2841 scn->scn_phys.scn_cur_max_txg);
2842 goto out;
2843 }
2844
2845 /*
2846 * Add descendant datasets to work queue.
2847 */
2848 if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
2849 scan_ds_queue_insert(scn,
2850 dsl_dataset_phys(ds)->ds_next_snap_obj,
2851 dsl_dataset_phys(ds)->ds_creation_txg);
2852 }
2853 if (dsl_dataset_phys(ds)->ds_num_children > 1) {
2854 boolean_t usenext = B_FALSE;
2855 if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
2856 uint64_t count;
2857 /*
2858 * A bug in a previous version of the code could
2859 * cause upgrade_clones_cb() to not set
2860 * ds_next_snap_obj when it should, leading to a
2861 * missing entry. Therefore we can only use the
2862 * next_clones_obj when its count is correct.
2863 */
2864 int err = zap_count(dp->dp_meta_objset,
2865 dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
2866 if (err == 0 &&
2867 count == dsl_dataset_phys(ds)->ds_num_children - 1)
2868 usenext = B_TRUE;
2869 }
2870
2871 if (usenext) {
2872 zap_cursor_t zc;
2873 zap_attribute_t za;
2874 for (zap_cursor_init(&zc, dp->dp_meta_objset,
2875 dsl_dataset_phys(ds)->ds_next_clones_obj);
2876 zap_cursor_retrieve(&zc, &za) == 0;
2877 (void) zap_cursor_advance(&zc)) {
2878 scan_ds_queue_insert(scn,
2879 zfs_strtonum(za.za_name, NULL),
2880 dsl_dataset_phys(ds)->ds_creation_txg);
2881 }
2882 zap_cursor_fini(&zc);
2883 } else {
2884 VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
2885 enqueue_clones_cb, &ds->ds_object,
2886 DS_FIND_CHILDREN));
2887 }
2888 }
2889
2890 out:
2891 dsl_dataset_rele(ds, FTAG);
2892 }
2893
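/*
 * dmu_objset_find_dp() callback that seeds the scan queue: for each head
 * dataset, walk back to its oldest snapshot and add it to the in-memory
 * scan queue (clones are picked up later via enqueue_clones_cb()).
 */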
2894 static int
2895 enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
2896 {
2897 (void) arg;
2898 dsl_dataset_t *ds;
2899 int err;
2900 dsl_scan_t *scn = dp->dp_scan;
2901
2902 err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
2903 if (err)
2904 return (err);
2905
2906 while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
2907 dsl_dataset_t *prev;
2908 err = dsl_dataset_hold_obj(dp,
2909 dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
2910 if (err) {
2911 dsl_dataset_rele(ds, FTAG);
2912 return (err);
2913 }
2914
2915 /*
2916 * If this is a clone, we don't need to worry about it for now.
2917 */
2918 if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
2919 dsl_dataset_rele(ds, FTAG);
2920 dsl_dataset_rele(prev, FTAG);
2921 return (0);
2922 }
2923 dsl_dataset_rele(ds, FTAG);
2924 ds = prev;
2925 }
2926
2927 mutex_enter(&scn->scn_queue_lock);
2928 scan_ds_queue_insert(scn, ds->ds_object,
2929 dsl_dataset_phys(ds)->ds_prev_snap_txg);
2930 mutex_exit(&scn->scn_queue_lock);
2931 dsl_dataset_rele(ds, FTAG);
2932 return (0);
2933 }
2934
2935 void
2936 dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
2937 ddt_t *ddt, ddt_lightweight_entry_t *ddlwe, dmu_tx_t *tx)
2938 {
2939 (void) tx;
2940 const ddt_key_t *ddk = &ddlwe->ddlwe_key;
2941 blkptr_t bp;
2942 zbookmark_phys_t zb = { 0 };
2943
2944 if (!dsl_scan_is_running(scn))
2945 return;
2946
2947 /*
2948 * This function is special because it is the only thing
2949 * that can add scan_io_t's to the vdev scan queues from
2950 * outside dsl_scan_sync(). For the most part this is ok
2951 * as long as it is called from within syncing context.
2952 * However, dsl_scan_sync() expects that no new sio's will
2953 * be added between when all the work for a scan is done
2954 * and the next txg when the scan is actually marked as
2955 * completed. This check ensures we do not issue new sio's
2956 * during this period.
2957 */
2958 if (scn->scn_done_txg != 0)
2959 return;
2960
2961 for (int p = 0; p < DDT_NPHYS(ddt); p++) {
2962 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
2963 uint64_t phys_birth = ddt_phys_birth(&ddlwe->ddlwe_phys, v);
2964
2965 if (phys_birth == 0 || phys_birth > scn->scn_phys.scn_max_txg)
2966 continue;
2967 ddt_bp_create(checksum, ddk, &ddlwe->ddlwe_phys, v, &bp);
2968
2969 scn->scn_visited_this_txg++;
2970 scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
2971 }
2972 }
2973
2974 /*
2975 * Scrub/dedup interaction.
2976 *
2977 * If there are N references to a deduped block, we don't want to scrub it
2978 * N times -- ideally, we should scrub it exactly once.
2979 *
2980 * We leverage the fact that the dde's replication class (ddt_class_t)
2981 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
2982 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
2983 *
2984 * To prevent excess scrubbing, the scrub begins by walking the DDT
2985 * to find all blocks with refcnt > 1, and scrubs each of these once.
2986 * Since there are two replication classes which contain blocks with
2987 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
2988 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
2989 *
2990 * There would be nothing more to say if a block's refcnt couldn't change
2991 * during a scrub, but of course it can so we must account for changes
2992 * in a block's replication class.
2993 *
2994 * Here's an example of what can occur:
2995 *
2996 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
2997 * when visited during the top-down scrub phase, it will be scrubbed twice.
2998 * This negates our scrub optimization, but is otherwise harmless.
2999 *
3000 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
3001 * on each visit during the top-down scrub phase, it will never be scrubbed.
3002 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
3003 * reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
3004 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
3005 * while a scrub is in progress, it scrubs the block right then.
3006 */
3007 static void
3008 dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
3009 {
3010 ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
3011 ddt_lightweight_entry_t ddlwe = {0};
3012 int error;
3013 uint64_t n = 0;
3014
3015 while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &ddlwe)) == 0) {
3016 ddt_t *ddt;
3017
3018 if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
3019 break;
3020 dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
3021 (longlong_t)ddb->ddb_class,
3022 (longlong_t)ddb->ddb_type,
3023 (longlong_t)ddb->ddb_checksum,
3024 (longlong_t)ddb->ddb_cursor);
3025
3026 /* There should be no pending changes to the dedup table */
3027 ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
3028 ASSERT(avl_first(&ddt->ddt_tree) == NULL);
3029
3030 dsl_scan_ddt_entry(scn, ddb->ddb_checksum, ddt, &ddlwe, tx);
3031 n++;
3032
3033 if (dsl_scan_check_suspend(scn, NULL))
3034 break;
3035 }
3036
3037 if (error == EAGAIN) {
3038 dsl_scan_check_suspend(scn, NULL);
3039 error = 0;
3040
3041 zfs_dbgmsg("waiting for ddt to become ready for scan "
3042 "on %s with class_max = %u; suspending=%u",
3043 scn->scn_dp->dp_spa->spa_name,
3044 (int)scn->scn_phys.scn_ddt_class_max,
3045 (int)scn->scn_suspending);
3046 } else
3047 zfs_dbgmsg("scanned %llu ddt entries on %s with "
3048 "class_max = %u; suspending=%u", (longlong_t)n,
3049 scn->scn_dp->dp_spa->spa_name,
3050 (int)scn->scn_phys.scn_ddt_class_max,
3051 (int)scn->scn_suspending);
3052
3053 ASSERT(error == 0 || error == ENOENT);
3054 ASSERT(error != ENOENT ||
3055 ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
3056 }
3057
3058 static uint64_t
3059 dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
3060 {
3061 uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
3062 if (ds->ds_is_snapshot)
3063 return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
3064 return (smt);
3065 }
3066
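/*
 * Top-level metadata traversal for one sync pass: finish the DDT phase if
 * still in progress, then the MOS and $ORIGIN, then resume any dataset we
 * were suspended in, and finally work through the dataset queue until we
 * either suspend or run out of datasets.
 */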
3067 static void
3068 dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
3069 {
3070 scan_ds_t *sds;
3071 dsl_pool_t *dp = scn->scn_dp;
3072
3073 if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
3074 scn->scn_phys.scn_ddt_class_max) {
3075 scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
3076 scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
3077 dsl_scan_ddt(scn, tx);
3078 if (scn->scn_suspending)
3079 return;
3080 }
3081
3082 if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
3083 /* First do the MOS & ORIGIN */
3084
3085 scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
3086 scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
3087 dsl_scan_visit_rootbp(scn, NULL,
3088 &dp->dp_meta_rootbp, tx);
3089 if (scn->scn_suspending)
3090 return;
3091
3092 if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
3093 VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
3094 enqueue_cb, NULL, DS_FIND_CHILDREN));
3095 } else {
3096 dsl_scan_visitds(scn,
3097 dp->dp_origin_snap->ds_object, tx);
3098 }
3099 ASSERT(!scn->scn_suspending);
3100 } else if (scn->scn_phys.scn_bookmark.zb_objset !=
3101 ZB_DESTROYED_OBJSET) {
3102 uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset;
3103 /*
3104 * If we were suspended, continue from here. Note if the
3105 * ds we were suspended on was deleted, the zb_objset may
3106 * be -1, so we will skip this and find a new objset
3107 * below.
3108 */
3109 dsl_scan_visitds(scn, dsobj, tx);
3110 if (scn->scn_suspending)
3111 return;
3112 }
3113
3114 /*
3115 * In case we suspended right at the end of the ds, zero the
3116 * bookmark so we don't think that we're still trying to resume.
3117 */
3118 memset(&scn->scn_phys.scn_bookmark, 0, sizeof (zbookmark_phys_t));
3119
3120 /*
3121 * Keep pulling things out of the dataset avl queue. Updates to the
3122 * persistent zap-object-as-queue happen only at checkpoints.
3123 */
3124 while ((sds = avl_first(&scn->scn_queue)) != NULL) {
3125 dsl_dataset_t *ds;
3126 uint64_t dsobj = sds->sds_dsobj;
3127 uint64_t txg = sds->sds_txg;
3128
3129 /* dequeue and free the ds from the queue */
3130 scan_ds_queue_remove(scn, dsobj);
3131 sds = NULL;
3132
3133 /* set up min / max txg */
3134 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
3135 if (txg != 0) {
3136 scn->scn_phys.scn_cur_min_txg =
3137 MAX(scn->scn_phys.scn_min_txg, txg);
3138 } else {
3139 scn->scn_phys.scn_cur_min_txg =
3140 MAX(scn->scn_phys.scn_min_txg,
3141 dsl_dataset_phys(ds)->ds_prev_snap_txg);
3142 }
3143 scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
3144 dsl_dataset_rele(ds, FTAG);
3145
3146 dsl_scan_visitds(scn, dsobj, tx);
3147 if (scn->scn_suspending)
3148 return;
3149 }
3150
3151 /* No more objsets to fetch, we're done */
3152 scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET;
3153 ASSERT0(scn->scn_suspending);
3154 }
3155
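/*
 * Count the data disks backing the pool: for each top-level vdev that is
 * not a log, spare, or L2ARC device, add its disk count minus its parity.
 */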
3156 static uint64_t
3157 dsl_scan_count_data_disks(spa_t *spa)
3158 {
3159 vdev_t *rvd = spa->spa_root_vdev;
3160 uint64_t i, leaves = 0;
3161
3162 for (i = 0; i < rvd->vdev_children; i++) {
3163 vdev_t *vd = rvd->vdev_child[i];
3164 if (vd->vdev_islog || vd->vdev_isspare || vd->vdev_isl2cache)
3165 continue;
3166 leaves += vdev_get_ndisks(vd) - vdev_get_nparity(vd);
3167 }
3168 return (leaves);
3169 }
3170
3171 static void
3172 scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp)
3173 {
3174 int i;
3175 uint64_t cur_size = 0;
3176
3177 for (i = 0; i < BP_GET_NDVAS(bp); i++) {
3178 cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]);
3179 }
3180
3181 q->q_total_zio_size_this_txg += cur_size;
3182 q->q_zios_this_txg++;
3183 }
3184
3185 static void
3186 scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start,
3187 uint64_t end)
3188 {
3189 q->q_total_seg_size_this_txg += end - start;
3190 q->q_segs_this_txg++;
3191 }
3192
3193 static boolean_t
3194 scan_io_queue_check_suspend(dsl_scan_t *scn)
3195 {
3196 /* See comment in dsl_scan_check_suspend() */
3197 uint64_t curr_time_ns = gethrtime();
3198 uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
3199 uint64_t sync_time_ns = curr_time_ns -
3200 scn->scn_dp->dp_spa->spa_sync_starttime;
3201 uint64_t dirty_min_bytes = zfs_dirty_data_max *
3202 zfs_vdev_async_write_active_min_dirty_percent / 100;
3203 uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
3204 zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
3205
3206 return ((NSEC2MSEC(scan_time_ns) > mintime &&
3207 (scn->scn_dp->dp_dirty_total >= dirty_min_bytes ||
3208 txg_sync_waiting(scn->scn_dp) ||
3209 NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
3210 spa_shutting_down(scn->scn_dp->dp_spa));
3211 }
3212
3213 /*
3214 * Given a list of scan_io_t's in io_list, this issues the I/Os out to
3215 * disk. This consumes the io_list and frees the scan_io_t's. This is
3216 * called when emptying queues, either when we're up against the memory
3217 * limit or when we have finished scanning. Returns B_TRUE if we stopped
3218 * processing the list before we finished. Any sios that were not issued
3219 * will remain in the io_list.
3220 */
3221 static boolean_t
3222 scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list)
3223 {
3224 dsl_scan_t *scn = queue->q_scn;
3225 scan_io_t *sio;
3226 boolean_t suspended = B_FALSE;
3227
3228 while ((sio = list_head(io_list)) != NULL) {
3229 blkptr_t bp;
3230
3231 if (scan_io_queue_check_suspend(scn)) {
3232 suspended = B_TRUE;
3233 break;
3234 }
3235
3236 sio2bp(sio, &bp);
3237 scan_exec_io(scn->scn_dp, &bp, sio->sio_flags,
3238 &sio->sio_zb, queue);
3239 (void) list_remove_head(io_list);
3240 scan_io_queues_update_zio_stats(queue, &bp);
3241 sio_free(sio);
3242 }
3243 return (suspended);
3244 }
3245
3246 /*
3247 * This function removes sios from an IO queue which reside within a given
3248 * range_seg_t and inserts them (in offset order) into a list. Note that
3249 * we only ever return a maximum of 32 sios at once. If there are more sios
3250 * to process within this segment that did not make it onto the list, we
3251 * return B_TRUE; otherwise we return B_FALSE.
3252 */
3253 static boolean_t
3254 scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
3255 {
3256 scan_io_t *srch_sio, *sio, *next_sio;
3257 avl_index_t idx;
3258 uint_t num_sios = 0;
3259 int64_t bytes_issued = 0;
3260
3261 ASSERT(rs != NULL);
3262 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
3263
3264 srch_sio = sio_alloc(1);
3265 srch_sio->sio_nr_dvas = 1;
3266 SIO_SET_OFFSET(srch_sio, rs_get_start(rs, queue->q_exts_by_addr));
3267
3268 /*
3269 * The exact start of the extent might not contain any matching zios,
3270 * so if that's the case, examine the next one in the tree.
3271 */
3272 sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
3273 sio_free(srch_sio);
3274
3275 if (sio == NULL)
3276 sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER);
3277
3278 while (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
3279 queue->q_exts_by_addr) && num_sios <= 32) {
3280 ASSERT3U(SIO_GET_OFFSET(sio), >=, rs_get_start(rs,
3281 queue->q_exts_by_addr));
3282 ASSERT3U(SIO_GET_END_OFFSET(sio), <=, rs_get_end(rs,
3283 queue->q_exts_by_addr));
3284
3285 next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio);
3286 avl_remove(&queue->q_sios_by_addr, sio);
3287 if (avl_is_empty(&queue->q_sios_by_addr))
3288 atomic_add_64(&queue->q_scn->scn_queues_pending, -1);
3289 queue->q_sio_memused -= SIO_GET_MUSED(sio);
3290
3291 bytes_issued += SIO_GET_ASIZE(sio);
3292 num_sios++;
3293 list_insert_tail(list, sio);
3294 sio = next_sio;
3295 }
3296
3297 /*
3298 * We limit the number of sios we process at once to 32 to avoid
3299 * biting off more than we can chew. If we didn't take everything
3300 * in the segment we update it to reflect the work we were able to
3301 * complete. Otherwise, we remove it from the range tree entirely.
3302 */
3303 if (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
3304 queue->q_exts_by_addr)) {
3305 range_tree_adjust_fill(queue->q_exts_by_addr, rs,
3306 -bytes_issued);
3307 range_tree_resize_segment(queue->q_exts_by_addr, rs,
3308 SIO_GET_OFFSET(sio), rs_get_end(rs,
3309 queue->q_exts_by_addr) - SIO_GET_OFFSET(sio));
3310 queue->q_last_ext_addr = SIO_GET_OFFSET(sio);
3311 return (B_TRUE);
3312 } else {
3313 uint64_t rstart = rs_get_start(rs, queue->q_exts_by_addr);
3314 uint64_t rend = rs_get_end(rs, queue->q_exts_by_addr);
3315 range_tree_remove(queue->q_exts_by_addr, rstart, rend - rstart);
3316 queue->q_last_ext_addr = -1;
3317 return (B_FALSE);
3318 }
3319 }
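/*
 * Illustrative walk-through of the gather step (offsets are hypothetical):
 * if an extent covers [1 MiB, 9 MiB) and holds 40 queued sios, the first
 * call collects the lowest-offset batch up to the 32-sio cap, subtracts
 * their allocated size from the extent's fill, shrinks the extent so it
 * starts at the first sio left behind, and returns B_TRUE. A subsequent
 * call gathers the remaining sios, removes the now-exhausted extent from
 * the range tree, and returns B_FALSE.
 */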
3320
3321 /*
3322 * This is called from the queue emptying thread and selects the next
3323 * extent from which we are to issue I/Os. The behavior of this function
3324 * depends on the state of the scan, the current memory consumption and
3325 * whether or not we are performing a scan shutdown.
3326 * 1) We select extents in an elevator algorithm (LBA-order) if the scan
3327 * needs to perform a checkpoint
3328 * 2) We select the largest available extent if we are up against the
3329 * memory limit.
3330 * 3) Otherwise we don't select any extents.
3331 */
3332 static range_seg_t *
3333 scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
3334 {
3335 dsl_scan_t *scn = queue->q_scn;
3336 range_tree_t *rt = queue->q_exts_by_addr;
3337
3338 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
3339 ASSERT(scn->scn_is_sorted);
3340
3341 if (!scn->scn_checkpointing && !scn->scn_clearing)
3342 return (NULL);
3343
3344 /*
3345 * During normal clearing, we want to issue our largest segments
3346 * first, keeping IO as sequential as possible, and leaving the
3347 * smaller extents for later with the hope that they might eventually
3348 * grow to larger sequential segments. However, when the scan is
3349 * checkpointing, no new extents will be added to the sorting queue,
3350 * so the way we are sorted now is as good as it will ever get.
3351 * In this case, we instead switch to issuing extents in LBA order.
3352 */
3353 if ((zfs_scan_issue_strategy < 1 && scn->scn_checkpointing) ||
3354 zfs_scan_issue_strategy == 1)
3355 return (range_tree_first(rt));
3356
3357 /*
3358 * Try to continue the previous extent if it has not been completed yet.
3359 * After being shrunk in scan_io_queue_gather() it may no longer be the best,
3360 * but otherwise we would leave a shorter remnant behind every txg.
3361 */
3362 uint64_t start;
3363 uint64_t size = 1ULL << rt->rt_shift;
3364 range_seg_t *addr_rs;
3365 if (queue->q_last_ext_addr != -1) {
3366 start = queue->q_last_ext_addr;
3367 addr_rs = range_tree_find(rt, start, size);
3368 if (addr_rs != NULL)
3369 return (addr_rs);
3370 }
3371
3372 /*
3373 * Nothing to continue, so find new best extent.
3374 */
3375 uint64_t *v = zfs_btree_first(&queue->q_exts_by_size, NULL);
3376 if (v == NULL)
3377 return (NULL);
3378 queue->q_last_ext_addr = start = *v << rt->rt_shift;
3379
3380 /*
3381 * We need to get the original entry in the by_addr tree so we can
3382 * modify it.
3383 */
3384 addr_rs = range_tree_find(rt, start, size);
3385 ASSERT3P(addr_rs, !=, NULL);
3386 ASSERT3U(rs_get_start(addr_rs, rt), ==, start);
3387 ASSERT3U(rs_get_end(addr_rs, rt), >, start);
3388 return (addr_rs);
3389 }
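/*
 * In short, the zfs_scan_issue_strategy tunable selects between the two
 * orderings used above: 1 always issues in LBA order; 0 (the default)
 * issues the best-scored extent first but falls back to LBA order while
 * checkpointing; values above 1 always issue by score. In the score-based
 * cases a partially issued extent is continued before a new one is picked.
 */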
3390
3391 static void
3392 scan_io_queues_run_one(void *arg)
3393 {
3394 dsl_scan_io_queue_t *queue = arg;
3395 kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
3396 boolean_t suspended = B_FALSE;
3397 range_seg_t *rs;
3398 scan_io_t *sio;
3399 zio_t *zio;
3400 list_t sio_list;
3401
3402 ASSERT(queue->q_scn->scn_is_sorted);
3403
3404 list_create(&sio_list, sizeof (scan_io_t),
3405 offsetof(scan_io_t, sio_nodes.sio_list_node));
3406 zio = zio_null(queue->q_scn->scn_zio_root, queue->q_scn->scn_dp->dp_spa,
3407 NULL, NULL, NULL, ZIO_FLAG_CANFAIL);
3408 mutex_enter(q_lock);
3409 queue->q_zio = zio;
3410
3411 /* Calculate maximum in-flight bytes for this vdev. */
3412 queue->q_maxinflight_bytes = MAX(1, zfs_scan_vdev_limit *
3413 (vdev_get_ndisks(queue->q_vd) - vdev_get_nparity(queue->q_vd)));
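	/*
	 * Example with illustrative numbers: for an 8-disk raidz2 top-level
	 * vdev and a zfs_scan_vdev_limit of, say, 4 MiB, this allows
	 * 6 * 4 MiB = 24 MiB of scrub reads in flight for this queue;
	 * scan_exec_io() waits on q_zio_cv whenever q_inflight_bytes would
	 * exceed that bound.
	 */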
3414
3415 /* reset per-queue scan statistics for this txg */
3416 queue->q_total_seg_size_this_txg = 0;
3417 queue->q_segs_this_txg = 0;
3418 queue->q_total_zio_size_this_txg = 0;
3419 queue->q_zios_this_txg = 0;
3420
3421 /* loop until we run out of time or sios */
3422 while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) {
3423 uint64_t seg_start = 0, seg_end = 0;
3424 boolean_t more_left;
3425
3426 ASSERT(list_is_empty(&sio_list));
3427
3428 /* loop while we still have sios left to process in this rs */
3429 do {
3430 scan_io_t *first_sio, *last_sio;
3431
3432 /*
3433 * We have selected which extent needs to be
3434 * processed next. Gather up the corresponding sios.
3435 */
3436 more_left = scan_io_queue_gather(queue, rs, &sio_list);
3437 ASSERT(!list_is_empty(&sio_list));
3438 first_sio = list_head(&sio_list);
3439 last_sio = list_tail(&sio_list);
3440
3441 seg_end = SIO_GET_END_OFFSET(last_sio);
3442 if (seg_start == 0)
3443 seg_start = SIO_GET_OFFSET(first_sio);
3444
3445 /*
3446 * Issuing sios can take a long time so drop the
3447 * queue lock. The sio queue won't be updated by
3448 * other threads since we're in syncing context so
3449 * we can be sure that our trees will remain exactly
3450 * as we left them.
3451 */
3452 mutex_exit(q_lock);
3453 suspended = scan_io_queue_issue(queue, &sio_list);
3454 mutex_enter(q_lock);
3455
3456 if (suspended)
3457 break;
3458 } while (more_left);
3459
3460 /* update statistics for debugging purposes */
3461 scan_io_queues_update_seg_stats(queue, seg_start, seg_end);
3462
3463 if (suspended)
3464 break;
3465 }
3466
3467 /*
3468 * If we were suspended in the middle of processing,
3469 * requeue any unfinished sios and exit.
3470 */
3471 while ((sio = list_remove_head(&sio_list)) != NULL)
3472 scan_io_queue_insert_impl(queue, sio);
3473
3474 queue->q_zio = NULL;
3475 mutex_exit(q_lock);
3476 zio_nowait(zio);
3477 list_destroy(&sio_list);
3478 }
3479
3480 /*
3481 * Performs an emptying run on all scan queues in the pool. This just
3482 * punches out one thread per top-level vdev, each of which processes
3483 * only that vdev's scan queue. We can parallelize the I/O here because
3484 * we know that each queue's I/Os only affect its own top-level vdev.
3485 *
3486 * This function waits for the queue runs to complete, and must be
3487 * called from dsl_scan_sync (or in general, syncing context).
3488 */
3489 static void
3490 scan_io_queues_run(dsl_scan_t *scn)
3491 {
3492 spa_t *spa = scn->scn_dp->dp_spa;
3493
3494 ASSERT(scn->scn_is_sorted);
3495 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3496
3497 if (scn->scn_queues_pending == 0)
3498 return;
3499
3500 if (scn->scn_taskq == NULL) {
3501 int nthreads = spa->spa_root_vdev->vdev_children;
3502
3503 /*
3504 * We need to make this taskq *always* execute as many
3505 * threads in parallel as we have top-level vdevs and no
3506 * less, otherwise strange serialization of the calls to
3507 * scan_io_queues_run_one can occur during spa_sync runs
3508 * and that significantly impacts performance.
3509 */
3510 scn->scn_taskq = taskq_create("dsl_scan_iss", nthreads,
3511 minclsyspri, nthreads, nthreads, TASKQ_PREPOPULATE);
3512 }
3513
3514 for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
3515 vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
3516
3517 mutex_enter(&vd->vdev_scan_io_queue_lock);
3518 if (vd->vdev_scan_io_queue != NULL) {
3519 VERIFY(taskq_dispatch(scn->scn_taskq,
3520 scan_io_queues_run_one, vd->vdev_scan_io_queue,
3521 TQ_SLEEP) != TASKQID_INVALID);
3522 }
3523 mutex_exit(&vd->vdev_scan_io_queue_lock);
3524 }
3525
3526 /*
3527 * Wait for the queues to finish issuing their IOs for this run
3528 * before we return. There may still be IOs in flight at this
3529 * point.
3530 */
3531 taskq_wait(scn->scn_taskq);
3532 }
3533
3534 static boolean_t
3535 dsl_scan_async_block_should_pause(dsl_scan_t *scn)
3536 {
3537 uint64_t elapsed_nanosecs;
3538
3539 if (zfs_recover)
3540 return (B_FALSE);
3541
3542 if (zfs_async_block_max_blocks != 0 &&
3543 scn->scn_visited_this_txg >= zfs_async_block_max_blocks) {
3544 return (B_TRUE);
3545 }
3546
3547 if (zfs_max_async_dedup_frees != 0 &&
3548 scn->scn_dedup_frees_this_txg >= zfs_max_async_dedup_frees) {
3549 return (B_TRUE);
3550 }
3551
3552 elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
3553 return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
3554 (NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms &&
3555 txg_sync_waiting(scn->scn_dp)) ||
3556 spa_shutting_down(scn->scn_dp->dp_spa));
3557 }
3558
3559 static int
3560 dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
3561 {
3562 dsl_scan_t *scn = arg;
3563
3564 if (!scn->scn_is_bptree ||
3565 (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
3566 if (dsl_scan_async_block_should_pause(scn))
3567 return (SET_ERROR(ERESTART));
3568 }
3569
3570 zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
3571 dmu_tx_get_txg(tx), bp, 0));
3572 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
3573 -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
3574 -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
3575 scn->scn_visited_this_txg++;
3576 if (BP_GET_DEDUP(bp))
3577 scn->scn_dedup_frees_this_txg++;
3578 return (0);
3579 }
3580
3581 static void
3582 dsl_scan_update_stats(dsl_scan_t *scn)
3583 {
3584 spa_t *spa = scn->scn_dp->dp_spa;
3585 uint64_t i;
3586 uint64_t seg_size_total = 0, zio_size_total = 0;
3587 uint64_t seg_count_total = 0, zio_count_total = 0;
3588
3589 for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
3590 vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
3591 dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue;
3592
3593 if (queue == NULL)
3594 continue;
3595
3596 seg_size_total += queue->q_total_seg_size_this_txg;
3597 zio_size_total += queue->q_total_zio_size_this_txg;
3598 seg_count_total += queue->q_segs_this_txg;
3599 zio_count_total += queue->q_zios_this_txg;
3600 }
3601
3602 if (seg_count_total == 0 || zio_count_total == 0) {
3603 scn->scn_avg_seg_size_this_txg = 0;
3604 scn->scn_avg_zio_size_this_txg = 0;
3605 scn->scn_segs_this_txg = 0;
3606 scn->scn_zios_this_txg = 0;
3607 return;
3608 }
3609
3610 scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total;
3611 scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total;
3612 scn->scn_segs_this_txg = seg_count_total;
3613 scn->scn_zios_this_txg = zio_count_total;
3614 }
3615
3616 static int
3617 bpobj_dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
3618 dmu_tx_t *tx)
3619 {
3620 ASSERT(!bp_freed);
3621 return (dsl_scan_free_block_cb(arg, bp, tx));
3622 }
3623
3624 static int
3625 dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
3626 dmu_tx_t *tx)
3627 {
3628 ASSERT(!bp_freed);
3629 dsl_scan_t *scn = arg;
3630 const dva_t *dva = &bp->blk_dva[0];
3631
3632 if (dsl_scan_async_block_should_pause(scn))
3633 return (SET_ERROR(ERESTART));
3634
3635 spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa,
3636 DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva),
3637 DVA_GET_ASIZE(dva), tx);
3638 scn->scn_visited_this_txg++;
3639 return (0);
3640 }
3641
3642 boolean_t
3643 dsl_scan_active(dsl_scan_t *scn)
3644 {
3645 spa_t *spa = scn->scn_dp->dp_spa;
3646 uint64_t used = 0, comp, uncomp;
3647 boolean_t clones_left;
3648
3649 if (spa->spa_load_state != SPA_LOAD_NONE)
3650 return (B_FALSE);
3651 if (spa_shutting_down(spa))
3652 return (B_FALSE);
3653 if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) ||
3654 (scn->scn_async_destroying && !scn->scn_async_stalled))
3655 return (B_TRUE);
3656
3657 if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
3658 (void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
3659 &used, &comp, &uncomp);
3660 }
3661 clones_left = spa_livelist_delete_check(spa);
3662 return ((used != 0) || (clones_left));
3663 }
3664
3665 boolean_t
3666 dsl_errorscrub_active(dsl_scan_t *scn)
3667 {
3668 spa_t *spa = scn->scn_dp->dp_spa;
3669 if (spa->spa_load_state != SPA_LOAD_NONE)
3670 return (B_FALSE);
3671 if (spa_shutting_down(spa))
3672 return (B_FALSE);
3673 if (dsl_errorscrubbing(scn->scn_dp))
3674 return (B_TRUE);
3675 return (B_FALSE);
3676 }
3677
3678 static boolean_t
3679 dsl_scan_check_deferred(vdev_t *vd)
3680 {
3681 boolean_t need_resilver = B_FALSE;
3682
3683 for (int c = 0; c < vd->vdev_children; c++) {
3684 need_resilver |=
3685 dsl_scan_check_deferred(vd->vdev_child[c]);
3686 }
3687
3688 if (!vdev_is_concrete(vd) || vd->vdev_aux ||
3689 !vd->vdev_ops->vdev_op_leaf)
3690 return (need_resilver);
3691
3692 if (!vd->vdev_resilver_deferred)
3693 need_resilver = B_TRUE;
3694
3695 return (need_resilver);
3696 }
3697
3698 static boolean_t
3699 dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize,
3700 uint64_t phys_birth)
3701 {
3702 vdev_t *vd;
3703
3704 vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
3705
3706 if (vd->vdev_ops == &vdev_indirect_ops) {
3707 /*
3708 * The indirect vdev can point to multiple
3709 * vdevs. For simplicity, always create
3710 * the resilver zio_t. zio_vdev_io_start()
3711 * will bypass the child resilver i/o's if
3712 * they are on vdevs that don't have DTL's.
3713 */
3714 return (B_TRUE);
3715 }
3716
3717 if (DVA_GET_GANG(dva)) {
3718 /*
3719 * Gang members may be spread across multiple
3720 * vdevs, so the best estimate we have is the
3721 * scrub range, which has already been checked.
3722 * XXX -- it would be better to change our
3723 * allocation policy to ensure that all
3724 * gang members reside on the same vdev.
3725 */
3726 return (B_TRUE);
3727 }
3728
3729 /*
3730 * Check if the top-level vdev must resilver this offset.
3731 * When the offset does not intersect with a dirty leaf DTL
3732 * then it may be possible to skip the resilver IO. The psize
3733 * is provided instead of asize to simplify the check for RAIDZ.
3734 */
3735 if (!vdev_dtl_need_resilver(vd, dva, psize, phys_birth))
3736 return (B_FALSE);
3737
3738 /*
3739 * Check that this top-level vdev has a device under it which
3740 * is resilvering and is not deferred.
3741 */
3742 if (!dsl_scan_check_deferred(vd))
3743 return (B_FALSE);
3744
3745 return (B_TRUE);
3746 }
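/*
 * To summarize the checks above: a resilver read is skipped only when the
 * DVA maps to a concrete, non-indirect, non-gang location whose offset does
 * not intersect a dirty DTL on the top-level vdev, or when every leaf
 * underneath that vdev has had its resilver deferred.
 */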
3747
3748 static int
3749 dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx)
3750 {
3751 dsl_scan_t *scn = dp->dp_scan;
3752 spa_t *spa = dp->dp_spa;
3753 int err = 0;
3754
3755 if (spa_suspend_async_destroy(spa))
3756 return (0);
3757
3758 if (zfs_free_bpobj_enabled &&
3759 spa_version(spa) >= SPA_VERSION_DEADLISTS) {
3760 scn->scn_is_bptree = B_FALSE;
3761 scn->scn_async_block_min_time_ms = zfs_free_min_time_ms;
3762 scn->scn_zio_root = zio_root(spa, NULL,
3763 NULL, ZIO_FLAG_MUSTSUCCEED);
3764 err = bpobj_iterate(&dp->dp_free_bpobj,
3765 bpobj_dsl_scan_free_block_cb, scn, tx);
3766 VERIFY0(zio_wait(scn->scn_zio_root));
3767 scn->scn_zio_root = NULL;
3768
3769 if (err != 0 && err != ERESTART)
3770 zfs_panic_recover("error %u from bpobj_iterate()", err);
3771 }
3772
3773 if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
3774 ASSERT(scn->scn_async_destroying);
3775 scn->scn_is_bptree = B_TRUE;
3776 scn->scn_zio_root = zio_root(spa, NULL,
3777 NULL, ZIO_FLAG_MUSTSUCCEED);
3778 err = bptree_iterate(dp->dp_meta_objset,
3779 dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
3780 VERIFY0(zio_wait(scn->scn_zio_root));
3781 scn->scn_zio_root = NULL;
3782
3783 if (err == EIO || err == ECKSUM) {
3784 err = 0;
3785 } else if (err != 0 && err != ERESTART) {
3786 zfs_panic_recover("error %u from "
3787 "traverse_dataset_destroyed()", err);
3788 }
3789
3790 if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
3791 /* finished; deactivate async destroy feature */
3792 spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
3793 ASSERT(!spa_feature_is_active(spa,
3794 SPA_FEATURE_ASYNC_DESTROY));
3795 VERIFY0(zap_remove(dp->dp_meta_objset,
3796 DMU_POOL_DIRECTORY_OBJECT,
3797 DMU_POOL_BPTREE_OBJ, tx));
3798 VERIFY0(bptree_free(dp->dp_meta_objset,
3799 dp->dp_bptree_obj, tx));
3800 dp->dp_bptree_obj = 0;
3801 scn->scn_async_destroying = B_FALSE;
3802 scn->scn_async_stalled = B_FALSE;
3803 } else {
3804 /*
3805 * If we didn't make progress, mark the async
3806 * destroy as stalled, so that we will not initiate
3807 * a spa_sync() on its behalf. Note that we only
3808 * check this if we are not finished, because if the
3809 * bptree had no blocks for us to visit, we can
3810 * finish without "making progress".
3811 */
3812 scn->scn_async_stalled =
3813 (scn->scn_visited_this_txg == 0);
3814 }
3815 }
3816 if (scn->scn_visited_this_txg) {
3817 zfs_dbgmsg("freed %llu blocks in %llums from "
3818 "free_bpobj/bptree on %s in txg %llu; err=%u",
3819 (longlong_t)scn->scn_visited_this_txg,
3820 (longlong_t)
3821 NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
3822 spa->spa_name, (longlong_t)tx->tx_txg, err);
3823 scn->scn_visited_this_txg = 0;
3824 scn->scn_dedup_frees_this_txg = 0;
3825
3826 /*
3827 * Write out changes to the DDT and the BRT that may be required
3828 * as a result of the blocks freed. This ensures that the DDT
3829 * and the BRT are clean when a scrub/resilver runs.
3830 */
3831 ddt_sync(spa, tx->tx_txg);
3832 brt_sync(spa, tx->tx_txg);
3833 }
3834 if (err != 0)
3835 return (err);
3836 if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
3837 zfs_free_leak_on_eio &&
3838 (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
3839 dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
3840 dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
3841 /*
3842 * We have finished background destroying, but there is still
3843 * some space left in the dp_free_dir. Transfer this leaked
3844 * space to the dp_leak_dir.
3845 */
3846 if (dp->dp_leak_dir == NULL) {
3847 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
3848 (void) dsl_dir_create_sync(dp, dp->dp_root_dir,
3849 LEAK_DIR_NAME, tx);
3850 VERIFY0(dsl_pool_open_special_dir(dp,
3851 LEAK_DIR_NAME, &dp->dp_leak_dir));
3852 rrw_exit(&dp->dp_config_rwlock, FTAG);
3853 }
3854 dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
3855 dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
3856 dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
3857 dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
3858 dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
3859 -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
3860 -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
3861 -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
3862 }
3863
3864 if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
3865 !spa_livelist_delete_check(spa)) {
3866 /* finished; verify that space accounting went to zero */
3867 ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
3868 ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
3869 ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
3870 }
3871
3872 spa_notify_waiters(spa);
3873
3874 EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj),
3875 0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
3876 DMU_POOL_OBSOLETE_BPOBJ));
3877 if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) {
3878 ASSERT(spa_feature_is_active(dp->dp_spa,
3879 SPA_FEATURE_OBSOLETE_COUNTS));
3880
3881 scn->scn_is_bptree = B_FALSE;
3882 scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms;
3883 err = bpobj_iterate(&dp->dp_obsolete_bpobj,
3884 dsl_scan_obsolete_block_cb, scn, tx);
3885 if (err != 0 && err != ERESTART)
3886 zfs_panic_recover("error %u from bpobj_iterate()", err);
3887
3888 if (bpobj_is_empty(&dp->dp_obsolete_bpobj))
3889 dsl_pool_destroy_obsolete_bpobj(dp, tx);
3890 }
3891 return (0);
3892 }
3893
3894 static void
3895 name_to_bookmark(char *buf, zbookmark_phys_t *zb)
3896 {
3897 zb->zb_objset = zfs_strtonum(buf, &buf);
3898 ASSERT(*buf == ':');
3899 zb->zb_object = zfs_strtonum(buf + 1, &buf);
3900 ASSERT(*buf == ':');
3901 zb->zb_level = (int)zfs_strtonum(buf + 1, &buf);
3902 ASSERT(*buf == ':');
3903 zb->zb_blkid = zfs_strtonum(buf + 1, &buf);
3904 ASSERT(*buf == '\0');
3905 }
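/*
 * The names parsed above are the colon-separated hexadecimal bookmark
 * strings stored in the on-disk error log; e.g. a (hypothetical) entry
 * "36:208d:0:1f2" decodes to objset 0x36, object 0x208d, level 0 and
 * blkid 0x1f2.
 */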
3906
3907 static void
3908 name_to_object(char *buf, uint64_t *obj)
3909 {
3910 *obj = zfs_strtonum(buf, &buf);
3911 ASSERT(*buf == '\0');
3912 }
3913
3914 static void
3915 read_by_block_level(dsl_scan_t *scn, zbookmark_phys_t zb)
3916 {
3917 dsl_pool_t *dp = scn->scn_dp;
3918 dsl_dataset_t *ds;
3919 objset_t *os;
3920 if (dsl_dataset_hold_obj(dp, zb.zb_objset, FTAG, &ds) != 0)
3921 return;
3922
3923 if (dmu_objset_from_ds(ds, &os) != 0) {
3924 dsl_dataset_rele(ds, FTAG);
3925 return;
3926 }
3927
3928 /*
3929 * If the key is not loaded dbuf_dnode_findbp() will error out with
3930 * EACCES. However in that case dnode_hold() will eventually call
3931 * dbuf_read()->zio_wait() which may call spa_log_error(). This will
3932 * lead to a deadlock due to us holding the mutex spa_errlist_lock.
3933 * Avoid this by checking here whether the keys are loaded and returning
3934 * if they are not. Without loaded keys the head_errlog feature is
3935 * meaningless anyway, as we cannot figure out the block pointer's birth txg.
3936 */
3937 if (dsl_dataset_get_keystatus(ds->ds_dir) ==
3938 ZFS_KEYSTATUS_UNAVAILABLE) {
3939 dsl_dataset_rele(ds, FTAG);
3940 return;
3941 }
3942
3943 dnode_t *dn;
3944 blkptr_t bp;
3945
3946 if (dnode_hold(os, zb.zb_object, FTAG, &dn) != 0) {
3947 dsl_dataset_rele(ds, FTAG);
3948 return;
3949 }
3950
3951 rw_enter(&dn->dn_struct_rwlock, RW_READER);
3952 int error = dbuf_dnode_findbp(dn, zb.zb_level, zb.zb_blkid, &bp, NULL,
3953 NULL);
3954
3955 if (error) {
3956 rw_exit(&dn->dn_struct_rwlock);
3957 dnode_rele(dn, FTAG);
3958 dsl_dataset_rele(ds, FTAG);
3959 return;
3960 }
3961
3962 if (!error && BP_IS_HOLE(&bp)) {
3963 rw_exit(&dn->dn_struct_rwlock);
3964 dnode_rele(dn, FTAG);
3965 dsl_dataset_rele(ds, FTAG);
3966 return;
3967 }
3968
3969 int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW |
3970 ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB;
3971
3972 /* If it's an intent log block, failure is expected. */
3973 if (zb.zb_level == ZB_ZIL_LEVEL)
3974 zio_flags |= ZIO_FLAG_SPECULATIVE;
3975
3976 ASSERT(!BP_IS_EMBEDDED(&bp));
3977 scan_exec_io(dp, &bp, zio_flags, &zb, NULL);
3978 rw_exit(&dn->dn_struct_rwlock);
3979 dnode_rele(dn, FTAG);
3980 dsl_dataset_rele(ds, FTAG);
3981 }
3982
3983 /*
3984 * We keep track of the scrubbed error blocks in "count". This will be used
3985 * when deciding whether we exceeded zfs_scrub_error_blocks_per_txg. This
3986 * function is modelled after check_filesystem().
3987 */
3988 static int
3989 scrub_filesystem(spa_t *spa, uint64_t fs, zbookmark_err_phys_t *zep,
3990 int *count)
3991 {
3992 dsl_dataset_t *ds;
3993 dsl_pool_t *dp = spa->spa_dsl_pool;
3994 dsl_scan_t *scn = dp->dp_scan;
3995
3996 int error = dsl_dataset_hold_obj(dp, fs, FTAG, &ds);
3997 if (error != 0)
3998 return (error);
3999
4000 uint64_t latest_txg;
4001 uint64_t txg_to_consider = spa->spa_syncing_txg;
4002 boolean_t check_snapshot = B_TRUE;
4003
4004 error = find_birth_txg(ds, zep, &latest_txg);
4005
4006 /*
4007 * If find_birth_txg() errors out, err on the side of caution and
4008 * proceed; in the worst case we scrub all objects. If zep->zb_birth
4009 * is 0 (e.g. in case of encryption with unloaded keys) we also proceed
4010 * to scrub all objects.
4011 */
4012 if (error == 0 && zep->zb_birth == latest_txg) {
4013 /* Block was neither freed nor rewritten. */
4014 zbookmark_phys_t zb;
4015 zep_to_zb(fs, zep, &zb);
4016 scn->scn_zio_root = zio_root(spa, NULL, NULL,
4017 ZIO_FLAG_CANFAIL);
4018 /* We have already acquired the config lock for spa */
4019 read_by_block_level(scn, zb);
4020
4021 (void) zio_wait(scn->scn_zio_root);
4022 scn->scn_zio_root = NULL;
4023
4024 scn->errorscrub_phys.dep_examined++;
4025 scn->errorscrub_phys.dep_to_examine--;
4026 (*count)++;
4027 if ((*count) == zfs_scrub_error_blocks_per_txg ||
4028 dsl_error_scrub_check_suspend(scn, &zb)) {
4029 dsl_dataset_rele(ds, FTAG);
4030 return (SET_ERROR(EFAULT));
4031 }
4032
4033 check_snapshot = B_FALSE;
4034 } else if (error == 0) {
4035 txg_to_consider = latest_txg;
4036 }
4037
4038 /*
4039 * Retrieve the number of snapshots if the dataset is not a snapshot.
4040 */
4041 uint64_t snap_count = 0;
4042 if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {
4043
4044 error = zap_count(spa->spa_meta_objset,
4045 dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);
4046
4047 if (error != 0) {
4048 dsl_dataset_rele(ds, FTAG);
4049 return (error);
4050 }
4051 }
4052
4053 if (snap_count == 0) {
4054 /* Filesystem without snapshots. */
4055 dsl_dataset_rele(ds, FTAG);
4056 return (0);
4057 }
4058
4059 uint64_t snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
4060 uint64_t snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
4061
4062 dsl_dataset_rele(ds, FTAG);
4063
4064 /* Check only snapshots created from this file system. */
4065 while (snap_obj != 0 && zep->zb_birth < snap_obj_txg &&
4066 snap_obj_txg <= txg_to_consider) {
4067
4068 error = dsl_dataset_hold_obj(dp, snap_obj, FTAG, &ds);
4069 if (error != 0)
4070 return (error);
4071
4072 if (dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj != fs) {
4073 snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
4074 snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
4075 dsl_dataset_rele(ds, FTAG);
4076 continue;
4077 }
4078
4079 boolean_t affected = B_TRUE;
4080 if (check_snapshot) {
4081 uint64_t blk_txg;
4082 error = find_birth_txg(ds, zep, &blk_txg);
4083
4084 /*
4085 * Scrub the snapshot also when zb_birth == 0 or when
4086 * find_birth_txg() returns an error.
4087 */
4088 affected = (error == 0 && zep->zb_birth == blk_txg) ||
4089 (error != 0) || (zep->zb_birth == 0);
4090 }
4091
4092 /* Scrub snapshots. */
4093 if (affected) {
4094 zbookmark_phys_t zb;
4095 zep_to_zb(snap_obj, zep, &zb);
4096 scn->scn_zio_root = zio_root(spa, NULL, NULL,
4097 ZIO_FLAG_CANFAIL);
4098 /* We have already acquired the config lock for spa */
4099 read_by_block_level(scn, zb);
4100
4101 (void) zio_wait(scn->scn_zio_root);
4102 scn->scn_zio_root = NULL;
4103
4104 scn->errorscrub_phys.dep_examined++;
4105 scn->errorscrub_phys.dep_to_examine--;
4106 (*count)++;
4107 if ((*count) == zfs_scrub_error_blocks_per_txg ||
4108 dsl_error_scrub_check_suspend(scn, &zb)) {
4109 dsl_dataset_rele(ds, FTAG);
4110 return (EFAULT);
4111 }
4112 }
4113 snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
4114 snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
4115 dsl_dataset_rele(ds, FTAG);
4116 }
4117 return (0);
4118 }
4119
4120 void
4121 dsl_errorscrub_sync(dsl_pool_t *dp, dmu_tx_t *tx)
4122 {
4123 spa_t *spa = dp->dp_spa;
4124 dsl_scan_t *scn = dp->dp_scan;
4125
4126 /*
4127 * Only process scans in sync pass 1.
4128 */
4129
4130 if (spa_sync_pass(spa) > 1)
4131 return;
4132
4133 /*
4134 * If the spa is shutting down, then stop scanning. This will
4135 * ensure that the scan does not dirty any new data during the
4136 * shutdown phase.
4137 */
4138 if (spa_shutting_down(spa))
4139 return;
4140
4141 if (!dsl_errorscrub_active(scn) || dsl_errorscrub_is_paused(scn)) {
4142 return;
4143 }
4144
4145 if (dsl_scan_resilvering(scn->scn_dp)) {
4146 /* cancel the error scrub if resilver started */
4147 dsl_scan_cancel(scn->scn_dp);
4148 return;
4149 }
4150
4151 spa->spa_scrub_active = B_TRUE;
4152 scn->scn_sync_start_time = gethrtime();
4153
4154 /*
4155 * zfs_scan_suspend_progress can be set to disable scrub progress.
4156 * See more detailed comment in dsl_scan_sync().
4157 */
4158 if (zfs_scan_suspend_progress) {
4159 uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
4160 int mintime = zfs_scrub_min_time_ms;
4161
4162 while (zfs_scan_suspend_progress &&
4163 !txg_sync_waiting(scn->scn_dp) &&
4164 !spa_shutting_down(scn->scn_dp->dp_spa) &&
4165 NSEC2MSEC(scan_time_ns) < mintime) {
4166 delay(hz);
4167 scan_time_ns = gethrtime() - scn->scn_sync_start_time;
4168 }
4169 return;
4170 }
4171
4172 int i = 0;
4173 zap_attribute_t *za;
4174 zbookmark_phys_t *zb;
4175 boolean_t limit_exceeded = B_FALSE;
4176
4177 za = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP);
4178 zb = kmem_zalloc(sizeof (zbookmark_phys_t), KM_SLEEP);
4179
4180 if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
4181 for (; zap_cursor_retrieve(&scn->errorscrub_cursor, za) == 0;
4182 zap_cursor_advance(&scn->errorscrub_cursor)) {
4183 name_to_bookmark(za->za_name, zb);
4184
4185 scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
4186 NULL, ZIO_FLAG_CANFAIL);
4187 dsl_pool_config_enter(dp, FTAG);
4188 read_by_block_level(scn, *zb);
4189 dsl_pool_config_exit(dp, FTAG);
4190
4191 (void) zio_wait(scn->scn_zio_root);
4192 scn->scn_zio_root = NULL;
4193
4194 scn->errorscrub_phys.dep_examined += 1;
4195 scn->errorscrub_phys.dep_to_examine -= 1;
4196 i++;
4197 if (i == zfs_scrub_error_blocks_per_txg ||
4198 dsl_error_scrub_check_suspend(scn, zb)) {
4199 limit_exceeded = B_TRUE;
4200 break;
4201 }
4202 }
4203
4204 if (!limit_exceeded)
4205 dsl_errorscrub_done(scn, B_TRUE, tx);
4206
4207 dsl_errorscrub_sync_state(scn, tx);
4208 kmem_free(za, sizeof (*za));
4209 kmem_free(zb, sizeof (*zb));
4210 return;
4211 }
4212
4213 int error = 0;
4214 for (; zap_cursor_retrieve(&scn->errorscrub_cursor, za) == 0;
4215 zap_cursor_advance(&scn->errorscrub_cursor)) {
4216
4217 zap_cursor_t *head_ds_cursor;
4218 zap_attribute_t *head_ds_attr;
4219 zbookmark_err_phys_t head_ds_block;
4220
4221 head_ds_cursor = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP);
4222 head_ds_attr = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP);
4223
4224 uint64_t head_ds_err_obj = za->za_first_integer;
4225 uint64_t head_ds;
4226 name_to_object(za->za_name, &head_ds);
4227 boolean_t config_held = B_FALSE;
4228 uint64_t top_affected_fs;
4229
4230 for (zap_cursor_init(head_ds_cursor, spa->spa_meta_objset,
4231 head_ds_err_obj); zap_cursor_retrieve(head_ds_cursor,
4232 head_ds_attr) == 0; zap_cursor_advance(head_ds_cursor)) {
4233
4234 name_to_errphys(head_ds_attr->za_name, &head_ds_block);
4235
4236 /*
4237 * In case we are called from spa_sync the pool
4238 * config is already held.
4239 */
4240 if (!dsl_pool_config_held(dp)) {
4241 dsl_pool_config_enter(dp, FTAG);
4242 config_held = B_TRUE;
4243 }
4244
4245 error = find_top_affected_fs(spa,
4246 head_ds, &head_ds_block, &top_affected_fs);
4247 if (error)
4248 break;
4249
4250 error = scrub_filesystem(spa, top_affected_fs,
4251 &head_ds_block, &i);
4252
4253 if (error == SET_ERROR(EFAULT)) {
4254 limit_exceeded = B_TRUE;
4255 break;
4256 }
4257 }
4258
4259 zap_cursor_fini(head_ds_cursor);
4260 kmem_free(head_ds_cursor, sizeof (*head_ds_cursor));
4261 kmem_free(head_ds_attr, sizeof (*head_ds_attr));
4262
4263 if (config_held)
4264 dsl_pool_config_exit(dp, FTAG);
4265 }
4266
4267 kmem_free(za, sizeof (*za));
4268 kmem_free(zb, sizeof (*zb));
4269 if (!limit_exceeded)
4270 dsl_errorscrub_done(scn, B_TRUE, tx);
4271
4272 dsl_errorscrub_sync_state(scn, tx);
4273 }
4274
4275 /*
4276 * This is the primary entry point for scans that is called from syncing
4277 * context. Scans must happen entirely during syncing context so that we
4278 * can guarantee that blocks we are currently scanning will not change out
4279 * from under us. While a scan is active, this function controls how quickly
4280 * transaction groups proceed, instead of the normal handling provided by
4281 * txg_sync_thread().
4282 */
4283 void
4284 dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
4285 {
4286 int err = 0;
4287 dsl_scan_t *scn = dp->dp_scan;
4288 spa_t *spa = dp->dp_spa;
4289 state_sync_type_t sync_type = SYNC_OPTIONAL;
4290
4291 if (spa->spa_resilver_deferred &&
4292 !spa_feature_is_active(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
4293 spa_feature_incr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
4294
4295 /*
4296 * Check for scn_restart_txg before checking spa_load_state, so
4297 * that we can restart an old-style scan while the pool is being
4298 * imported (see dsl_scan_init). We also restart scans if there
4299 * is a deferred resilver and the user has manually disabled
4300 * deferred resilvers via the tunable.
4301 */
4302 if (dsl_scan_restarting(scn, tx) ||
4303 (spa->spa_resilver_deferred && zfs_resilver_disable_defer)) {
4304 pool_scan_func_t func = POOL_SCAN_SCRUB;
4305 dsl_scan_done(scn, B_FALSE, tx);
4306 if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
4307 func = POOL_SCAN_RESILVER;
4308 zfs_dbgmsg("restarting scan func=%u on %s txg=%llu",
4309 func, dp->dp_spa->spa_name, (longlong_t)tx->tx_txg);
4310 dsl_scan_setup_sync(&func, tx);
4311 }
4312
4313 /*
4314 * Only process scans in sync pass 1.
4315 */
4316 if (spa_sync_pass(spa) > 1)
4317 return;
4318
4319 /*
4320 * If the spa is shutting down, then stop scanning. This will
4321 * ensure that the scan does not dirty any new data during the
4322 * shutdown phase.
4323 */
4324 if (spa_shutting_down(spa))
4325 return;
4326
4327 /*
4328 * If the scan is inactive due to a stalled async destroy, try again.
4329 */
4330 if (!scn->scn_async_stalled && !dsl_scan_active(scn))
4331 return;
4332
4333 /* reset scan statistics */
4334 scn->scn_visited_this_txg = 0;
4335 scn->scn_dedup_frees_this_txg = 0;
4336 scn->scn_holes_this_txg = 0;
4337 scn->scn_lt_min_this_txg = 0;
4338 scn->scn_gt_max_this_txg = 0;
4339 scn->scn_ddt_contained_this_txg = 0;
4340 scn->scn_objsets_visited_this_txg = 0;
4341 scn->scn_avg_seg_size_this_txg = 0;
4342 scn->scn_segs_this_txg = 0;
4343 scn->scn_avg_zio_size_this_txg = 0;
4344 scn->scn_zios_this_txg = 0;
4345 scn->scn_suspending = B_FALSE;
4346 scn->scn_sync_start_time = gethrtime();
4347 spa->spa_scrub_active = B_TRUE;
4348
4349 /*
4350 * First process the async destroys. If we suspend, don't do
4351 * any scrubbing or resilvering. This ensures that there are no
4352 * async destroys while we are scanning, so the scan code doesn't
4353 * have to worry about traversing it. It is also faster to free the
4354 * blocks than to scrub them.
4355 */
4356 err = dsl_process_async_destroys(dp, tx);
4357 if (err != 0)
4358 return;
4359
4360 if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn))
4361 return;
4362
4363 /*
4364 * Wait a few txgs after importing to begin scanning so that
4365 * we can get the pool imported quickly.
4366 */
4367 if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS)
4368 return;
4369
4370 /*
4371 * zfs_scan_suspend_progress can be set to disable scan progress.
4372 * We don't want to spin the txg_sync thread, so we add a delay
4373 * here to simulate the time spent doing a scan. This is mostly
4374 * useful for testing and debugging.
4375 */
4376 if (zfs_scan_suspend_progress) {
4377 uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
4378 uint_t mintime = (scn->scn_phys.scn_func ==
4379 POOL_SCAN_RESILVER) ? zfs_resilver_min_time_ms :
4380 zfs_scrub_min_time_ms;
4381
4382 while (zfs_scan_suspend_progress &&
4383 !txg_sync_waiting(scn->scn_dp) &&
4384 !spa_shutting_down(scn->scn_dp->dp_spa) &&
4385 NSEC2MSEC(scan_time_ns) < mintime) {
4386 delay(hz);
4387 scan_time_ns = gethrtime() - scn->scn_sync_start_time;
4388 }
4389 return;
4390 }
4391
4392 /*
4393 * Disabled by default, set zfs_scan_report_txgs to report
4394 * average performance over the last zfs_scan_report_txgs TXGs.
4395 */
4396 if (zfs_scan_report_txgs != 0 &&
4397 tx->tx_txg % zfs_scan_report_txgs == 0) {
4398 scn->scn_issued_before_pass += spa->spa_scan_pass_issued;
4399 spa_scan_stat_init(spa);
4400 }
4401
4402 /*
4403 * It is possible to switch from unsorted to sorted at any time,
4404 * but afterwards the scan will remain sorted unless reloaded from
4405 * a checkpoint after a reboot.
4406 */
4407 if (!zfs_scan_legacy) {
4408 scn->scn_is_sorted = B_TRUE;
4409 if (scn->scn_last_checkpoint == 0)
4410 scn->scn_last_checkpoint = ddi_get_lbolt();
4411 }
4412
4413 /*
4414 * For sorted scans, determine what kind of work we will be doing
4415 * this txg based on our memory limitations and whether or not we
4416 * need to perform a checkpoint.
4417 */
4418 if (scn->scn_is_sorted) {
4419 /*
4420 * If we are over our checkpoint interval, set scn_clearing
4421 * so that we can begin checkpointing immediately. The
4422 * checkpoint allows us to save a consistent bookmark
4423 * representing how much data we have scrubbed so far.
4424 * Otherwise, use the memory limit to determine if we should
4425 * scan for metadata or start issuing scrub IOs. We accumulate
4426 * metadata until we hit our hard memory limit at which point
4427 * we issue scrub IOs until we are at our soft memory limit.
4428 */
4429 if (scn->scn_checkpointing ||
4430 ddi_get_lbolt() - scn->scn_last_checkpoint >
4431 SEC_TO_TICK(zfs_scan_checkpoint_intval)) {
4432 if (!scn->scn_checkpointing)
4433 zfs_dbgmsg("begin scan checkpoint for %s",
4434 spa->spa_name);
4435
4436 scn->scn_checkpointing = B_TRUE;
4437 scn->scn_clearing = B_TRUE;
4438 } else {
4439 boolean_t should_clear = dsl_scan_should_clear(scn);
4440 if (should_clear && !scn->scn_clearing) {
4441 zfs_dbgmsg("begin scan clearing for %s",
4442 spa->spa_name);
4443 scn->scn_clearing = B_TRUE;
4444 } else if (!should_clear && scn->scn_clearing) {
4445 zfs_dbgmsg("finish scan clearing for %s",
4446 spa->spa_name);
4447 scn->scn_clearing = B_FALSE;
4448 }
4449 }
4450 } else {
4451 ASSERT0(scn->scn_checkpointing);
4452 ASSERT0(scn->scn_clearing);
4453 }
4454
4455 if (!scn->scn_clearing && scn->scn_done_txg == 0) {
4456 /* Need to scan metadata for more blocks to scrub */
4457 dsl_scan_phys_t *scnp = &scn->scn_phys;
4458 taskqid_t prefetch_tqid;
4459
4460 /*
4461 * Calculate the max number of in-flight bytes for pool-wide
4462 * scanning operations (minimum 1MB, maximum 1/4 of arc_c_max).
4463 * Limits for the issuing phase are done per top-level vdev and
4464 * are handled separately.
4465 */
4466 scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20,
4467 zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa)));
4468
4469 if (scnp->scn_ddt_bookmark.ddb_class <=
4470 scnp->scn_ddt_class_max) {
4471 ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark));
4472 zfs_dbgmsg("doing scan sync for %s txg %llu; "
4473 "ddt bm=%llu/%llu/%llu/%llx",
4474 spa->spa_name,
4475 (longlong_t)tx->tx_txg,
4476 (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
4477 (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
4478 (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
4479 (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
4480 } else {
4481 zfs_dbgmsg("doing scan sync for %s txg %llu; "
4482 "bm=%llu/%llu/%llu/%llu",
4483 spa->spa_name,
4484 (longlong_t)tx->tx_txg,
4485 (longlong_t)scnp->scn_bookmark.zb_objset,
4486 (longlong_t)scnp->scn_bookmark.zb_object,
4487 (longlong_t)scnp->scn_bookmark.zb_level,
4488 (longlong_t)scnp->scn_bookmark.zb_blkid);
4489 }
4490
4491 scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
4492 NULL, ZIO_FLAG_CANFAIL);
4493
4494 scn->scn_prefetch_stop = B_FALSE;
4495 prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq,
4496 dsl_scan_prefetch_thread, scn, TQ_SLEEP);
4497 ASSERT(prefetch_tqid != TASKQID_INVALID);
4498
4499 dsl_pool_config_enter(dp, FTAG);
4500 dsl_scan_visit(scn, tx);
4501 dsl_pool_config_exit(dp, FTAG);
4502
4503 mutex_enter(&dp->dp_spa->spa_scrub_lock);
4504 scn->scn_prefetch_stop = B_TRUE;
4505 cv_broadcast(&spa->spa_scrub_io_cv);
4506 mutex_exit(&dp->dp_spa->spa_scrub_lock);
4507
4508 taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid);
4509 (void) zio_wait(scn->scn_zio_root);
4510 scn->scn_zio_root = NULL;
4511
4512 zfs_dbgmsg("scan visited %llu blocks of %s in %llums "
4513 "(%llu os's, %llu holes, %llu < mintxg, "
4514 "%llu in ddt, %llu > maxtxg)",
4515 (longlong_t)scn->scn_visited_this_txg,
4516 spa->spa_name,
4517 (longlong_t)NSEC2MSEC(gethrtime() -
4518 scn->scn_sync_start_time),
4519 (longlong_t)scn->scn_objsets_visited_this_txg,
4520 (longlong_t)scn->scn_holes_this_txg,
4521 (longlong_t)scn->scn_lt_min_this_txg,
4522 (longlong_t)scn->scn_ddt_contained_this_txg,
4523 (longlong_t)scn->scn_gt_max_this_txg);
4524
4525 if (!scn->scn_suspending) {
4526 ASSERT0(avl_numnodes(&scn->scn_queue));
4527 scn->scn_done_txg = tx->tx_txg + 1;
4528 if (scn->scn_is_sorted) {
4529 scn->scn_checkpointing = B_TRUE;
4530 scn->scn_clearing = B_TRUE;
4531 scn->scn_issued_before_pass +=
4532 spa->spa_scan_pass_issued;
4533 spa_scan_stat_init(spa);
4534 }
4535 zfs_dbgmsg("scan complete for %s txg %llu",
4536 spa->spa_name,
4537 (longlong_t)tx->tx_txg);
4538 }
4539 } else if (scn->scn_is_sorted && scn->scn_queues_pending != 0) {
4540 ASSERT(scn->scn_clearing);
4541
4542 /* need to issue scrubbing IOs from per-vdev queues */
4543 scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
4544 NULL, ZIO_FLAG_CANFAIL);
4545 scan_io_queues_run(scn);
4546 (void) zio_wait(scn->scn_zio_root);
4547 scn->scn_zio_root = NULL;
4548
4549 /* calculate and dprintf the current memory usage */
4550 (void) dsl_scan_should_clear(scn);
4551 dsl_scan_update_stats(scn);
4552
4553 zfs_dbgmsg("scan issued %llu blocks for %s (%llu segs) "
4554 "in %llums (avg_block_size = %llu, avg_seg_size = %llu)",
4555 (longlong_t)scn->scn_zios_this_txg,
4556 spa->spa_name,
4557 (longlong_t)scn->scn_segs_this_txg,
4558 (longlong_t)NSEC2MSEC(gethrtime() -
4559 scn->scn_sync_start_time),
4560 (longlong_t)scn->scn_avg_zio_size_this_txg,
4561 (longlong_t)scn->scn_avg_seg_size_this_txg);
4562 } else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) {
4563 /* Finished with everything. Mark the scrub as complete */
4564 zfs_dbgmsg("scan issuing complete txg %llu for %s",
4565 (longlong_t)tx->tx_txg,
4566 spa->spa_name);
4567 ASSERT3U(scn->scn_done_txg, !=, 0);
4568 ASSERT0(spa->spa_scrub_inflight);
4569 ASSERT0(scn->scn_queues_pending);
4570 dsl_scan_done(scn, B_TRUE, tx);
4571 sync_type = SYNC_MANDATORY;
4572 }
4573
4574 dsl_scan_sync_state(scn, tx, sync_type);
4575 }
4576
4577 static void
4578 count_block_issued(spa_t *spa, const blkptr_t *bp, boolean_t all)
4579 {
4580 /*
4581 * Don't count embedded bp's, since we already did the work of
4582 * scanning these when we scanned the containing block.
4583 */
4584 if (BP_IS_EMBEDDED(bp))
4585 return;
4586
4587 /*
4588 * Update the spa's stats on how many bytes we have issued.
4589 * Sequential scrubs create a zio for each DVA of the bp. Each
4590 * of these will include all DVAs for repair purposes, but the
4591 * zio code will only try the first one unless there is an issue.
4592 * Therefore, we should only count the first DVA for these IOs.
4593 */
4594 atomic_add_64(&spa->spa_scan_pass_issued,
4595 all ? BP_GET_ASIZE(bp) : DVA_GET_ASIZE(&bp->blk_dva[0]));
4596 }
4597
4598 static void
4599 count_block_skipped(dsl_scan_t *scn, const blkptr_t *bp, boolean_t all)
4600 {
4601 if (BP_IS_EMBEDDED(bp))
4602 return;
4603 atomic_add_64(&scn->scn_phys.scn_skipped,
4604 all ? BP_GET_ASIZE(bp) : DVA_GET_ASIZE(&bp->blk_dva[0]));
4605 }
4606
4607 static void
4608 count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
4609 {
4610 /*
4611 * If we resume after a reboot, zab will be NULL; don't record
4612 * incomplete stats in that case.
4613 */
4614 if (zab == NULL)
4615 return;
4616
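	/*
	 * Each block is tallied into four buckets selected by the two-bit
	 * loop index below: (its level, its type), (its level, DMU_OT_TOTAL),
	 * (DN_MAX_LEVELS, its type) and (DN_MAX_LEVELS, DMU_OT_TOTAL), so
	 * both per-level and pool-wide roll-ups stay up to date.
	 */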
4617 for (int i = 0; i < 4; i++) {
4618 int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
4619 int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
4620
4621 if (t & DMU_OT_NEWTYPE)
4622 t = DMU_OT_OTHER;
4623 zfs_blkstat_t *zb = &zab->zab_type[l][t];
4624 int equal;
4625
4626 zb->zb_count++;
4627 zb->zb_asize += BP_GET_ASIZE(bp);
4628 zb->zb_lsize += BP_GET_LSIZE(bp);
4629 zb->zb_psize += BP_GET_PSIZE(bp);
4630 zb->zb_gangs += BP_COUNT_GANG(bp);
4631
4632 switch (BP_GET_NDVAS(bp)) {
4633 case 2:
4634 if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
4635 DVA_GET_VDEV(&bp->blk_dva[1]))
4636 zb->zb_ditto_2_of_2_samevdev++;
4637 break;
4638 case 3:
4639 equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
4640 DVA_GET_VDEV(&bp->blk_dva[1])) +
4641 (DVA_GET_VDEV(&bp->blk_dva[0]) ==
4642 DVA_GET_VDEV(&bp->blk_dva[2])) +
4643 (DVA_GET_VDEV(&bp->blk_dva[1]) ==
4644 DVA_GET_VDEV(&bp->blk_dva[2]));
4645 if (equal == 1)
4646 zb->zb_ditto_2_of_3_samevdev++;
4647 else if (equal == 3)
4648 zb->zb_ditto_3_of_3_samevdev++;
4649 break;
4650 }
4651 }
4652 }
4653
4654 static void
4655 scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio)
4656 {
4657 avl_index_t idx;
4658 dsl_scan_t *scn = queue->q_scn;
4659
4660 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
4661
4662 if (unlikely(avl_is_empty(&queue->q_sios_by_addr)))
4663 atomic_add_64(&scn->scn_queues_pending, 1);
4664 if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) {
4665 /* block is already scheduled for reading */
4666 sio_free(sio);
4667 return;
4668 }
4669 avl_insert(&queue->q_sios_by_addr, sio, idx);
4670 queue->q_sio_memused += SIO_GET_MUSED(sio);
4671 range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio),
4672 SIO_GET_ASIZE(sio));
4673 }
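/*
 * Each queued block is thus tracked twice: q_sios_by_addr holds the
 * individual scan_io_t records sorted by offset, while q_exts_by_addr is a
 * gap-tolerant range tree that coalesces those offsets into larger extents
 * (with q_exts_by_size keeping the same extents ordered by score). A block
 * that is already scheduled is simply dropped by the check above.
 */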
4674
4675 /*
4676 * Given all the info we got from our metadata scanning process, we
4677 * construct a scan_io_t and insert it into the scan sorting queue. The
4678 * I/O must already be suitable for us to process. This is controlled
4679 * by dsl_scan_enqueue().
4680 */
4681 static void
4682 scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i,
4683 int zio_flags, const zbookmark_phys_t *zb)
4684 {
4685 scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp));
4686
4687 ASSERT0(BP_IS_GANG(bp));
4688 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
4689
4690 bp2sio(bp, sio, dva_i);
4691 sio->sio_flags = zio_flags;
4692 sio->sio_zb = *zb;
4693
4694 queue->q_last_ext_addr = -1;
4695 scan_io_queue_insert_impl(queue, sio);
4696 }
4697
4698 /*
4699 * Given a set of I/O parameters as discovered by the metadata traversal
4700 * process, attempts to place the I/O into the sorted queues (if allowed),
4701 * or immediately executes the I/O.
4702 */
4703 static void
4704 dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
4705 const zbookmark_phys_t *zb)
4706 {
4707 spa_t *spa = dp->dp_spa;
4708
4709 ASSERT(!BP_IS_EMBEDDED(bp));
4710
4711 /*
4712 * Gang blocks are hard to issue sequentially, so we just issue them
4713 * here immediately instead of queuing them.
4714 */
4715 if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) {
4716 scan_exec_io(dp, bp, zio_flags, zb, NULL);
4717 return;
4718 }
4719
4720 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
4721 dva_t dva;
4722 vdev_t *vdev;
4723
4724 dva = bp->blk_dva[i];
4725 vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva));
4726 ASSERT(vdev != NULL);
4727
4728 mutex_enter(&vdev->vdev_scan_io_queue_lock);
4729 if (vdev->vdev_scan_io_queue == NULL)
4730 vdev->vdev_scan_io_queue = scan_io_queue_create(vdev);
4731 ASSERT(dp->dp_scan != NULL);
4732 scan_io_queue_insert(vdev->vdev_scan_io_queue, bp,
4733 i, zio_flags, zb);
4734 mutex_exit(&vdev->vdev_scan_io_queue_lock);
4735 }
4736 }
4737
4738 static int
4739 dsl_scan_scrub_cb(dsl_pool_t *dp,
4740 const blkptr_t *bp, const zbookmark_phys_t *zb)
4741 {
4742 dsl_scan_t *scn = dp->dp_scan;
4743 spa_t *spa = dp->dp_spa;
4744 uint64_t phys_birth = BP_GET_BIRTH(bp);
4745 size_t psize = BP_GET_PSIZE(bp);
4746 boolean_t needs_io = B_FALSE;
4747 int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
4748
4749 count_block(dp->dp_blkstats, bp);
4750 if (phys_birth <= scn->scn_phys.scn_min_txg ||
4751 phys_birth >= scn->scn_phys.scn_max_txg) {
4752 count_block_skipped(scn, bp, B_TRUE);
4753 return (0);
4754 }
4755
4756 /* Embedded BP's have phys_birth==0, so we reject them above. */
4757 ASSERT(!BP_IS_EMBEDDED(bp));
4758
4759 ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
4760 if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
4761 zio_flags |= ZIO_FLAG_SCRUB;
4762 needs_io = B_TRUE;
4763 } else {
4764 ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
4765 zio_flags |= ZIO_FLAG_RESILVER;
4766 needs_io = B_FALSE;
4767 }
4768
4769 /* If it's an intent log block, failure is expected. */
4770 if (zb->zb_level == ZB_ZIL_LEVEL)
4771 zio_flags |= ZIO_FLAG_SPECULATIVE;
4772
4773 for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
4774 const dva_t *dva = &bp->blk_dva[d];
4775
4776 /*
4777 * Keep track of how much data we've examined so that
4778 * zpool(8) status can make useful progress reports.
4779 */
4780 uint64_t asize = DVA_GET_ASIZE(dva);
4781 scn->scn_phys.scn_examined += asize;
4782 spa->spa_scan_pass_exam += asize;
4783
4784 /* if it's a resilver, this may not be in the target range */
4785 if (!needs_io)
4786 needs_io = dsl_scan_need_resilver(spa, dva, psize,
4787 phys_birth);
4788 }
4789
4790 if (needs_io && !zfs_no_scrub_io) {
4791 dsl_scan_enqueue(dp, bp, zio_flags, zb);
4792 } else {
4793 count_block_skipped(scn, bp, B_TRUE);
4794 }
4795
4796 /* do not relocate this block */
4797 return (0);
4798 }
4799
4800 static void
4801 dsl_scan_scrub_done(zio_t *zio)
4802 {
4803 spa_t *spa = zio->io_spa;
4804 blkptr_t *bp = zio->io_bp;
4805 dsl_scan_io_queue_t *queue = zio->io_private;
4806
4807 abd_free(zio->io_abd);
4808
4809 if (queue == NULL) {
4810 mutex_enter(&spa->spa_scrub_lock);
4811 ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
4812 spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
4813 cv_broadcast(&spa->spa_scrub_io_cv);
4814 mutex_exit(&spa->spa_scrub_lock);
4815 } else {
4816 mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock);
4817 ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp));
4818 queue->q_inflight_bytes -= BP_GET_PSIZE(bp);
4819 cv_broadcast(&queue->q_zio_cv);
4820 mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock);
4821 }
4822
4823 if (zio->io_error && (zio->io_error != ECKSUM ||
4824 !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
4825 if (dsl_errorscrubbing(spa->spa_dsl_pool) &&
4826 !dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan)) {
4827 atomic_inc_64(&spa->spa_dsl_pool->dp_scan
4828 ->errorscrub_phys.dep_errors);
4829 } else {
4830 atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys
4831 .scn_errors);
4832 }
4833 }
4834 }
4835
4836 /*
4837 * Given a scanning zio's information, executes the zio. The zio need
4838 * not necessarily be sortable; this function simply executes the
4839 * zio, no matter what it is. The optional queue argument allows the
4840 * caller to request per-top-level-vdev I/O rate limiting instead of
4841 * the legacy global limiting.
4842 */
4843 static void
4844 scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
4845 const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue)
4846 {
4847 spa_t *spa = dp->dp_spa;
4848 dsl_scan_t *scn = dp->dp_scan;
4849 size_t size = BP_GET_PSIZE(bp);
4850 abd_t *data = abd_alloc_for_io(size, B_FALSE);
4851 zio_t *pio;
4852
4853 if (queue == NULL) {
4854 ASSERT3U(scn->scn_maxinflight_bytes, >, 0);
4855 mutex_enter(&spa->spa_scrub_lock);
4856 while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)
4857 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
4858 spa->spa_scrub_inflight += BP_GET_PSIZE(bp);
4859 mutex_exit(&spa->spa_scrub_lock);
4860 pio = scn->scn_zio_root;
4861 } else {
4862 kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
4863
4864 ASSERT3U(queue->q_maxinflight_bytes, >, 0);
4865 mutex_enter(q_lock);
4866 while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes)
4867 cv_wait(&queue->q_zio_cv, q_lock);
4868 queue->q_inflight_bytes += BP_GET_PSIZE(bp);
4869 pio = queue->q_zio;
4870 mutex_exit(q_lock);
4871 }
4872
4873 ASSERT(pio != NULL);
4874 count_block_issued(spa, bp, queue == NULL);
4875 zio_nowait(zio_read(pio, spa, bp, data, size, dsl_scan_scrub_done,
4876 queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
4877 }
4878
4879 /*
4880 * This is the primary extent sorting algorithm. We balance two parameters:
4881 * 1) how many bytes of I/O are in an extent
4882 * 2) how well the extent is filled with I/O (as a fraction of its total size)
4883 * Since we allow extents to have gaps between their constituent I/Os, it's
4884 * possible to have a fairly large extent that contains the same amount of
4885  * I/O bytes as a much smaller extent, which just packs the I/O more tightly.
4886 * The algorithm sorts based on a score calculated from the extent's size,
4887 * the relative fill volume (in %) and a "fill weight" parameter that controls
4888 * the split between whether we prefer larger extents or more well populated
4889 * extents:
4890 *
4891 * SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT)
4892 *
4893 * Example:
4894 * 1) assume extsz = 64 MiB
4895 * 2) assume fill = 32 MiB (extent is half full)
4896 * 3) assume fill_weight = 3
4897 * 4) SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100
4898 * SCORE = 32M + (50 * 3 * 32M) / 100
4899 * SCORE = 32M + (4800M / 100)
4900 * SCORE = 32M + 48M
4901 * ^ ^
4902 * | +--- final total relative fill-based score
4903 * +--------- final total fill-based score
4904 * SCORE = 80M
4905 *
4906  * As can be seen, at fill_weight=3, the algorithm is slightly biased towards
4907 * extents that are more completely filled (in a 3:2 ratio) vs just larger.
4908 * Note that as an optimization, we replace multiplication and division by
4909 * 100 with bitshifting by 7 (which effectively multiplies and divides by 128).
4910 *
4911  * Since we do not care if one extent is only a few percent better than
4912  * another, we compress the score into 6 bits via a binary logarithm
4913  * (highbit64()) and store it in the high bits of the offset, which are
4914  * otherwise unused due to ashift. This lets q_exts_by_size B-tree elements
4915  * be just 64 bits, comparable with a single operation. It also makes scrubs
4916  * more sequential and less likely to reshuffle extents on minor changes.
4917 */
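/*
 * For illustration only (reusing the example numbers above, not values taken
 * from a real pool), the shift-by-7 form implemented in ext_size_value()
 * below produces the same score:
 *   (32M << 7) / 64M      = 64    (fill ratio expressed in 1/128ths)
 *   (64 * 3 * 32M) >> 7   = 48M   (relative fill-based component)
 *   32M + 48M             = 80M   (matches the exact formula above)
 */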
4918 __attribute__((always_inline)) inline
4919 static int
4920 ext_size_compare(const void *x, const void *y)
4921 {
4922 const uint64_t *a = x, *b = y;
4923
4924 return (TREE_CMP(*a, *b));
4925 }
4926
4927 ZFS_BTREE_FIND_IN_BUF_FUNC(ext_size_find_in_buf, uint64_t,
4928 ext_size_compare)
4929
4930 static void
4931 ext_size_create(range_tree_t *rt, void *arg)
4932 {
4933 (void) rt;
4934 zfs_btree_t *size_tree = arg;
4935
4936 zfs_btree_create(size_tree, ext_size_compare, ext_size_find_in_buf,
4937 sizeof (uint64_t));
4938 }
4939
4940 static void
4941 ext_size_destroy(range_tree_t *rt, void *arg)
4942 {
4943 (void) rt;
4944 zfs_btree_t *size_tree = arg;
4945 ASSERT0(zfs_btree_numnodes(size_tree));
4946
4947 zfs_btree_destroy(size_tree);
4948 }
4949
4950 static uint64_t
4951 ext_size_value(range_tree_t *rt, range_seg_gap_t *rsg)
4952 {
4953 (void) rt;
4954 uint64_t size = rsg->rs_end - rsg->rs_start;
4955 uint64_t score = rsg->rs_fill + ((((rsg->rs_fill << 7) / size) *
4956 fill_weight * rsg->rs_fill) >> 7);
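	/*
	 * rs_start holds the raw (ashift-shifted) offset, so with
	 * rt_shift >= 8 its top 8 bits are unused; pack the compressed
	 * score there so a single 64-bit compare orders extents by score
	 * first and by starting offset second.
	 */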
4957 ASSERT3U(rt->rt_shift, >=, 8);
4958 return (((uint64_t)(64 - highbit64(score)) << 56) | rsg->rs_start);
4959 }
4960
4961 static void
4962 ext_size_add(range_tree_t *rt, range_seg_t *rs, void *arg)
4963 {
4964 zfs_btree_t *size_tree = arg;
4965 ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP);
4966 uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
4967 zfs_btree_add(size_tree, &v);
4968 }
4969
4970 static void
4971 ext_size_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
4972 {
4973 zfs_btree_t *size_tree = arg;
4974 ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP);
4975 uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
4976 zfs_btree_remove(size_tree, &v);
4977 }
4978
4979 static void
4980 ext_size_vacate(range_tree_t *rt, void *arg)
4981 {
4982 zfs_btree_t *size_tree = arg;
4983 zfs_btree_clear(size_tree);
4984 zfs_btree_destroy(size_tree);
4985
4986 ext_size_create(rt, arg);
4987 }
4988
4989 static const range_tree_ops_t ext_size_ops = {
4990 .rtop_create = ext_size_create,
4991 .rtop_destroy = ext_size_destroy,
4992 .rtop_add = ext_size_add,
4993 .rtop_remove = ext_size_remove,
4994 .rtop_vacate = ext_size_vacate
4995 };
4996
4997 /*
4998 * Comparator for the q_sios_by_addr tree. Sorting is simply performed
4999 * based on LBA-order (from lowest to highest).
5000 */
5001 static int
5002 sio_addr_compare(const void *x, const void *y)
5003 {
5004 const scan_io_t *a = x, *b = y;
5005
5006 return (TREE_CMP(SIO_GET_OFFSET(a), SIO_GET_OFFSET(b)));
5007 }
5008
5009 /* Scan I/O queues are created on demand when first needed. */
5010 static dsl_scan_io_queue_t *
5011 scan_io_queue_create(vdev_t *vd)
5012 {
5013 dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
5014 dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP);
5015
5016 q->q_scn = scn;
5017 q->q_vd = vd;
5018 q->q_sio_memused = 0;
5019 q->q_last_ext_addr = -1;
5020 cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL);
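	/*
	 * Queued work is tracked in two structures: a gap-tolerant range
	 * tree of extents sorted by address (with a companion size/score
	 * B-tree maintained via ext_size_ops), and an AVL tree of the
	 * individual scan_io_t's sorted by LBA.
	 */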
5021 q->q_exts_by_addr = range_tree_create_gap(&ext_size_ops, RANGE_SEG_GAP,
5022 &q->q_exts_by_size, 0, vd->vdev_ashift, zfs_scan_max_ext_gap);
5023 avl_create(&q->q_sios_by_addr, sio_addr_compare,
5024 sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node));
5025
5026 return (q);
5027 }
5028
5029 /*
5030 * Destroys a scan queue and all segments and scan_io_t's contained in it.
5031  * No further execution of I/O occurs; anything pending in the queue is
5032 * simply freed without being executed.
5033 */
5034 void
5035 dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue)
5036 {
5037 dsl_scan_t *scn = queue->q_scn;
5038 scan_io_t *sio;
5039 void *cookie = NULL;
5040
5041 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
5042
5043 if (!avl_is_empty(&queue->q_sios_by_addr))
5044 atomic_add_64(&scn->scn_queues_pending, -1);
5045 while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) !=
5046 NULL) {
5047 ASSERT(range_tree_contains(queue->q_exts_by_addr,
5048 SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio)));
5049 queue->q_sio_memused -= SIO_GET_MUSED(sio);
5050 sio_free(sio);
5051 }
5052
5053 ASSERT0(queue->q_sio_memused);
5054 range_tree_vacate(queue->q_exts_by_addr, NULL, queue);
5055 range_tree_destroy(queue->q_exts_by_addr);
5056 avl_destroy(&queue->q_sios_by_addr);
5057 cv_destroy(&queue->q_zio_cv);
5058
5059 kmem_free(queue, sizeof (*queue));
5060 }
5061
5062 /*
5063  * Properly transfers a dsl_scan_io_queue_t from `svd' to `tvd'. This is
5064 * called on behalf of vdev_top_transfer when creating or destroying
5065 * a mirror vdev due to zpool attach/detach.
5066 */
5067 void
5068 dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd)
5069 {
5070 mutex_enter(&svd->vdev_scan_io_queue_lock);
5071 mutex_enter(&tvd->vdev_scan_io_queue_lock);
5072
5073 VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL);
5074 tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue;
5075 svd->vdev_scan_io_queue = NULL;
5076 if (tvd->vdev_scan_io_queue != NULL)
5077 tvd->vdev_scan_io_queue->q_vd = tvd;
5078
5079 mutex_exit(&tvd->vdev_scan_io_queue_lock);
5080 mutex_exit(&svd->vdev_scan_io_queue_lock);
5081 }
5082
5083 static void
5084 scan_io_queues_destroy(dsl_scan_t *scn)
5085 {
5086 vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
5087
5088 for (uint64_t i = 0; i < rvd->vdev_children; i++) {
5089 vdev_t *tvd = rvd->vdev_child[i];
5090
5091 mutex_enter(&tvd->vdev_scan_io_queue_lock);
5092 if (tvd->vdev_scan_io_queue != NULL)
5093 dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue);
5094 tvd->vdev_scan_io_queue = NULL;
5095 mutex_exit(&tvd->vdev_scan_io_queue_lock);
5096 }
5097 }
5098
5099 static void
5100 dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
5101 {
5102 dsl_pool_t *dp = spa->spa_dsl_pool;
5103 dsl_scan_t *scn = dp->dp_scan;
5104 vdev_t *vdev;
5105 kmutex_t *q_lock;
5106 dsl_scan_io_queue_t *queue;
5107 scan_io_t *srch_sio, *sio;
5108 avl_index_t idx;
5109 uint64_t start, size;
5110
5111 vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i]));
5112 ASSERT(vdev != NULL);
5113 q_lock = &vdev->vdev_scan_io_queue_lock;
5114 queue = vdev->vdev_scan_io_queue;
5115
5116 mutex_enter(q_lock);
5117 if (queue == NULL) {
5118 mutex_exit(q_lock);
5119 return;
5120 }
5121
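	/*
	 * Build a search key from this DVA of the freed block so we can
	 * look for a matching queued scan_io_t.
	 */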
5122 srch_sio = sio_alloc(BP_GET_NDVAS(bp));
5123 bp2sio(bp, srch_sio, dva_i);
5124 start = SIO_GET_OFFSET(srch_sio);
5125 size = SIO_GET_ASIZE(srch_sio);
5126
5127 /*
5128 * We can find the zio in two states:
5129 * 1) Cold, just sitting in the queue of zio's to be issued at
5130 * some point in the future. In this case, all we do is
5131 * remove the zio from the q_sios_by_addr tree, decrement
5132 * its data volume from the containing range_seg_t and
5133 * resort the q_exts_by_size tree to reflect that the
5134 * range_seg_t has lost some of its 'fill'. We don't shorten
5135 * the range_seg_t - this is usually rare enough not to be
5136  * worth the extra hassle of trying to keep track of precise
5137 * extent boundaries.
5138 * 2) Hot, where the zio is currently in-flight in
5139 * dsl_scan_issue_ios. In this case, we can't simply
5140 * reach in and stop the in-flight zio's, so we instead
5141 * block the caller. Eventually, dsl_scan_issue_ios will
5142 * be done with issuing the zio's it gathered and will
5143 * signal us.
5144 */
5145 sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
5146 sio_free(srch_sio);
5147
5148 if (sio != NULL) {
5149 blkptr_t tmpbp;
5150
5151 /* Got it while it was cold in the queue */
5152 ASSERT3U(start, ==, SIO_GET_OFFSET(sio));
5153 ASSERT3U(size, ==, SIO_GET_ASIZE(sio));
5154 avl_remove(&queue->q_sios_by_addr, sio);
5155 if (avl_is_empty(&queue->q_sios_by_addr))
5156 atomic_add_64(&scn->scn_queues_pending, -1);
5157 queue->q_sio_memused -= SIO_GET_MUSED(sio);
5158
5159 ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size));
5160 range_tree_remove_fill(queue->q_exts_by_addr, start, size);
5161
5162 /* count the block as though we skipped it */
5163 sio2bp(sio, &tmpbp);
5164 count_block_skipped(scn, &tmpbp, B_FALSE);
5165
5166 sio_free(sio);
5167 }
5168 mutex_exit(q_lock);
5169 }
5170
5171 /*
5172  * Callback invoked when a zio_free() zio is executing. This needs to be
5173  * intercepted to prevent the freed portion of disk space from being
5174  * reallocated and overwritten while we still have the corresponding I/O
5175  * queued up for processing.
5176 */
5177 void
5178 dsl_scan_freed(spa_t *spa, const blkptr_t *bp)
5179 {
5180 dsl_pool_t *dp = spa->spa_dsl_pool;
5181 dsl_scan_t *scn = dp->dp_scan;
5182
5183 ASSERT(!BP_IS_EMBEDDED(bp));
5184 ASSERT(scn != NULL);
5185 if (!dsl_scan_is_running(scn))
5186 return;
5187
5188 for (int i = 0; i < BP_GET_NDVAS(bp); i++)
5189 dsl_scan_freed_dva(spa, bp, i);
5190 }
5191
5192 /*
5193  * Check if a vdev needs resilvering (i.e. has a non-empty DTL); if so and a
5194  * resilver has not already started, start one. Otherwise, restart only if
5195  * the max txg in the DTL range is greater than the max txg of the current
5196  * scan. If the DTL max is less than or equal to the scan max, the vdev has
5197  * not missed any new data since the resilver started, so no restart is needed.
5198 */
5199 void
5200 dsl_scan_assess_vdev(dsl_pool_t *dp, vdev_t *vd)
5201 {
5202 uint64_t min, max;
5203
5204 if (!vdev_resilver_needed(vd, &min, &max))
5205 return;
5206
5207 if (!dsl_scan_resilvering(dp)) {
5208 spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
5209 return;
5210 }
5211
5212 if (max <= dp->dp_scan->scn_phys.scn_max_txg)
5213 return;
5214
5215 /* restart is needed, check if it can be deferred */
5216 if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
5217 vdev_defer_resilver(vd);
5218 else
5219 spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
5220 }
5221
5222 ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, U64, ZMOD_RW,
5223 "Max bytes in flight per leaf vdev for scrubs and resilvers");
5224
5225 ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, UINT, ZMOD_RW,
5226 "Min millisecs to scrub per txg");
5227
5228 ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, UINT, ZMOD_RW,
5229 "Min millisecs to obsolete per txg");
5230
5231 ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, UINT, ZMOD_RW,
5232 "Min millisecs to free per txg");
5233
5234 ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, UINT, ZMOD_RW,
5235 "Min millisecs to resilver per txg");
5236
5237 ZFS_MODULE_PARAM(zfs, zfs_, scan_suspend_progress, INT, ZMOD_RW,
5238 "Set to prevent scans from progressing");
5239
5240 ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_io, INT, ZMOD_RW,
5241 "Set to disable scrub I/O");
5242
5243 ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_prefetch, INT, ZMOD_RW,
5244 "Set to disable scrub prefetching");
5245
5246 ZFS_MODULE_PARAM(zfs, zfs_, async_block_max_blocks, U64, ZMOD_RW,
5247 "Max number of blocks freed in one txg");
5248
5249 ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, U64, ZMOD_RW,
5250 "Max number of dedup blocks freed in one txg");
5251
5252 ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW,
5253 "Enable processing of the free_bpobj");
5254
5255 ZFS_MODULE_PARAM(zfs, zfs_, scan_blkstats, INT, ZMOD_RW,
5256 "Enable block statistics calculation during scrub");
5257
5258 ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, UINT, ZMOD_RW,
5259 "Fraction of RAM for scan hard limit");
5260
5261 ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, UINT, ZMOD_RW,
5262 "IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size");
5263
5264 ZFS_MODULE_PARAM(zfs, zfs_, scan_legacy, INT, ZMOD_RW,
5265 "Scrub using legacy non-sequential method");
5266
5267 ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, UINT, ZMOD_RW,
5268 "Scan progress on-disk checkpointing interval");
5269
5270 ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, U64, ZMOD_RW,
5271 "Max gap in bytes between sequential scrub / resilver I/Os");
5272
5273 ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, UINT, ZMOD_RW,
5274 "Fraction of hard limit used as soft limit");
5275
5276 ZFS_MODULE_PARAM(zfs, zfs_, scan_strict_mem_lim, INT, ZMOD_RW,
5277 "Tunable to attempt to reduce lock contention");
5278
5279 ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, UINT, ZMOD_RW,
5280 "Tunable to adjust bias towards more filled segments during scans");
5281
5282 ZFS_MODULE_PARAM(zfs, zfs_, scan_report_txgs, UINT, ZMOD_RW,
5283 "Tunable to report resilver performance over the last N txgs");
5284
5285 ZFS_MODULE_PARAM(zfs, zfs_, resilver_disable_defer, INT, ZMOD_RW,
5286 "Process all resilvers immediately");
5287
5288 ZFS_MODULE_PARAM(zfs, zfs_, scrub_error_blocks_per_txg, UINT, ZMOD_RW,
5289 "Error blocks to be scrubbed in one txg");
5290
5291