1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2022 by Delphix. All rights reserved.
24 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2017, Intel Corporation.
26 * Copyright (c) 2019, 2023, 2024, Klara Inc.
27 * Copyright (c) 2019, Allan Jude
28 * Copyright (c) 2021, Datto, Inc.
29 */
30
31 #include <sys/sysmacros.h>
32 #include <sys/zfs_context.h>
33 #include <sys/fm/fs/zfs.h>
34 #include <sys/spa.h>
35 #include <sys/txg.h>
36 #include <sys/spa_impl.h>
37 #include <sys/vdev_impl.h>
38 #include <sys/vdev_trim.h>
39 #include <sys/zio_impl.h>
40 #include <sys/zio_compress.h>
41 #include <sys/zio_checksum.h>
42 #include <sys/dmu_objset.h>
43 #include <sys/arc.h>
44 #include <sys/brt.h>
45 #include <sys/ddt.h>
46 #include <sys/blkptr.h>
47 #include <sys/zfeature.h>
48 #include <sys/dsl_scan.h>
49 #include <sys/metaslab_impl.h>
50 #include <sys/time.h>
51 #include <sys/trace_zfs.h>
52 #include <sys/abd.h>
53 #include <sys/dsl_crypt.h>
54 #include <cityhash.h>
55
56 /*
57 * ==========================================================================
58 * I/O type descriptions
59 * ==========================================================================
60 */
61 const char *const zio_type_name[ZIO_TYPES] = {
62 /*
63 * Note: Linux kernel thread name length is limited
64	 * so these names will differ from upstream OpenZFS.
65 */
66 "z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_flush", "z_trim"
67 };
68
69 int zio_dva_throttle_enabled = B_TRUE;
70 static int zio_deadman_log_all = B_FALSE;
71
72 /*
73 * ==========================================================================
74 * I/O kmem caches
75 * ==========================================================================
76 */
77 static kmem_cache_t *zio_cache;
78 static kmem_cache_t *zio_link_cache;
79 kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
80 kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
81 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
82 static uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
83 static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
84 #endif
85
86 /* Mark IOs as "slow" if they take longer than 30 seconds */
87 static uint_t zio_slow_io_ms = (30 * MILLISEC);
88
89 #define BP_SPANB(indblkshift, level) \
90 (((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
91 #define COMPARE_META_LEVEL 0x80000000ul
92 /*
93 * The following actions directly affect the spa's sync-to-convergence logic.
94 * The values below define the sync pass when we start performing the action.
95 * Care should be taken when changing these values as they directly impact
96 * spa_sync() performance. Tuning these values may introduce subtle performance
97 * pathologies and should only be done in the context of performance analysis.
98 * These tunables will eventually be removed and replaced with #defines once
99 * enough analysis has been done to determine optimal values.
100 *
101 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
102 * regular blocks are not deferred.
103 *
104 * Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
105 * compression (including of metadata). In practice, we don't have this
106 * many sync passes, so this has no effect.
107 *
108 * The original intent was that disabling compression would help the sync
109 * passes to converge. However, in practice disabling compression increases
110 * the average number of sync passes, because when we turn compression off, a
111 * lot of blocks' sizes will change and thus we have to re-allocate (not
112 * overwrite) them. It also increases the number of 128KB allocations (e.g.
113 * for indirect blocks and spacemaps) because these will not be compressed.
114 * The 128K allocations are especially detrimental to performance on highly
115 * fragmented systems, which may have very few free segments of this size,
116 * and may need to load new metaslabs to satisfy 128K allocations.
117 */
118
119 /* defer frees starting in this pass */
120 uint_t zfs_sync_pass_deferred_free = 2;
121
122 /* don't compress starting in this pass */
123 static uint_t zfs_sync_pass_dont_compress = 8;
124
125 /* rewrite new bps starting in this pass */
126 static uint_t zfs_sync_pass_rewrite = 2;
127
128 /*
129 * An allocating zio is one that either currently has the DVA allocate
130 * stage set or will have it later in its lifetime.
131 */
132 #define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)
133
134 /*
135 * Enable smaller cores by excluding metadata
136 * allocations as well.
137 */
138 int zio_exclude_metadata = 0;
139 static int zio_requeue_io_start_cut_in_line = 1;
140
141 #ifdef ZFS_DEBUG
142 static const int zio_buf_debug_limit = 16384;
143 #else
144 static const int zio_buf_debug_limit = 0;
145 #endif
146
147 static inline void __zio_execute(zio_t *zio);
148
149 static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);
150
151 void
152 zio_init(void)
153 {
154 size_t c;
155
156 zio_cache = kmem_cache_create("zio_cache",
157 sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
158 zio_link_cache = kmem_cache_create("zio_link_cache",
159 sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
160
161 for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
162 size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
163 size_t align, cflags, data_cflags;
164 char name[32];
165
166 /*
167 * Create cache for each half-power of 2 size, starting from
168 * SPA_MINBLOCKSIZE. It should give us memory space efficiency
169 * of ~7/8, sufficient for transient allocations mostly using
170 * these caches.
171 */
172 size_t p2 = size;
173 while (!ISP2(p2))
174 p2 &= p2 - 1;
175 if (!IS_P2ALIGNED(size, p2 / 2))
176 continue;
177
178 #ifndef _KERNEL
179 /*
180 * If we are using watchpoints, put each buffer on its own page,
181 * to eliminate the performance overhead of trapping to the
182 * kernel when modifying a non-watched buffer that shares the
183 * page with a watched buffer.
184 */
185 if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
186 continue;
187 #endif
188
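		/*
		 * Align page-multiple buffers on a page boundary; otherwise
		 * align to the largest power of two that evenly divides the
		 * size.
		 */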
189 if (IS_P2ALIGNED(size, PAGESIZE))
190 align = PAGESIZE;
191 else
192 align = 1 << (highbit64(size ^ (size - 1)) - 1);
193
194 cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
195 KMC_NODEBUG : 0;
196 data_cflags = KMC_NODEBUG;
197 if (cflags == data_cflags) {
198 /*
199 * Resulting kmem caches would be identical.
200 * Save memory by creating only one.
201 */
202 (void) snprintf(name, sizeof (name),
203 "zio_buf_comb_%lu", (ulong_t)size);
204 zio_buf_cache[c] = kmem_cache_create(name, size, align,
205 NULL, NULL, NULL, NULL, NULL, cflags);
206 zio_data_buf_cache[c] = zio_buf_cache[c];
207 continue;
208 }
209 (void) snprintf(name, sizeof (name), "zio_buf_%lu",
210 (ulong_t)size);
211 zio_buf_cache[c] = kmem_cache_create(name, size, align,
212 NULL, NULL, NULL, NULL, NULL, cflags);
213
214 (void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
215 (ulong_t)size);
216 zio_data_buf_cache[c] = kmem_cache_create(name, size, align,
217 NULL, NULL, NULL, NULL, NULL, data_cflags);
218 }
219
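	/*
	 * Fill in the gaps: sizes without a dedicated cache fall back to
	 * the next larger cache.
	 */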
220 while (--c != 0) {
221 ASSERT(zio_buf_cache[c] != NULL);
222 if (zio_buf_cache[c - 1] == NULL)
223 zio_buf_cache[c - 1] = zio_buf_cache[c];
224
225 ASSERT(zio_data_buf_cache[c] != NULL);
226 if (zio_data_buf_cache[c - 1] == NULL)
227 zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
228 }
229
230 zio_inject_init();
231
232 lz4_init();
233 }
234
235 void
236 zio_fini(void)
237 {
238 size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;
239
240 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
241 for (size_t i = 0; i < n; i++) {
242 if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
243 (void) printf("zio_fini: [%d] %llu != %llu\n",
244 (int)((i + 1) << SPA_MINBLOCKSHIFT),
245 (long long unsigned)zio_buf_cache_allocs[i],
246 (long long unsigned)zio_buf_cache_frees[i]);
247 }
248 #endif
249
250 /*
251 * The same kmem cache can show up multiple times in both zio_buf_cache
252 * and zio_data_buf_cache. Do a wasteful but trivially correct scan to
253 * sort it out.
254 */
255 for (size_t i = 0; i < n; i++) {
256 kmem_cache_t *cache = zio_buf_cache[i];
257 if (cache == NULL)
258 continue;
259 for (size_t j = i; j < n; j++) {
260 if (cache == zio_buf_cache[j])
261 zio_buf_cache[j] = NULL;
262 if (cache == zio_data_buf_cache[j])
263 zio_data_buf_cache[j] = NULL;
264 }
265 kmem_cache_destroy(cache);
266 }
267
268 for (size_t i = 0; i < n; i++) {
269 kmem_cache_t *cache = zio_data_buf_cache[i];
270 if (cache == NULL)
271 continue;
272 for (size_t j = i; j < n; j++) {
273 if (cache == zio_data_buf_cache[j])
274 zio_data_buf_cache[j] = NULL;
275 }
276 kmem_cache_destroy(cache);
277 }
278
279 for (size_t i = 0; i < n; i++) {
280 VERIFY3P(zio_buf_cache[i], ==, NULL);
281 VERIFY3P(zio_data_buf_cache[i], ==, NULL);
282 }
283
284 kmem_cache_destroy(zio_link_cache);
285 kmem_cache_destroy(zio_cache);
286
287 zio_inject_fini();
288
289 lz4_fini();
290 }
291
292 /*
293 * ==========================================================================
294 * Allocate and free I/O buffers
295 * ==========================================================================
296 */
297
298 #ifdef ZFS_DEBUG
299 static const ulong_t zio_buf_canary = (ulong_t)0xdeadc0dedead210b;
300 #endif
301
302 /*
303 * Use empty space after the buffer to detect overflows.
304 *
305 * Since zio_init() creates kmem caches only for a certain set of buffer sizes,
306 * allocations of different sizes may have some unused space after the data.
307 * Filling part of that space with a known pattern on allocation and checking
308 * it on free should allow us to detect some buffer overflows.
309 */
310 static void
311 zio_buf_put_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
312 {
313 #ifdef ZFS_DEBUG
314 size_t off = P2ROUNDUP(size, sizeof (ulong_t));
315 ulong_t *canary = p + off / sizeof (ulong_t);
316 size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
317 if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
318 cache[c] == cache[c + 1])
319 asize = (c + 2) << SPA_MINBLOCKSHIFT;
320 for (; off < asize; canary++, off += sizeof (ulong_t))
321 *canary = zio_buf_canary;
322 #endif
323 }
324
325 static void
326 zio_buf_check_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
327 {
328 #ifdef ZFS_DEBUG
329 size_t off = P2ROUNDUP(size, sizeof (ulong_t));
330 ulong_t *canary = p + off / sizeof (ulong_t);
331 size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
332 if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
333 cache[c] == cache[c + 1])
334 asize = (c + 2) << SPA_MINBLOCKSHIFT;
335 for (; off < asize; canary++, off += sizeof (ulong_t)) {
336 if (unlikely(*canary != zio_buf_canary)) {
337 PANIC("ZIO buffer overflow %p (%zu) + %zu %#lx != %#lx",
338 p, size, (canary - p) * sizeof (ulong_t),
339 *canary, zio_buf_canary);
340 }
341 }
342 #endif
343 }
344
345 /*
346 * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
347 * crashdump if the kernel panics, so use it judiciously. Obviously, it's
348 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
349 * excess / transient data in-core during a crashdump.
350 */
351 void *
352 zio_buf_alloc(size_t size)
353 {
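	/*
	 * Round the requested size up to its SPA_MINBLOCKSIZE bucket to
	 * select a cache; the cache chosen may serve a slightly larger
	 * object size (see the gap-filling in zio_init()).
	 */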
354 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
355
356 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
357 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
358 atomic_add_64(&zio_buf_cache_allocs[c], 1);
359 #endif
360
361 void *p = kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE);
362 zio_buf_put_canary(p, size, zio_buf_cache, c);
363 return (p);
364 }
365
366 /*
367 * Use zio_data_buf_alloc to allocate data. The data will not appear in a
368 * crashdump if the kernel panics. This exists so that we will limit the amount
369 * of ZFS data that shows up in a kernel crashdump, thus reducing the amount
370 * of kernel heap dumped to disk when the kernel panics.
371 */
372 void *
373 zio_data_buf_alloc(size_t size)
374 {
375 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
376
377 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
378
379 void *p = kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE);
380 zio_buf_put_canary(p, size, zio_data_buf_cache, c);
381 return (p);
382 }
383
384 void
385 zio_buf_free(void *buf, size_t size)
386 {
387 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
388
389 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
390 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
391 atomic_add_64(&zio_buf_cache_frees[c], 1);
392 #endif
393
394 zio_buf_check_canary(buf, size, zio_buf_cache, c);
395 kmem_cache_free(zio_buf_cache[c], buf);
396 }
397
398 void
399 zio_data_buf_free(void *buf, size_t size)
400 {
401 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
402
403 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
404
405 zio_buf_check_canary(buf, size, zio_data_buf_cache, c);
406 kmem_cache_free(zio_data_buf_cache[c], buf);
407 }
408
409 static void
410 zio_abd_free(void *abd, size_t size)
411 {
412 (void) size;
413 abd_free((abd_t *)abd);
414 }
415
416 /*
417 * ==========================================================================
418 * Push and pop I/O transform buffers
419 * ==========================================================================
420 */
421 void
422 zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
423 zio_transform_func_t *transform)
424 {
425 zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);
426
427 zt->zt_orig_abd = zio->io_abd;
428 zt->zt_orig_size = zio->io_size;
429 zt->zt_bufsize = bufsize;
430 zt->zt_transform = transform;
431
432 zt->zt_next = zio->io_transform_stack;
433 zio->io_transform_stack = zt;
434
435 zio->io_abd = data;
436 zio->io_size = size;
437 }
438
439 void
440 zio_pop_transforms(zio_t *zio)
441 {
442 zio_transform_t *zt;
443
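	/*
	 * Unwind the transform stack: invoke each transform callback and
	 * free any buffer that was allocated when the transform was pushed.
	 */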
444 while ((zt = zio->io_transform_stack) != NULL) {
445 if (zt->zt_transform != NULL)
446 zt->zt_transform(zio,
447 zt->zt_orig_abd, zt->zt_orig_size);
448
449 if (zt->zt_bufsize != 0)
450 abd_free(zio->io_abd);
451
452 zio->io_abd = zt->zt_orig_abd;
453 zio->io_size = zt->zt_orig_size;
454 zio->io_transform_stack = zt->zt_next;
455
456 kmem_free(zt, sizeof (zio_transform_t));
457 }
458 }
459
460 /*
461 * ==========================================================================
462 * I/O transform callbacks for subblocks, decompression, and decryption
463 * ==========================================================================
464 */
465 static void
466 zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
467 {
468 ASSERT(zio->io_size > size);
469
470 if (zio->io_type == ZIO_TYPE_READ)
471 abd_copy(data, zio->io_abd, size);
472 }
473
474 static void
475 zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
476 {
477 if (zio->io_error == 0) {
478 void *tmp = abd_borrow_buf(data, size);
479 int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
480 zio->io_abd, tmp, zio->io_size, size,
481 &zio->io_prop.zp_complevel);
482 abd_return_buf_copy(data, tmp, size);
483
484 if (zio_injection_enabled && ret == 0)
485 ret = zio_handle_fault_injection(zio, EINVAL);
486
487 if (ret != 0)
488 zio->io_error = SET_ERROR(EIO);
489 }
490 }
491
492 static void
493 zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
494 {
495 int ret;
496 void *tmp;
497 blkptr_t *bp = zio->io_bp;
498 spa_t *spa = zio->io_spa;
499 uint64_t dsobj = zio->io_bookmark.zb_objset;
500 uint64_t lsize = BP_GET_LSIZE(bp);
501 dmu_object_type_t ot = BP_GET_TYPE(bp);
502 uint8_t salt[ZIO_DATA_SALT_LEN];
503 uint8_t iv[ZIO_DATA_IV_LEN];
504 uint8_t mac[ZIO_DATA_MAC_LEN];
505 boolean_t no_crypt = B_FALSE;
506
507 ASSERT(BP_USES_CRYPT(bp));
508 ASSERT3U(size, !=, 0);
509
510 if (zio->io_error != 0)
511 return;
512
513 /*
514 * Verify the cksum of MACs stored in an indirect bp. It will always
515 * be possible to verify this since it does not require an encryption
516 * key.
517 */
518 if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
519 zio_crypt_decode_mac_bp(bp, mac);
520
521 if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
522 /*
523 * We haven't decompressed the data yet, but
524 * zio_crypt_do_indirect_mac_checksum() requires
525 * decompressed data to be able to parse out the MACs
526 * from the indirect block. We decompress it now and
527 * throw away the result after we are finished.
528 */
529 tmp = zio_buf_alloc(lsize);
530 ret = zio_decompress_data(BP_GET_COMPRESS(bp),
531 zio->io_abd, tmp, zio->io_size, lsize,
532 &zio->io_prop.zp_complevel);
533 if (ret != 0) {
534 ret = SET_ERROR(EIO);
535 goto error;
536 }
537 ret = zio_crypt_do_indirect_mac_checksum(B_FALSE,
538 tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac);
539 zio_buf_free(tmp, lsize);
540 } else {
541 ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
542 zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
543 }
544 abd_copy(data, zio->io_abd, size);
545
546 if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
547 ret = zio_handle_decrypt_injection(spa,
548 &zio->io_bookmark, ot, ECKSUM);
549 }
550 if (ret != 0)
551 goto error;
552
553 return;
554 }
555
556 /*
557 * If this is an authenticated block, just check the MAC. It would be
558 * nice to separate this out into its own flag, but when this was done,
559 * we had run out of bits in what is now zio_flag_t. Future cleanup
560 * could make this a flag bit.
561 */
562 if (BP_IS_AUTHENTICATED(bp)) {
563 if (ot == DMU_OT_OBJSET) {
564 ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
565 dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
566 } else {
567 zio_crypt_decode_mac_bp(bp, mac);
568 ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
569 zio->io_abd, size, mac);
570 if (zio_injection_enabled && ret == 0) {
571 ret = zio_handle_decrypt_injection(spa,
572 &zio->io_bookmark, ot, ECKSUM);
573 }
574 }
575 abd_copy(data, zio->io_abd, size);
576
577 if (ret != 0)
578 goto error;
579
580 return;
581 }
582
583 zio_crypt_decode_params_bp(bp, salt, iv);
584
585 if (ot == DMU_OT_INTENT_LOG) {
586 tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
587 zio_crypt_decode_mac_zil(tmp, mac);
588 abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
589 } else {
590 zio_crypt_decode_mac_bp(bp, mac);
591 }
592
593 ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
594 BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
595 zio->io_abd, &no_crypt);
596 if (no_crypt)
597 abd_copy(data, zio->io_abd, size);
598
599 if (ret != 0)
600 goto error;
601
602 return;
603
604 error:
605 /* assert that the key was found unless this was speculative */
606 ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));
607
608 /*
609 * If there was a decryption / authentication error return EIO as
610 * the io_error. If this was not a speculative zio, create an ereport.
611 */
612 if (ret == ECKSUM) {
613 zio->io_error = SET_ERROR(EIO);
614 if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
615 spa_log_error(spa, &zio->io_bookmark,
616 BP_GET_LOGICAL_BIRTH(zio->io_bp));
617 (void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
618 spa, NULL, &zio->io_bookmark, zio, 0);
619 }
620 } else {
621 zio->io_error = ret;
622 }
623 }
624
625 /*
626 * ==========================================================================
627 * I/O parent/child relationships and pipeline interlocks
628 * ==========================================================================
629 */
630 zio_t *
631 zio_walk_parents(zio_t *cio, zio_link_t **zl)
632 {
633 list_t *pl = &cio->io_parent_list;
634
635 *zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
636 if (*zl == NULL)
637 return (NULL);
638
639 ASSERT((*zl)->zl_child == cio);
640 return ((*zl)->zl_parent);
641 }
642
643 zio_t *
644 zio_walk_children(zio_t *pio, zio_link_t **zl)
645 {
646 list_t *cl = &pio->io_child_list;
647
648 ASSERT(MUTEX_HELD(&pio->io_lock));
649
650 *zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
651 if (*zl == NULL)
652 return (NULL);
653
654 ASSERT((*zl)->zl_parent == pio);
655 return ((*zl)->zl_child);
656 }
657
658 zio_t *
659 zio_unique_parent(zio_t *cio)
660 {
661 zio_link_t *zl = NULL;
662 zio_t *pio = zio_walk_parents(cio, &zl);
663
664 VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
665 return (pio);
666 }
667
668 void
669 zio_add_child(zio_t *pio, zio_t *cio)
670 {
671 /*
672 * Logical I/Os can have logical, gang, or vdev children.
673 * Gang I/Os can have gang or vdev children.
674 * Vdev I/Os can only have vdev children.
675 * The following ASSERT captures all of these constraints.
676 */
677 ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
678
679 /* Parent should not have READY stage if child doesn't have it. */
680 IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
681 (cio->io_child_type != ZIO_CHILD_VDEV),
682 (pio->io_pipeline & ZIO_STAGE_READY) == 0);
683
684 zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
685 zl->zl_parent = pio;
686 zl->zl_child = cio;
687
688 mutex_enter(&pio->io_lock);
689 mutex_enter(&cio->io_lock);
690
691 ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
692
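	/* Count this child against each wait type it has not yet passed. */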
693 uint64_t *countp = pio->io_children[cio->io_child_type];
694 for (int w = 0; w < ZIO_WAIT_TYPES; w++)
695 countp[w] += !cio->io_state[w];
696
697 list_insert_head(&pio->io_child_list, zl);
698 list_insert_head(&cio->io_parent_list, zl);
699
700 mutex_exit(&cio->io_lock);
701 mutex_exit(&pio->io_lock);
702 }
703
704 void
705 zio_add_child_first(zio_t *pio, zio_t *cio)
706 {
707 /*
708 * Logical I/Os can have logical, gang, or vdev children.
709 * Gang I/Os can have gang or vdev children.
710 * Vdev I/Os can only have vdev children.
711 * The following ASSERT captures all of these constraints.
712 */
713 ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
714
715 /* Parent should not have READY stage if child doesn't have it. */
716 IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
717 (cio->io_child_type != ZIO_CHILD_VDEV),
718 (pio->io_pipeline & ZIO_STAGE_READY) == 0);
719
720 zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
721 zl->zl_parent = pio;
722 zl->zl_child = cio;
723
724 ASSERT(list_is_empty(&cio->io_parent_list));
725 list_insert_head(&cio->io_parent_list, zl);
726
727 mutex_enter(&pio->io_lock);
728
729 ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
730
731 uint64_t *countp = pio->io_children[cio->io_child_type];
732 for (int w = 0; w < ZIO_WAIT_TYPES; w++)
733 countp[w] += !cio->io_state[w];
734
735 list_insert_head(&pio->io_child_list, zl);
736
737 mutex_exit(&pio->io_lock);
738 }
739
740 static void
741 zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
742 {
743 ASSERT(zl->zl_parent == pio);
744 ASSERT(zl->zl_child == cio);
745
746 mutex_enter(&pio->io_lock);
747 mutex_enter(&cio->io_lock);
748
749 list_remove(&pio->io_child_list, zl);
750 list_remove(&cio->io_parent_list, zl);
751
752 mutex_exit(&cio->io_lock);
753 mutex_exit(&pio->io_lock);
754 kmem_cache_free(zio_link_cache, zl);
755 }
756
757 static boolean_t
758 zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
759 {
760 boolean_t waiting = B_FALSE;
761
762 mutex_enter(&zio->io_lock);
763 ASSERT(zio->io_stall == NULL);
764 for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
765 if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
766 continue;
767
768 uint64_t *countp = &zio->io_children[c][wait];
769 if (*countp != 0) {
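			/*
			 * Back up to the previous stage so that when the last
			 * child completes and this zio is re-dispatched,
			 * zio_execute() advances it back into (and re-runs)
			 * the current stage.
			 */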
770 zio->io_stage >>= 1;
771 ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
772 zio->io_stall = countp;
773 waiting = B_TRUE;
774 break;
775 }
776 }
777 mutex_exit(&zio->io_lock);
778 return (waiting);
779 }
780
781 __attribute__((always_inline))
782 static inline void
783 zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
784 zio_t **next_to_executep)
785 {
786 uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
787 int *errorp = &pio->io_child_error[zio->io_child_type];
788
789 mutex_enter(&pio->io_lock);
790 if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
791 *errorp = zio_worst_error(*errorp, zio->io_error);
792 pio->io_reexecute |= zio->io_reexecute;
793 ASSERT3U(*countp, >, 0);
794
795 (*countp)--;
796
797 if (*countp == 0 && pio->io_stall == countp) {
798 zio_taskq_type_t type =
799 pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
800 ZIO_TASKQ_INTERRUPT;
801 pio->io_stall = NULL;
802 mutex_exit(&pio->io_lock);
803
804 /*
805 * If we can tell the caller to execute this parent next, do
806 * so. We do this if the parent's zio type matches the child's
807 * type, or if it's a zio_null() with no done callback, and so
808 * has no actual work to do. Otherwise dispatch the parent zio
809 * in its own taskq.
810 *
811 * Having the caller execute the parent when possible reduces
812 * locking on the zio taskq's, reduces context switch
813 * overhead, and has no recursion penalty. Note that one
814 * read from disk typically causes at least 3 zio's: a
815 * zio_null(), the logical zio_read(), and then a physical
816 * zio. When the physical ZIO completes, we are able to call
817 * zio_done() on all 3 of these zio's from one invocation of
818 * zio_execute() by returning the parent back to
819 * zio_execute(). Since the parent isn't executed until this
820 * thread returns back to zio_execute(), the caller should do
821 * so promptly.
822 *
823 * In other cases, dispatching the parent prevents
824 * overflowing the stack when we have deeply nested
825 * parent-child relationships, as we do with the "mega zio"
826 * of writes for spa_sync(), and the chain of ZIL blocks.
827 */
828 if (next_to_executep != NULL && *next_to_executep == NULL &&
829 (pio->io_type == zio->io_type ||
830 (pio->io_type == ZIO_TYPE_NULL && !pio->io_done))) {
831 *next_to_executep = pio;
832 } else {
833 zio_taskq_dispatch(pio, type, B_FALSE);
834 }
835 } else {
836 mutex_exit(&pio->io_lock);
837 }
838 }
839
840 static void
841 zio_inherit_child_errors(zio_t *zio, enum zio_child c)
842 {
843 if (zio->io_child_error[c] != 0 && zio->io_error == 0)
844 zio->io_error = zio->io_child_error[c];
845 }
846
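/*
 * Compare two zios by bookmark (objset, object, level, blkid), falling back
 * to the zio pointers themselves to provide a stable total ordering.
 */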
847 int
848 zio_bookmark_compare(const void *x1, const void *x2)
849 {
850 const zio_t *z1 = x1;
851 const zio_t *z2 = x2;
852
853 if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
854 return (-1);
855 if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
856 return (1);
857
858 if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
859 return (-1);
860 if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
861 return (1);
862
863 if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
864 return (-1);
865 if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
866 return (1);
867
868 if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
869 return (-1);
870 if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
871 return (1);
872
873 if (z1 < z2)
874 return (-1);
875 if (z1 > z2)
876 return (1);
877
878 return (0);
879 }
880
881 /*
882 * ==========================================================================
883 * Create the various types of I/O (read, write, free, etc)
884 * ==========================================================================
885 */
886 static zio_t *
887 zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
888 abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
889 void *private, zio_type_t type, zio_priority_t priority,
890 zio_flag_t flags, vdev_t *vd, uint64_t offset,
891 const zbookmark_phys_t *zb, enum zio_stage stage,
892 enum zio_stage pipeline)
893 {
894 zio_t *zio;
895
896 IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
897 ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
898 ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
899
900 ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
901 ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
902 ASSERT(vd || stage == ZIO_STAGE_OPEN);
903
904 IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);
905
906 zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
907 memset(zio, 0, sizeof (zio_t));
908
909 mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
910 cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
911
912 list_create(&zio->io_parent_list, sizeof (zio_link_t),
913 offsetof(zio_link_t, zl_parent_node));
914 list_create(&zio->io_child_list, sizeof (zio_link_t),
915 offsetof(zio_link_t, zl_child_node));
916 metaslab_trace_init(&zio->io_alloc_list);
917
918 if (vd != NULL)
919 zio->io_child_type = ZIO_CHILD_VDEV;
920 else if (flags & ZIO_FLAG_GANG_CHILD)
921 zio->io_child_type = ZIO_CHILD_GANG;
922 else if (flags & ZIO_FLAG_DDT_CHILD)
923 zio->io_child_type = ZIO_CHILD_DDT;
924 else
925 zio->io_child_type = ZIO_CHILD_LOGICAL;
926
927 if (bp != NULL) {
928 if (type != ZIO_TYPE_WRITE ||
929 zio->io_child_type == ZIO_CHILD_DDT) {
930 zio->io_bp_copy = *bp;
931 zio->io_bp = &zio->io_bp_copy; /* so caller can free */
932 } else {
933 zio->io_bp = (blkptr_t *)bp;
934 }
935 zio->io_bp_orig = *bp;
936 if (zio->io_child_type == ZIO_CHILD_LOGICAL)
937 zio->io_logical = zio;
938 if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
939 pipeline |= ZIO_GANG_STAGES;
940 }
941
942 zio->io_spa = spa;
943 zio->io_txg = txg;
944 zio->io_done = done;
945 zio->io_private = private;
946 zio->io_type = type;
947 zio->io_priority = priority;
948 zio->io_vd = vd;
949 zio->io_offset = offset;
950 zio->io_orig_abd = zio->io_abd = data;
951 zio->io_orig_size = zio->io_size = psize;
952 zio->io_lsize = lsize;
953 zio->io_orig_flags = zio->io_flags = flags;
954 zio->io_orig_stage = zio->io_stage = stage;
955 zio->io_orig_pipeline = zio->io_pipeline = pipeline;
956 zio->io_pipeline_trace = ZIO_STAGE_OPEN;
957 zio->io_allocator = ZIO_ALLOCATOR_NONE;
958
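	/*
	 * Mark the zio as already READY/DONE if it starts at or past that
	 * stage, or (for READY) if its pipeline never includes the READY
	 * stage at all.
	 */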
959 zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY) ||
960 (pipeline & ZIO_STAGE_READY) == 0;
961 zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);
962
963 if (zb != NULL)
964 zio->io_bookmark = *zb;
965
966 if (pio != NULL) {
967 zio->io_metaslab_class = pio->io_metaslab_class;
968 if (zio->io_logical == NULL)
969 zio->io_logical = pio->io_logical;
970 if (zio->io_child_type == ZIO_CHILD_GANG)
971 zio->io_gang_leader = pio->io_gang_leader;
972 zio_add_child_first(pio, zio);
973 }
974
975 taskq_init_ent(&zio->io_tqent);
976
977 return (zio);
978 }
979
980 void
981 zio_destroy(zio_t *zio)
982 {
983 metaslab_trace_fini(&zio->io_alloc_list);
984 list_destroy(&zio->io_parent_list);
985 list_destroy(&zio->io_child_list);
986 mutex_destroy(&zio->io_lock);
987 cv_destroy(&zio->io_cv);
988 kmem_cache_free(zio_cache, zio);
989 }
990
991 /*
992 * ZIO intended to be between others. Provides synchronization at READY
993 * A ZIO intended to sit between other ZIOs. It provides synchronization at
994 * the READY and DONE pipeline stages and calls the respective callbacks.
995 zio_t *
996 zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
997 void *private, zio_flag_t flags)
998 {
999 zio_t *zio;
1000
1001 zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
1002 ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
1003 ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);
1004
1005 return (zio);
1006 }
1007
1008 /*
1009 * A ZIO intended to be the root of a tree. Unlike a null ZIO, it does not
1010 * have a READY pipeline stage (it is ready on creation), so it should not
1011 * be used as a child of any ZIO that may need to wait for its grandchildren's
1012 * READY stage (i.e. any other ZIO type).
1013 */
1014 zio_t *
1015 zio_root(spa_t *spa, zio_done_func_t *done, void *private, zio_flag_t flags)
1016 {
1017 zio_t *zio;
1018
1019 zio = zio_create(NULL, spa, 0, NULL, NULL, 0, 0, done, private,
1020 ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, NULL, 0, NULL,
1021 ZIO_STAGE_OPEN, ZIO_ROOT_PIPELINE);
1022
1023 return (zio);
1024 }
1025
1026 static int
1027 zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
1028 enum blk_verify_flag blk_verify, const char *fmt, ...)
1029 {
1030 va_list adx;
1031 char buf[256];
1032
1033 va_start(adx, fmt);
1034 (void) vsnprintf(buf, sizeof (buf), fmt, adx);
1035 va_end(adx);
1036
1037 zfs_dbgmsg("bad blkptr at %px: "
1038 "DVA[0]=%#llx/%#llx "
1039 "DVA[1]=%#llx/%#llx "
1040 "DVA[2]=%#llx/%#llx "
1041 "prop=%#llx "
1042 "pad=%#llx,%#llx "
1043 "phys_birth=%#llx "
1044 "birth=%#llx "
1045 "fill=%#llx "
1046 "cksum=%#llx/%#llx/%#llx/%#llx",
1047 bp,
1048 (long long)bp->blk_dva[0].dva_word[0],
1049 (long long)bp->blk_dva[0].dva_word[1],
1050 (long long)bp->blk_dva[1].dva_word[0],
1051 (long long)bp->blk_dva[1].dva_word[1],
1052 (long long)bp->blk_dva[2].dva_word[0],
1053 (long long)bp->blk_dva[2].dva_word[1],
1054 (long long)bp->blk_prop,
1055 (long long)bp->blk_pad[0],
1056 (long long)bp->blk_pad[1],
1057 (long long)BP_GET_PHYSICAL_BIRTH(bp),
1058 (long long)BP_GET_LOGICAL_BIRTH(bp),
1059 (long long)bp->blk_fill,
1060 (long long)bp->blk_cksum.zc_word[0],
1061 (long long)bp->blk_cksum.zc_word[1],
1062 (long long)bp->blk_cksum.zc_word[2],
1063 (long long)bp->blk_cksum.zc_word[3]);
1064 switch (blk_verify) {
1065 case BLK_VERIFY_HALT:
1066 zfs_panic_recover("%s: %s", spa_name(spa), buf);
1067 break;
1068 case BLK_VERIFY_LOG:
1069 zfs_dbgmsg("%s: %s", spa_name(spa), buf);
1070 break;
1071 case BLK_VERIFY_ONLY:
1072 break;
1073 }
1074
1075 return (1);
1076 }
1077
1078 /*
1079 * Verify the block pointer fields contain reasonable values. This means
1080 * it only contains known object types, checksum/compression identifiers,
1081 * block sizes within the maximum allowed limits, valid DVAs, etc.
1082 *
1083 * If everything checks out, B_TRUE is returned. The blk_verify
1084 * argument controls the behavior when an invalid field is detected.
1085 *
1086 * Values for blk_verify_flag:
1087 * BLK_VERIFY_ONLY: evaluate the block
1088 * BLK_VERIFY_LOG: evaluate the block and log problems
1089 * BLK_VERIFY_HALT: call zfs_panic_recover on error
1090 *
1091 * Values for blk_config_flag:
1092 * BLK_CONFIG_HELD: caller holds SCL_VDEV for writer
1093 * BLK_CONFIG_NEEDED: caller holds no config lock, SCL_VDEV will be
1094 * obtained for reader
1095 * BLK_CONFIG_SKIP: skip checks which require SCL_VDEV, for better
1096 * performance
1097 */
1098 boolean_t
1099 zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
1100 enum blk_config_flag blk_config, enum blk_verify_flag blk_verify)
1101 {
1102 int errors = 0;
1103
1104 if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
1105 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1106 "blkptr at %px has invalid TYPE %llu",
1107 bp, (longlong_t)BP_GET_TYPE(bp));
1108 }
1109 if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS) {
1110 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1111 "blkptr at %px has invalid CHECKSUM %llu",
1112 bp, (longlong_t)BP_GET_CHECKSUM(bp));
1113 }
1114 if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS) {
1115 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1116 "blkptr at %px has invalid COMPRESS %llu",
1117 bp, (longlong_t)BP_GET_COMPRESS(bp));
1118 }
1119 if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
1120 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1121 "blkptr at %px has invalid LSIZE %llu",
1122 bp, (longlong_t)BP_GET_LSIZE(bp));
1123 }
1124 if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
1125 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1126 "blkptr at %px has invalid PSIZE %llu",
1127 bp, (longlong_t)BP_GET_PSIZE(bp));
1128 }
1129
1130 if (BP_IS_EMBEDDED(bp)) {
1131 if (BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES) {
1132 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1133 "blkptr at %px has invalid ETYPE %llu",
1134 bp, (longlong_t)BPE_GET_ETYPE(bp));
1135 }
1136 }
1137
1138 /*
1139 * Do not verify individual DVAs if the config is not trusted. This
1140 * will be done once the zio is executed in vdev_mirror_map_alloc.
1141 */
1142 if (!spa->spa_trust_config)
1143 return (errors == 0);
1144
1145 switch (blk_config) {
1146 case BLK_CONFIG_HELD:
1147 ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
1148 break;
1149 case BLK_CONFIG_NEEDED:
1150 spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
1151 break;
1152 case BLK_CONFIG_SKIP:
1153 return (errors == 0);
1154 default:
1155 panic("invalid blk_config %u", blk_config);
1156 }
1157
1158 /*
1159 * Pool-specific checks.
1160 *
1161 * Note: it would be nice to verify that the logical birth
1162 * and physical birth are not too large. However,
1163 * spa_freeze() allows the birth time of log blocks (and
1164 * dmu_sync()-ed blocks that are in the log) to be arbitrarily
1165 * large.
1166 */
1167 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
1168 const dva_t *dva = &bp->blk_dva[i];
1169 uint64_t vdevid = DVA_GET_VDEV(dva);
1170
1171 if (vdevid >= spa->spa_root_vdev->vdev_children) {
1172 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1173 "blkptr at %px DVA %u has invalid VDEV %llu",
1174 bp, i, (longlong_t)vdevid);
1175 continue;
1176 }
1177 vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
1178 if (vd == NULL) {
1179 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1180 "blkptr at %px DVA %u has invalid VDEV %llu",
1181 bp, i, (longlong_t)vdevid);
1182 continue;
1183 }
1184 if (vd->vdev_ops == &vdev_hole_ops) {
1185 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1186 "blkptr at %px DVA %u has hole VDEV %llu",
1187 bp, i, (longlong_t)vdevid);
1188 continue;
1189 }
1190 if (vd->vdev_ops == &vdev_missing_ops) {
1191 /*
1192 * "missing" vdevs are valid during import, but we
1193 * don't have their detailed info (e.g. asize), so
1194 * we can't perform any more checks on them.
1195 */
1196 continue;
1197 }
1198 uint64_t offset = DVA_GET_OFFSET(dva);
1199 uint64_t asize = DVA_GET_ASIZE(dva);
1200 if (DVA_GET_GANG(dva))
1201 asize = vdev_gang_header_asize(vd);
1202 if (offset + asize > vd->vdev_asize) {
1203 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1204 "blkptr at %px DVA %u has invalid OFFSET %llu",
1205 bp, i, (longlong_t)offset);
1206 }
1207 }
1208 if (blk_config == BLK_CONFIG_NEEDED)
1209 spa_config_exit(spa, SCL_VDEV, bp);
1210
1211 return (errors == 0);
1212 }
1213
1214 boolean_t
1215 zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
1216 {
1217 (void) bp;
1218 uint64_t vdevid = DVA_GET_VDEV(dva);
1219
1220 if (vdevid >= spa->spa_root_vdev->vdev_children)
1221 return (B_FALSE);
1222
1223 vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
1224 if (vd == NULL)
1225 return (B_FALSE);
1226
1227 if (vd->vdev_ops == &vdev_hole_ops)
1228 return (B_FALSE);
1229
1230 if (vd->vdev_ops == &vdev_missing_ops) {
1231 return (B_FALSE);
1232 }
1233
1234 uint64_t offset = DVA_GET_OFFSET(dva);
1235 uint64_t asize = DVA_GET_ASIZE(dva);
1236
1237 if (DVA_GET_GANG(dva))
1238 asize = vdev_gang_header_asize(vd);
1239 if (offset + asize > vd->vdev_asize)
1240 return (B_FALSE);
1241
1242 return (B_TRUE);
1243 }
1244
1245 zio_t *
1246 zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
1247 abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
1248 zio_priority_t priority, zio_flag_t flags, const zbookmark_phys_t *zb)
1249 {
1250 zio_t *zio;
1251
1252 zio = zio_create(pio, spa, BP_GET_BIRTH(bp), bp,
1253 data, size, size, done, private,
1254 ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
1255 ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
1256 ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);
1257
1258 return (zio);
1259 }
1260
1261 zio_t *
1262 zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
1263 abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
1264 zio_done_func_t *ready, zio_done_func_t *children_ready,
1265 zio_done_func_t *done, void *private, zio_priority_t priority,
1266 zio_flag_t flags, const zbookmark_phys_t *zb)
1267 {
1268 zio_t *zio;
1269
1270 ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
1271 zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
1272 zp->zp_compress >= ZIO_COMPRESS_OFF &&
1273 zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
1274 DMU_OT_IS_VALID(zp->zp_type) &&
1275 zp->zp_level < 32 &&
1276 zp->zp_copies > 0 &&
1277 zp->zp_copies <= spa_max_replication(spa));
1278
1279 zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
1280 ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
1281 ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
1282 ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);
1283
1284 zio->io_ready = ready;
1285 zio->io_children_ready = children_ready;
1286 zio->io_prop = *zp;
1287
1288 /*
1289 * Data can be NULL if we are going to call zio_write_override() to
1290 * provide the already-allocated BP. But we may need the data to
1291 * verify a dedup hit (if requested). In this case, don't try to
1292 * dedup (just take the already-allocated BP verbatim). Encrypted
1293 * dedup blocks need data as well so we also disable dedup in this
1294 * case.
1295 */
1296 if (data == NULL &&
1297 (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
1298 zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
1299 }
1300
1301 return (zio);
1302 }
1303
1304 zio_t *
1305 zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
1306 uint64_t size, zio_done_func_t *done, void *private,
1307 zio_priority_t priority, zio_flag_t flags, zbookmark_phys_t *zb)
1308 {
1309 zio_t *zio;
1310
1311 zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
1312 ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
1313 ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);
1314
1315 return (zio);
1316 }
1317
1318 void
1319 zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite,
1320 boolean_t brtwrite)
1321 {
1322 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
1323 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1324 ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
1325 ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
1326 ASSERT(!brtwrite || !nopwrite);
1327
1328 /*
1329 * We must reset the io_prop to match the values that existed
1330 * when the bp was first written by dmu_sync() keeping in mind
1331 * that nopwrite and dedup are mutually exclusive.
1332 */
1333 zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
1334 zio->io_prop.zp_nopwrite = nopwrite;
1335 zio->io_prop.zp_brtwrite = brtwrite;
1336 zio->io_prop.zp_copies = copies;
1337 zio->io_bp_override = bp;
1338 }
1339
1340 void
1341 zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
1342 {
1343
1344 (void) zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
1345
1346 /*
1347 * The check for EMBEDDED is a performance optimization. We
1348 * process the free here (by ignoring it) rather than
1349 * putting it on the list and then processing it in zio_free_sync().
1350 */
1351 if (BP_IS_EMBEDDED(bp))
1352 return;
1353
1354 /*
1355 * Frees that are for the currently-syncing txg, are not going to be
1356 * deferred, and which will not need to do a read (i.e. not GANG or
1357 * DEDUP), can be processed immediately. Otherwise, put them on the
1358 * in-memory list for later processing.
1359 *
1360 * Note that we only defer frees after zfs_sync_pass_deferred_free
1361 * when the log space map feature is disabled. [see relevant comment
1362 * in spa_sync_iterate_to_convergence()]
1363 */
1364 if (BP_IS_GANG(bp) ||
1365 BP_GET_DEDUP(bp) ||
1366 txg != spa->spa_syncing_txg ||
1367 (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
1368 !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) ||
1369 brt_maybe_exists(spa, bp)) {
1370 metaslab_check_free(spa, bp);
1371 bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
1372 } else {
1373 VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
1374 }
1375 }
1376
1377 /*
1378 * To improve performance, this function may return NULL if we were able
1379 * to do the free immediately. This avoids the cost of creating a zio
1380 * (and linking it to the parent, etc).
1381 */
1382 zio_t *
1383 zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
1384 zio_flag_t flags)
1385 {
1386 ASSERT(!BP_IS_HOLE(bp));
1387 ASSERT(spa_syncing_txg(spa) == txg);
1388
1389 if (BP_IS_EMBEDDED(bp))
1390 return (NULL);
1391
1392 metaslab_check_free(spa, bp);
1393 arc_freed(spa, bp);
1394 dsl_scan_freed(spa, bp);
1395
1396 if (BP_IS_GANG(bp) ||
1397 BP_GET_DEDUP(bp) ||
1398 brt_maybe_exists(spa, bp)) {
1399 /*
1400 * GANG, DEDUP and BRT blocks can induce a read (for the gang
1401 * block header, the DDT or the BRT), so issue them
1402 * asynchronously so that this thread is not tied up.
1403 */
1404 enum zio_stage stage =
1405 ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;
1406
1407 return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
1408 BP_GET_PSIZE(bp), NULL, NULL,
1409 ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
1410 flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
1411 } else {
1412 metaslab_free(spa, bp, txg, B_FALSE);
1413 return (NULL);
1414 }
1415 }
1416
1417 zio_t *
1418 zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
1419 zio_done_func_t *done, void *private, zio_flag_t flags)
1420 {
1421 zio_t *zio;
1422
1423 (void) zfs_blkptr_verify(spa, bp, (flags & ZIO_FLAG_CONFIG_WRITER) ?
1424 BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
1425
1426 if (BP_IS_EMBEDDED(bp))
1427 return (zio_null(pio, spa, NULL, NULL, NULL, 0));
1428
1429 /*
1430 * A claim is an allocation of a specific block. Claims are needed
1431 * to support immediate writes in the intent log. The issue is that
1432 * immediate writes contain committed data, but in a txg that was
1433 * *not* committed. Upon opening the pool after an unclean shutdown,
1434 * the intent log claims all blocks that contain immediate write data
1435 * so that the SPA knows they're in use.
1436 *
1437 * All claims *must* be resolved in the first txg -- before the SPA
1438 * starts allocating blocks -- so that nothing is allocated twice.
1439 * If txg == 0 we just verify that the block is claimable.
1440 */
1441 ASSERT3U(BP_GET_LOGICAL_BIRTH(&spa->spa_uberblock.ub_rootbp), <,
1442 spa_min_claim_txg(spa));
1443 ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
1444 ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa)); /* zdb(8) */
1445
1446 zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
1447 BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
1448 flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
1449 ASSERT0(zio->io_queued_timestamp);
1450
1451 return (zio);
1452 }
1453
1454 zio_t *
1455 zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1456 zio_done_func_t *done, void *private, zio_priority_t priority,
1457 zio_flag_t flags, enum trim_flag trim_flags)
1458 {
1459 zio_t *zio;
1460
1461 ASSERT0(vd->vdev_children);
1462 ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
1463 ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
1464 ASSERT3U(size, !=, 0);
1465
1466 zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
1467 private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
1468 vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
1469 zio->io_trim_flags = trim_flags;
1470
1471 return (zio);
1472 }
1473
1474 zio_t *
1475 zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1476 abd_t *data, int checksum, zio_done_func_t *done, void *private,
1477 zio_priority_t priority, zio_flag_t flags, boolean_t labels)
1478 {
1479 zio_t *zio;
1480
1481 ASSERT(vd->vdev_children == 0);
1482 ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
1483 offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
1484 ASSERT3U(offset + size, <=, vd->vdev_psize);
1485
1486 zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
1487 private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
1488 offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);
1489
1490 zio->io_prop.zp_checksum = checksum;
1491
1492 return (zio);
1493 }
1494
1495 zio_t *
1496 zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1497 abd_t *data, int checksum, zio_done_func_t *done, void *private,
1498 zio_priority_t priority, zio_flag_t flags, boolean_t labels)
1499 {
1500 zio_t *zio;
1501
1502 ASSERT(vd->vdev_children == 0);
1503 ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
1504 offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
1505 ASSERT3U(offset + size, <=, vd->vdev_psize);
1506
1507 zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
1508 private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
1509 offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);
1510
1511 zio->io_prop.zp_checksum = checksum;
1512
1513 if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
1514 /*
1515 * zec checksums are necessarily destructive -- they modify
1516 * the end of the write buffer to hold the verifier/checksum.
1517 * Therefore, we must make a local copy in case the data is
1518 * being written to multiple places in parallel.
1519 */
1520 abd_t *wbuf = abd_alloc_sametype(data, size);
1521 abd_copy(wbuf, data, size);
1522
1523 zio_push_transform(zio, wbuf, size, size, NULL);
1524 }
1525
1526 return (zio);
1527 }
1528
1529 /*
1530 * Create a child I/O to do some work for us.
1531 */
1532 zio_t *
1533 zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
1534 abd_t *data, uint64_t size, int type, zio_priority_t priority,
1535 zio_flag_t flags, zio_done_func_t *done, void *private)
1536 {
1537 enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
1538 zio_t *zio;
1539
1540 /*
1541 * vdev child I/Os do not propagate their error to the parent.
1542 * Therefore, for correct operation the caller *must* check for
1543 * and handle the error in the child i/o's done callback.
1544 * The only exceptions are i/os that we don't care about
1545 * (OPTIONAL or REPAIR).
1546 */
1547 ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
1548 done != NULL);
1549
1550 if (type == ZIO_TYPE_READ && bp != NULL) {
1551 /*
1552 * If we have the bp, then the child should perform the
1553 * checksum and the parent need not. This pushes error
1554 * detection as close to the leaves as possible and
1555 * eliminates redundant checksums in the interior nodes.
1556 */
1557 pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
1558 pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
1559 }
1560
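	/*
	 * Offsets on leaf vdevs are relative to the allocatable space,
	 * which begins after the front labels and boot block, so shift
	 * the physical offset accordingly.
	 */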
1561 if (vd->vdev_ops->vdev_op_leaf) {
1562 ASSERT0(vd->vdev_children);
1563 offset += VDEV_LABEL_START_SIZE;
1564 }
1565
1566 flags |= ZIO_VDEV_CHILD_FLAGS(pio);
1567
1568 /*
1569 * If we've decided to do a repair, the write is not speculative --
1570 * even if the original read was.
1571 */
1572 if (flags & ZIO_FLAG_IO_REPAIR)
1573 flags &= ~ZIO_FLAG_SPECULATIVE;
1574
1575 /*
1576 * If we're creating a child I/O that is not associated with a
1577 * top-level vdev, then the child zio is not an allocating I/O.
1578 * If this is a retried I/O then we ignore it since we will
1579 * have already processed the original allocating I/O.
1580 */
1581 if (flags & ZIO_FLAG_IO_ALLOCATING &&
1582 (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
1583 ASSERT(pio->io_metaslab_class != NULL);
1584 ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
1585 ASSERT(type == ZIO_TYPE_WRITE);
1586 ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
1587 ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
1588 ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
1589 pio->io_child_type == ZIO_CHILD_GANG);
1590
1591 flags &= ~ZIO_FLAG_IO_ALLOCATING;
1592 }
1593
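	/*
	 * Start the child at the stage just before VDEV_IO_START so that
	 * zio_execute() advances it directly into the vdev I/O stages.
	 */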
1594 zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
1595 done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
1596 ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
1597 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
1598
1599 return (zio);
1600 }
1601
1602 zio_t *
1603 zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
1604 zio_type_t type, zio_priority_t priority, zio_flag_t flags,
1605 zio_done_func_t *done, void *private)
1606 {
1607 zio_t *zio;
1608
1609 ASSERT(vd->vdev_ops->vdev_op_leaf);
1610
1611 zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
1612 data, size, size, done, private, type, priority,
1613 flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
1614 vd, offset, NULL,
1615 ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);
1616
1617 return (zio);
1618 }
1619
1620
1621 /*
1622 * Send a flush command to the given vdev. Unlike most zio creation functions,
1623 * the flush zios are issued immediately. You can wait on pio to pause until
1624 * the flushes complete.
1625 */
1626 void
zio_flush(zio_t *pio, vdev_t *vd)
1628 {
1629 const zio_flag_t flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
1630 ZIO_FLAG_DONT_RETRY;
1631
1632 if (vd->vdev_nowritecache)
1633 return;
1634
1635 if (vd->vdev_children == 0) {
1636 zio_nowait(zio_create(pio, vd->vdev_spa, 0, NULL, NULL, 0, 0,
1637 NULL, NULL, ZIO_TYPE_FLUSH, ZIO_PRIORITY_NOW, flags, vd, 0,
1638 NULL, ZIO_STAGE_OPEN, ZIO_FLUSH_PIPELINE));
1639 } else {
1640 for (uint64_t c = 0; c < vd->vdev_children; c++)
1641 zio_flush(pio, vd->vdev_child[c]);
1642 }
1643 }
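
/*
 * Usage sketch for zio_flush(): a caller that needs the flushes to
 * complete before proceeding typically hangs them off a root zio and
 * waits on it (the vdev argument may be an interior vdev, since
 * zio_flush() recurses to the leaves):
 *
 *	zio_t *pio = zio_root(spa, NULL, NULL, 0);
 *	zio_flush(pio, spa->spa_root_vdev);
 *	(void) zio_wait(pio);
 */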
1644
1645 void
zio_shrink(zio_t *zio, uint64_t size)
1647 {
1648 ASSERT3P(zio->io_executor, ==, NULL);
1649 ASSERT3U(zio->io_orig_size, ==, zio->io_size);
1650 ASSERT3U(size, <=, zio->io_size);
1651
1652 /*
1653 * We don't shrink for raidz because of problems with the
1654 * reconstruction when reading back less than the block size.
1655 * Note, BP_IS_RAIDZ() assumes no compression.
1656 */
1657 ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
1658 if (!BP_IS_RAIDZ(zio->io_bp)) {
1659 /* we are not doing a raw write */
1660 ASSERT3U(zio->io_size, ==, zio->io_lsize);
1661 zio->io_orig_size = zio->io_size = zio->io_lsize = size;
1662 }
1663 }
1664
1665 /*
1666 * Round provided allocation size up to a value that can be allocated
1667 * by at least some vdev(s) in the pool with minimum or no additional
1668 * padding and without extra space usage on others
1669 */
1670 static uint64_t
zio_roundup_alloc_size(spa_t *spa, uint64_t size)
1672 {
1673 if (size > spa->spa_min_alloc)
1674 return (roundup(size, spa->spa_gcd_alloc));
1675 return (spa->spa_min_alloc);
1676 }
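
/*
 * Worked example for zio_roundup_alloc_size(), with hypothetical pool
 * geometry: if spa_min_alloc is 4096 and spa_gcd_alloc is 4096, a
 * 6000-byte request rounds up to 8192, while a request of 4096 bytes or
 * less returns 4096. If mixed-ashift vdevs make spa_gcd_alloc 512, the
 * same 6000-byte request would instead round up to 6144.
 */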
1677
1678 /*
1679 * ==========================================================================
1680 * Prepare to read and write logical blocks
1681 * ==========================================================================
1682 */
1683
1684 static zio_t *
zio_read_bp_init(zio_t *zio)
1686 {
1687 blkptr_t *bp = zio->io_bp;
1688 uint64_t psize =
1689 BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
1690
1691 ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
1692
1693 if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
1694 zio->io_child_type == ZIO_CHILD_LOGICAL &&
1695 !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
1696 zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
1697 psize, psize, zio_decompress);
1698 }
1699
1700 if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
1701 BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
1702 zio->io_child_type == ZIO_CHILD_LOGICAL) {
1703 zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
1704 psize, psize, zio_decrypt);
1705 }
1706
1707 if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
1708 int psize = BPE_GET_PSIZE(bp);
1709 void *data = abd_borrow_buf(zio->io_abd, psize);
1710
1711 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1712 decode_embedded_bp_compressed(bp, data);
1713 abd_return_buf_copy(zio->io_abd, data, psize);
1714 } else {
1715 ASSERT(!BP_IS_EMBEDDED(bp));
1716 }
1717
1718 if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
1719 zio->io_pipeline = ZIO_DDT_READ_PIPELINE;
1720
1721 return (zio);
1722 }
1723
1724 static zio_t *
zio_write_bp_init(zio_t *zio)
1726 {
1727 if (!IO_IS_ALLOCATING(zio))
1728 return (zio);
1729
1730 ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1731
1732 if (zio->io_bp_override) {
1733 blkptr_t *bp = zio->io_bp;
1734 zio_prop_t *zp = &zio->io_prop;
1735
1736 ASSERT(BP_GET_LOGICAL_BIRTH(bp) != zio->io_txg);
1737
1738 *bp = *zio->io_bp_override;
1739 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1740
1741 if (zp->zp_brtwrite)
1742 return (zio);
1743
1744 ASSERT(!BP_GET_DEDUP(zio->io_bp_override));
1745
1746 if (BP_IS_EMBEDDED(bp))
1747 return (zio);
1748
1749 /*
1750 * If we've been overridden and nopwrite is set then
1751 * set the flag accordingly to indicate that a nopwrite
1752 * has already occurred.
1753 */
1754 if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
1755 ASSERT(!zp->zp_dedup);
1756 ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
1757 zio->io_flags |= ZIO_FLAG_NOPWRITE;
1758 return (zio);
1759 }
1760
1761 ASSERT(!zp->zp_nopwrite);
1762
1763 if (BP_IS_HOLE(bp) || !zp->zp_dedup)
1764 return (zio);
1765
1766 ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
1767 ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);
1768
1769 if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
1770 !zp->zp_encrypt) {
1771 BP_SET_DEDUP(bp, 1);
1772 zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
1773 return (zio);
1774 }
1775
1776 /*
1777 * We were unable to handle this as an override bp, treat
1778 * it as a regular write I/O.
1779 */
1780 zio->io_bp_override = NULL;
1781 *bp = zio->io_bp_orig;
1782 zio->io_pipeline = zio->io_orig_pipeline;
1783 }
1784
1785 return (zio);
1786 }
1787
1788 static zio_t *
zio_write_compress(zio_t *zio)
1790 {
1791 spa_t *spa = zio->io_spa;
1792 zio_prop_t *zp = &zio->io_prop;
1793 enum zio_compress compress = zp->zp_compress;
1794 blkptr_t *bp = zio->io_bp;
1795 uint64_t lsize = zio->io_lsize;
1796 uint64_t psize = zio->io_size;
1797 uint32_t pass = 1;
1798
1799 /*
1800 * If our children haven't all reached the ready stage,
1801 * wait for them and then repeat this pipeline stage.
1802 */
1803 if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
1804 ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
1805 return (NULL);
1806 }
1807
1808 if (!IO_IS_ALLOCATING(zio))
1809 return (zio);
1810
1811 if (zio->io_children_ready != NULL) {
1812 /*
1813 * Now that all our children are ready, run the callback
1814 * associated with this zio in case it wants to modify the
1815 * data to be written.
1816 */
1817 ASSERT3U(zp->zp_level, >, 0);
1818 zio->io_children_ready(zio);
1819 }
1820
1821 ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1822 ASSERT(zio->io_bp_override == NULL);
1823
1824 if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg) {
1825 /*
1826 * We're rewriting an existing block, which means we're
1827 * working on behalf of spa_sync(). For spa_sync() to
1828 * converge, it must eventually be the case that we don't
1829 * have to allocate new blocks. But compression changes
1830 * the blocksize, which forces a reallocate, and makes
1831 * convergence take longer. Therefore, after the first
1832 * few passes, stop compressing to ensure convergence.
1833 */
1834 pass = spa_sync_pass(spa);
1835
1836 ASSERT(zio->io_txg == spa_syncing_txg(spa));
1837 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1838 ASSERT(!BP_GET_DEDUP(bp));
1839
1840 if (pass >= zfs_sync_pass_dont_compress)
1841 compress = ZIO_COMPRESS_OFF;
1842
1843 /* Make sure someone doesn't change their mind on overwrites */
1844 ASSERT(BP_IS_EMBEDDED(bp) || BP_IS_GANG(bp) ||
1845 MIN(zp->zp_copies, spa_max_replication(spa))
1846 == BP_GET_NDVAS(bp));
1847 }
1848
1849 /* If it's a compressed write that is not raw, compress the buffer. */
1850 if (compress != ZIO_COMPRESS_OFF &&
1851 !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
1852 void *cbuf = NULL;
1853 psize = zio_compress_data(compress, zio->io_abd, &cbuf, lsize,
1854 zp->zp_complevel);
1855 if (psize == 0) {
1856 compress = ZIO_COMPRESS_OFF;
1857 } else if (psize >= lsize) {
1858 compress = ZIO_COMPRESS_OFF;
1859 if (cbuf != NULL)
1860 zio_buf_free(cbuf, lsize);
1861 } else if (!zp->zp_dedup && !zp->zp_encrypt &&
1862 psize <= BPE_PAYLOAD_SIZE &&
1863 zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
1864 spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
1865 encode_embedded_bp_compressed(bp,
1866 cbuf, compress, lsize, psize);
1867 BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
1868 BP_SET_TYPE(bp, zio->io_prop.zp_type);
1869 BP_SET_LEVEL(bp, zio->io_prop.zp_level);
1870 zio_buf_free(cbuf, lsize);
1871 BP_SET_LOGICAL_BIRTH(bp, zio->io_txg);
1872 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1873 ASSERT(spa_feature_is_active(spa,
1874 SPA_FEATURE_EMBEDDED_DATA));
1875 return (zio);
1876 } else {
1877 /*
1878 * Round compressed size up to the minimum allocation
1879 * size of the smallest-ashift device, and zero the
1880 * tail. This ensures that the compressed size of the
1881 * BP (and thus compressratio property) are correct,
1882 * in that we charge for the padding used to fill out
1883 * the last sector.
1884 */
1885 size_t rounded = (size_t)zio_roundup_alloc_size(spa,
1886 psize);
1887 if (rounded >= lsize) {
1888 compress = ZIO_COMPRESS_OFF;
1889 zio_buf_free(cbuf, lsize);
1890 psize = lsize;
1891 } else {
1892 abd_t *cdata = abd_get_from_buf(cbuf, lsize);
1893 abd_take_ownership_of_buf(cdata, B_TRUE);
1894 abd_zero_off(cdata, psize, rounded - psize);
1895 psize = rounded;
1896 zio_push_transform(zio, cdata,
1897 psize, lsize, NULL);
1898 }
1899 }
1900
1901 /*
1902 * We were unable to handle this as an override bp, treat
1903 * it as a regular write I/O.
1904 */
1905 zio->io_bp_override = NULL;
1906 *bp = zio->io_bp_orig;
1907 zio->io_pipeline = zio->io_orig_pipeline;
1908
1909 } else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
1910 zp->zp_type == DMU_OT_DNODE) {
1911 /*
1912 * The DMU actually relies on the zio layer's compression
1913 * to free metadnode blocks that have had all contained
1914 * dnodes freed. As a result, even when doing a raw
1915 * receive, we must check whether the block can be compressed
1916 * to a hole.
1917 */
1918 psize = zio_compress_data(ZIO_COMPRESS_EMPTY,
1919 zio->io_abd, NULL, lsize, zp->zp_complevel);
1920 if (psize == 0 || psize >= lsize)
1921 compress = ZIO_COMPRESS_OFF;
1922 } else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS &&
1923 !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) {
1924 /*
1925 * If we are raw receiving an encrypted dataset we should not
1926 * take this codepath because it will change the on-disk block
1927 * and decryption will fail.
1928 */
1929 size_t rounded = MIN((size_t)zio_roundup_alloc_size(spa, psize),
1930 lsize);
1931
1932 if (rounded != psize) {
1933 abd_t *cdata = abd_alloc_linear(rounded, B_TRUE);
1934 abd_zero_off(cdata, psize, rounded - psize);
1935 abd_copy_off(cdata, zio->io_abd, 0, 0, psize);
1936 psize = rounded;
1937 zio_push_transform(zio, cdata,
1938 psize, rounded, NULL);
1939 }
1940 } else {
1941 ASSERT3U(psize, !=, 0);
1942 }
1943
1944 /*
1945 * The final pass of spa_sync() must be all rewrites, but the first
1946 * few passes offer a trade-off: allocating blocks defers convergence,
1947 * but newly allocated blocks are sequential, so they can be written
1948 * to disk faster. Therefore, we allow the first few passes of
1949 * spa_sync() to allocate new blocks, but force rewrites after that.
1950 * There should only be a handful of blocks after pass 1 in any case.
1951 */
1952 if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg &&
1953 BP_GET_PSIZE(bp) == psize &&
1954 pass >= zfs_sync_pass_rewrite) {
1955 VERIFY3U(psize, !=, 0);
1956 enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
1957
1958 zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
1959 zio->io_flags |= ZIO_FLAG_IO_REWRITE;
1960 } else {
1961 BP_ZERO(bp);
1962 zio->io_pipeline = ZIO_WRITE_PIPELINE;
1963 }
1964
1965 if (psize == 0) {
1966 if (BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig) != 0 &&
1967 spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
1968 BP_SET_LSIZE(bp, lsize);
1969 BP_SET_TYPE(bp, zp->zp_type);
1970 BP_SET_LEVEL(bp, zp->zp_level);
1971 BP_SET_BIRTH(bp, zio->io_txg, 0);
1972 }
1973 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1974 } else {
1975 ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
1976 BP_SET_LSIZE(bp, lsize);
1977 BP_SET_TYPE(bp, zp->zp_type);
1978 BP_SET_LEVEL(bp, zp->zp_level);
1979 BP_SET_PSIZE(bp, psize);
1980 BP_SET_COMPRESS(bp, compress);
1981 BP_SET_CHECKSUM(bp, zp->zp_checksum);
1982 BP_SET_DEDUP(bp, zp->zp_dedup);
1983 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
1984 if (zp->zp_dedup) {
1985 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1986 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
1987 ASSERT(!zp->zp_encrypt ||
1988 DMU_OT_IS_ENCRYPTED(zp->zp_type));
1989 zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
1990 }
1991 if (zp->zp_nopwrite) {
1992 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1993 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
1994 zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
1995 }
1996 }
1997 return (zio);
1998 }
1999
2000 static zio_t *
zio_free_bp_init(zio_t *zio)
2002 {
2003 blkptr_t *bp = zio->io_bp;
2004
2005 if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
2006 if (BP_GET_DEDUP(bp))
2007 zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
2008 }
2009
2010 ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
2011
2012 return (zio);
2013 }
2014
2015 /*
2016 * ==========================================================================
2017 * Execute the I/O pipeline
2018 * ==========================================================================
2019 */
2020
2021 static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
2023 {
2024 spa_t *spa = zio->io_spa;
2025 zio_type_t t = zio->io_type;
2026 int flags = (cutinline ? TQ_FRONT : 0);
2027
2028 /*
2029 * If we're a config writer or a probe, the normal issue and
2030 * interrupt threads may all be blocked waiting for the config lock.
2031 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
2032 */
2033 if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
2034 t = ZIO_TYPE_NULL;
2035
2036 /*
2037 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
2038 */
2039 if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
2040 t = ZIO_TYPE_NULL;
2041
2042 /*
2043 * If this is a high priority I/O, then use the high priority taskq if
2044 * available.
2045 */
2046 if ((zio->io_priority == ZIO_PRIORITY_NOW ||
2047 zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) &&
2048 spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
2049 q++;
2050
2051 ASSERT3U(q, <, ZIO_TASKQ_TYPES);
2052
2053 /*
2054 * NB: We are assuming that the zio can only be dispatched
2055 * to a single taskq at a time. It would be a grievous error
2056 * to dispatch the zio to another taskq at the same time.
2057 */
2058 ASSERT(taskq_empty_ent(&zio->io_tqent));
2059 spa_taskq_dispatch_ent(spa, t, q, zio_execute, zio, flags,
2060 &zio->io_tqent, zio);
2061 }
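
/*
 * Example of the priority bump above (assuming the default taskq
 * configuration): a ZIO_PRIORITY_SYNC_WRITE zio dispatched with
 * q == ZIO_TASKQ_ISSUE is promoted to ZIO_TASKQ_ISSUE_HIGH when that
 * taskq exists, so synchronous writers are not queued behind bulk
 * asynchronous work.
 */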
2062
2063 static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
2065 {
2066 spa_t *spa = zio->io_spa;
2067
2068 taskq_t *tq = taskq_of_curthread();
2069
2070 for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
2071 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
2072 uint_t i;
2073 for (i = 0; i < tqs->stqs_count; i++) {
2074 if (tqs->stqs_taskq[i] == tq)
2075 return (B_TRUE);
2076 }
2077 }
2078
2079 return (B_FALSE);
2080 }
2081
2082 static zio_t *
zio_issue_async(zio_t *zio)
2084 {
2085 ASSERT((zio->io_type != ZIO_TYPE_WRITE) || ZIO_HAS_ALLOCATOR(zio));
2086 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
2087 return (NULL);
2088 }
2089
2090 void
zio_interrupt(void *zio)
2092 {
2093 zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
2094 }
2095
2096 void
zio_delay_interrupt(zio_t *zio)
2098 {
2099 /*
2100 * The timeout_generic() function isn't defined in userspace, so
2101 * rather than trying to implement the function, the zio delay
2102 * functionality has been disabled for userspace builds.
2103 */
2104
2105 #ifdef _KERNEL
2106 /*
2107 * If io_target_timestamp is zero, then no delay has been registered
2108 * for this IO, so we skip the delay and issue it directly to the
2109 * zio layer.
2110 */
2111 if (zio->io_target_timestamp != 0) {
2112 hrtime_t now = gethrtime();
2113
2114 if (now >= zio->io_target_timestamp) {
2115 /*
2116 * This IO has already taken longer than the target
2117 * delay to complete, so we don't want to delay it
2118 * any longer; we "miss" the delay and issue it
2119 * directly to the zio layer. This is likely due to
2120 * the target latency being set to a value less than
2121 * the underlying hardware can satisfy (e.g. delay
2122 * set to 1ms, but the disks take 10ms to complete an
2123 * IO request).
2124 */
2125
2126 DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
2127 hrtime_t, now);
2128
2129 zio_interrupt(zio);
2130 } else {
2131 taskqid_t tid;
2132 hrtime_t diff = zio->io_target_timestamp - now;
2133 clock_t expire_at_tick = ddi_get_lbolt() +
2134 NSEC_TO_TICK(diff);
2135
2136 DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
2137 hrtime_t, now, hrtime_t, diff);
2138
2139 if (NSEC_TO_TICK(diff) == 0) {
2140 /* Our delay is less than a jiffy - just spin */
2141 zfs_sleep_until(zio->io_target_timestamp);
2142 zio_interrupt(zio);
2143 } else {
2144 /*
2145 * Use taskq_dispatch_delay() in the place of
2146 * OpenZFS's timeout_generic().
2147 */
2148 tid = taskq_dispatch_delay(system_taskq,
2149 zio_interrupt, zio, TQ_NOSLEEP,
2150 expire_at_tick);
2151 if (tid == TASKQID_INVALID) {
2152 /*
2153 * Couldn't allocate a task. Just
2154 * finish the zio without a delay.
2155 */
2156 zio_interrupt(zio);
2157 }
2158 }
2159 }
2160 return;
2161 }
2162 #endif
2163 DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
2164 zio_interrupt(zio);
2165 }
2166
2167 static void
zio_deadman_impl(zio_t *pio, int ziodepth)
2169 {
2170 zio_t *cio, *cio_next;
2171 zio_link_t *zl = NULL;
2172 vdev_t *vd = pio->io_vd;
2173
2174 if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) {
2175 vdev_queue_t *vq = vd ? &vd->vdev_queue : NULL;
2176 zbookmark_phys_t *zb = &pio->io_bookmark;
2177 uint64_t delta = gethrtime() - pio->io_timestamp;
2178 uint64_t failmode = spa_get_deadman_failmode(pio->io_spa);
2179
2180 zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu "
2181 "delta=%llu queued=%llu io=%llu "
2182 "path=%s "
2183 "last=%llu type=%d "
2184 "priority=%d flags=0x%llx stage=0x%x "
2185 "pipeline=0x%x pipeline-trace=0x%x "
2186 "objset=%llu object=%llu "
2187 "level=%llu blkid=%llu "
2188 "offset=%llu size=%llu "
2189 "error=%d",
2190 ziodepth, pio, pio->io_timestamp,
2191 (u_longlong_t)delta, pio->io_delta, pio->io_delay,
2192 vd ? vd->vdev_path : "NULL",
2193 vq ? vq->vq_io_complete_ts : 0, pio->io_type,
2194 pio->io_priority, (u_longlong_t)pio->io_flags,
2195 pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace,
2196 (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
2197 (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid,
2198 (u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size,
2199 pio->io_error);
2200 (void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN,
2201 pio->io_spa, vd, zb, pio, 0);
2202
2203 if (failmode == ZIO_FAILURE_MODE_CONTINUE &&
2204 taskq_empty_ent(&pio->io_tqent)) {
2205 zio_interrupt(pio);
2206 }
2207 }
2208
2209 mutex_enter(&pio->io_lock);
2210 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
2211 cio_next = zio_walk_children(pio, &zl);
2212 zio_deadman_impl(cio, ziodepth + 1);
2213 }
2214 mutex_exit(&pio->io_lock);
2215 }
2216
2217 /*
2218 * Log the critical information describing this zio and all of its children
2219 * using the zfs_dbgmsg() interface then post deadman event for the ZED.
2220 */
2221 void
zio_deadman(zio_t *pio, const char *tag)
2223 {
2224 spa_t *spa = pio->io_spa;
2225 char *name = spa_name(spa);
2226
2227 if (!zfs_deadman_enabled || spa_suspended(spa))
2228 return;
2229
2230 zio_deadman_impl(pio, 0);
2231
2232 switch (spa_get_deadman_failmode(spa)) {
2233 case ZIO_FAILURE_MODE_WAIT:
2234 zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name);
2235 break;
2236
2237 case ZIO_FAILURE_MODE_CONTINUE:
2238 zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name);
2239 break;
2240
2241 case ZIO_FAILURE_MODE_PANIC:
2242 fm_panic("%s determined I/O to pool '%s' is hung.", tag, name);
2243 break;
2244 }
2245 }
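
/*
 * The behavior chosen above follows the zfs_deadman_failmode tunable
 * ("wait", "continue", or "panic") reported by
 * spa_get_deadman_failmode(). With "continue", for example,
 * zio_deadman_impl() re-dispatches the hung zio via zio_interrupt()
 * instead of waiting indefinitely.
 */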
2246
2247 /*
2248 * Execute the I/O pipeline until one of the following occurs:
2249 * (1) the I/O completes; (2) the pipeline stalls waiting for
2250 * dependent child I/Os; (3) the I/O issues, so we're waiting
2251 * for an I/O completion interrupt; (4) the I/O is delegated by
2252 * vdev-level caching or aggregation; (5) the I/O is deferred
2253 * due to vdev-level queueing; (6) the I/O is handed off to
2254 * another thread. In all cases, the pipeline stops whenever
2255 * there's no CPU work; it never burns a thread in cv_wait_io().
2256 *
2257 * There's no locking on io_stage because there's no legitimate way
2258 * for multiple threads to be attempting to process the same I/O.
2259 */
2260 static zio_pipe_stage_t *zio_pipeline[];
2261
2262 /*
2263 * zio_execute() is a wrapper around the static function
2264 * __zio_execute() so that we can force __zio_execute() to be
2265 * inlined. This reduces stack overhead which is important
2266 * because __zio_execute() is called recursively in several zio
2267 * code paths. zio_execute() itself cannot be inlined because
2268 * it is externally visible.
2269 */
2270 void
zio_execute(void *zio)
2272 {
2273 fstrans_cookie_t cookie;
2274
2275 cookie = spl_fstrans_mark();
2276 __zio_execute(zio);
2277 spl_fstrans_unmark(cookie);
2278 }
2279
2280 /*
2281 * Used to determine if in the current context the stack is sized large
2282 * enough to allow zio_execute() to be called recursively. A minimum
2283 * stack size of 16K is required to avoid needing to re-dispatch the zio.
2284 */
2285 static boolean_t
zio_execute_stack_check(zio_t *zio)
2287 {
2288 #if !defined(HAVE_LARGE_STACKS)
2289 dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
2290
2291 /* Executing in txg_sync_thread() context. */
2292 if (dp && curthread == dp->dp_tx.tx_sync_thread)
2293 return (B_TRUE);
2294
2295 /* Pool initialization outside of zio_taskq context. */
2296 if (dp && spa_is_initializing(dp->dp_spa) &&
2297 !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
2298 !zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))
2299 return (B_TRUE);
2300 #else
2301 (void) zio;
2302 #endif /* HAVE_LARGE_STACKS */
2303
2304 return (B_FALSE);
2305 }
2306
2307 __attribute__((always_inline))
2308 static inline void
__zio_execute(zio_t *zio)
2310 {
2311 ASSERT3U(zio->io_queued_timestamp, >, 0);
2312
2313 while (zio->io_stage < ZIO_STAGE_DONE) {
2314 enum zio_stage pipeline = zio->io_pipeline;
2315 enum zio_stage stage = zio->io_stage;
2316
2317 zio->io_executor = curthread;
2318
2319 ASSERT(!MUTEX_HELD(&zio->io_lock));
2320 ASSERT(ISP2(stage));
2321 ASSERT(zio->io_stall == NULL);
2322
2323 do {
2324 stage <<= 1;
2325 } while ((stage & pipeline) == 0);
2326
2327 ASSERT(stage <= ZIO_STAGE_DONE);
2328
2329 /*
2330 * If we are in interrupt context and this pipeline stage
2331 * will grab a config lock that is held across I/O,
2332 * or may wait for an I/O that needs an interrupt thread
2333 * to complete, issue async to avoid deadlock.
2334 *
2335 * For VDEV_IO_START, we cut in line so that the io will
2336 * be sent to disk promptly.
2337 */
2338 if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
2339 zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
2340 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
2341 zio_requeue_io_start_cut_in_line : B_FALSE;
2342 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
2343 return;
2344 }
2345
2346 /*
2347 * If the current context doesn't have large enough stacks
2348 * the zio must be issued asynchronously to prevent overflow.
2349 */
2350 if (zio_execute_stack_check(zio)) {
2351 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
2352 zio_requeue_io_start_cut_in_line : B_FALSE;
2353 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
2354 return;
2355 }
2356
2357 zio->io_stage = stage;
2358 zio->io_pipeline_trace |= zio->io_stage;
2359
2360 /*
2361 * The zio pipeline stage returns the next zio to execute
2362 * (typically the same as this one), or NULL if we should
2363 * stop.
2364 */
2365 zio = zio_pipeline[highbit64(stage) - 1](zio);
2366
2367 if (zio == NULL)
2368 return;
2369 }
2370 }
2371
2372
2373 /*
2374 * ==========================================================================
2375 * Initiate I/O, either sync or async
2376 * ==========================================================================
2377 */
2378 int
zio_wait(zio_t *zio)
2380 {
2381 /*
2382 * Some routines, like zio_free_sync(), may return a NULL zio
2383 * to avoid the performance overhead of creating and then destroying
2384 * an unneeded zio. For the callers' simplicity, we accept a NULL
2385 * zio and ignore it.
2386 */
2387 if (zio == NULL)
2388 return (0);
2389
2390 long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms);
2391 int error;
2392
2393 ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN);
2394 ASSERT3P(zio->io_executor, ==, NULL);
2395
2396 zio->io_waiter = curthread;
2397 ASSERT0(zio->io_queued_timestamp);
2398 zio->io_queued_timestamp = gethrtime();
2399
2400 if (zio->io_type == ZIO_TYPE_WRITE) {
2401 spa_select_allocator(zio);
2402 }
2403 __zio_execute(zio);
2404
2405 mutex_enter(&zio->io_lock);
2406 while (zio->io_executor != NULL) {
2407 error = cv_timedwait_io(&zio->io_cv, &zio->io_lock,
2408 ddi_get_lbolt() + timeout);
2409
2410 if (zfs_deadman_enabled && error == -1 &&
2411 gethrtime() - zio->io_queued_timestamp >
2412 spa_deadman_ziotime(zio->io_spa)) {
2413 mutex_exit(&zio->io_lock);
2414 timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms);
2415 zio_deadman(zio, FTAG);
2416 mutex_enter(&zio->io_lock);
2417 }
2418 }
2419 mutex_exit(&zio->io_lock);
2420
2421 error = zio->io_error;
2422 zio_destroy(zio);
2423
2424 return (error);
2425 }
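
/*
 * Usage sketch: zio_wait() is the synchronous issue path and is usually
 * wrapped directly around a zio creation call, e.g.:
 *
 *	error = zio_wait(zio_read(NULL, spa, bp, abd, size, NULL, NULL,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &zb));
 *
 * (zio_ddt_collision() below uses this pattern.) zio_nowait() is the
 * fire-and-forget counterpart.
 */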
2426
2427 void
zio_nowait(zio_t *zio)
2429 {
2430 /*
2431 * See comment in zio_wait().
2432 */
2433 if (zio == NULL)
2434 return;
2435
2436 ASSERT3P(zio->io_executor, ==, NULL);
2437
2438 if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
2439 list_is_empty(&zio->io_parent_list)) {
2440 zio_t *pio;
2441
2442 /*
2443 * This is a logical async I/O with no parent to wait for it.
2444 * We add it to the spa_async_root_zio "Godfather" I/O, which
2445 * ensures that it completes prior to unloading the pool.
2446 */
2447 spa_t *spa = zio->io_spa;
2448 pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE];
2449
2450 zio_add_child(pio, zio);
2451 }
2452
2453 ASSERT0(zio->io_queued_timestamp);
2454 zio->io_queued_timestamp = gethrtime();
2455 if (zio->io_type == ZIO_TYPE_WRITE) {
2456 spa_select_allocator(zio);
2457 }
2458 __zio_execute(zio);
2459 }
2460
2461 /*
2462 * ==========================================================================
2463 * Reexecute, cancel, or suspend/resume failed I/O
2464 * ==========================================================================
2465 */
2466
2467 static void
zio_reexecute(void *arg)
2469 {
2470 zio_t *pio = arg;
2471 zio_t *cio, *cio_next, *gio;
2472
2473 ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
2474 ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
2475 ASSERT(pio->io_gang_leader == NULL);
2476 ASSERT(pio->io_gang_tree == NULL);
2477
2478 mutex_enter(&pio->io_lock);
2479 pio->io_flags = pio->io_orig_flags;
2480 pio->io_stage = pio->io_orig_stage;
2481 pio->io_pipeline = pio->io_orig_pipeline;
2482 pio->io_reexecute = 0;
2483 pio->io_flags |= ZIO_FLAG_REEXECUTED;
2484 pio->io_pipeline_trace = 0;
2485 pio->io_error = 0;
2486 pio->io_state[ZIO_WAIT_READY] = (pio->io_stage >= ZIO_STAGE_READY) ||
2487 (pio->io_pipeline & ZIO_STAGE_READY) == 0;
2488 pio->io_state[ZIO_WAIT_DONE] = (pio->io_stage >= ZIO_STAGE_DONE);
2489 zio_link_t *zl = NULL;
2490 while ((gio = zio_walk_parents(pio, &zl)) != NULL) {
2491 for (int w = 0; w < ZIO_WAIT_TYPES; w++) {
2492 gio->io_children[pio->io_child_type][w] +=
2493 !pio->io_state[w];
2494 }
2495 }
2496 for (int c = 0; c < ZIO_CHILD_TYPES; c++)
2497 pio->io_child_error[c] = 0;
2498
2499 if (IO_IS_ALLOCATING(pio))
2500 BP_ZERO(pio->io_bp);
2501
2502 /*
2503 * As we reexecute pio's children, new children could be created.
2504 * New children go to the head of pio's io_child_list, however,
2505 * so we will (correctly) not reexecute them. The key is that
2506 * the remainder of pio's io_child_list, from 'cio_next' onward,
2507 * cannot be affected by any side effects of reexecuting 'cio'.
2508 */
2509 zl = NULL;
2510 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
2511 cio_next = zio_walk_children(pio, &zl);
2512 mutex_exit(&pio->io_lock);
2513 zio_reexecute(cio);
2514 mutex_enter(&pio->io_lock);
2515 }
2516 mutex_exit(&pio->io_lock);
2517
2518 /*
2519 * Now that all children have been reexecuted, execute the parent.
2520 * We don't reexecute "The Godfather" I/O here as it's the
2521 * responsibility of the caller to wait on it.
2522 */
2523 if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
2524 pio->io_queued_timestamp = gethrtime();
2525 __zio_execute(pio);
2526 }
2527 }
2528
2529 void
zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
2531 {
2532 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
2533 fm_panic("Pool '%s' has encountered an uncorrectable I/O "
2534 "failure and the failure mode property for this pool "
2535 "is set to panic.", spa_name(spa));
2536
2537 if (reason != ZIO_SUSPEND_MMP) {
2538 cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable "
2539 "I/O failure and has been suspended.\n", spa_name(spa));
2540 }
2541
2542 (void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL,
2543 NULL, NULL, 0);
2544
2545 mutex_enter(&spa->spa_suspend_lock);
2546
2547 if (spa->spa_suspend_zio_root == NULL)
2548 spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
2549 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
2550 ZIO_FLAG_GODFATHER);
2551
2552 spa->spa_suspended = reason;
2553
2554 if (zio != NULL) {
2555 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
2556 ASSERT(zio != spa->spa_suspend_zio_root);
2557 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2558 ASSERT(zio_unique_parent(zio) == NULL);
2559 ASSERT(zio->io_stage == ZIO_STAGE_DONE);
2560 zio_add_child(spa->spa_suspend_zio_root, zio);
2561 }
2562
2563 mutex_exit(&spa->spa_suspend_lock);
2564 }
2565
2566 int
zio_resume(spa_t *spa)
2568 {
2569 zio_t *pio;
2570
2571 /*
2572 * Reexecute all previously suspended i/o.
2573 */
2574 mutex_enter(&spa->spa_suspend_lock);
2575 spa->spa_suspended = ZIO_SUSPEND_NONE;
2576 cv_broadcast(&spa->spa_suspend_cv);
2577 pio = spa->spa_suspend_zio_root;
2578 spa->spa_suspend_zio_root = NULL;
2579 mutex_exit(&spa->spa_suspend_lock);
2580
2581 if (pio == NULL)
2582 return (0);
2583
2584 zio_reexecute(pio);
2585 return (zio_wait(pio));
2586 }
2587
2588 void
zio_resume_wait(spa_t *spa)
2590 {
2591 mutex_enter(&spa->spa_suspend_lock);
2592 while (spa_suspended(spa))
2593 cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
2594 mutex_exit(&spa->spa_suspend_lock);
2595 }
2596
2597 /*
2598 * ==========================================================================
2599 * Gang blocks.
2600 *
2601 * A gang block is a collection of small blocks that looks to the DMU
2602 * like one large block. When zio_dva_allocate() cannot find a block
2603 * of the requested size, due to either severe fragmentation or the pool
2604 * being nearly full, it calls zio_write_gang_block() to construct the
2605 * block from smaller fragments.
2606 *
2607 * A gang block consists of a gang header (zio_gbh_phys_t) and up to
2608 * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like
2609 * an indirect block: it's an array of block pointers. It consumes
2610 * only one sector and hence is allocatable regardless of fragmentation.
2611 * The gang header's bps point to its gang members, which hold the data.
2612 *
2613 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
2614 * as the verifier to ensure uniqueness of the SHA256 checksum.
2615 * Critically, the gang block bp's blk_cksum is the checksum of the data,
2616 * not the gang header. This ensures that data block signatures (needed for
2617 * deduplication) are independent of how the block is physically stored.
2618 *
2619 * Gang blocks can be nested: a gang member may itself be a gang block.
2620 * Thus every gang block is a tree in which root and all interior nodes are
2621 * gang headers, and the leaves are normal blocks that contain user data.
2622 * The root of the gang tree is called the gang leader.
2623 *
2624 * To perform any operation (read, rewrite, free, claim) on a gang block,
2625 * zio_gang_assemble() first assembles the gang tree (minus data leaves)
2626 * in the io_gang_tree field of the original logical i/o by recursively
2627 * reading the gang leader and all gang headers below it. This yields
2628 * an in-core tree containing the contents of every gang header and the
2629 * bps for every constituent of the gang block.
2630 *
2631 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
2632 * and invokes a callback on each bp. To free a gang block, zio_gang_issue()
2633 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
2634 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
2635 * zio_read_gang() is a wrapper around zio_read() that omits reading gang
2636 * headers, since we already have those in io_gang_tree. zio_rewrite_gang()
2637 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
2638 * of the gang header plus zio_checksum_compute() of the data to update the
2639 * gang header's blk_cksum as described above.
2640 *
2641 * The two-phase assemble/issue model solves the problem of partial failure --
2642 * what if you'd freed part of a gang block but then couldn't read the
2643 * gang header for another part? Assembling the entire gang tree first
2644 * ensures that all the necessary gang header I/O has succeeded before
2645 * starting the actual work of free, claim, or write. Once the gang tree
2646 * is assembled, free and claim are in-memory operations that cannot fail.
2647 *
2648 * In the event that a gang write fails, zio_dva_unallocate() walks the
2649 * gang tree to immediately free (i.e. insert back into the space map)
2650 * everything we've allocated. This ensures that we don't get ENOSPC
2651 * errors during repeated suspend/resume cycles due to a flaky device.
2652 *
2653 * Gang rewrites only happen during sync-to-convergence. If we can't assemble
2654 * the gang tree, we won't modify the block, so we can safely defer the free
2655 * (knowing that the block is still intact). If we *can* assemble the gang
2656 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
2657 * each constituent bp and we can allocate a new block on the next sync pass.
2658 *
2659 * In all cases, the gang tree allows complete recovery from partial failure.
2660 * ==========================================================================
2661 */
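
/*
 * Layout sketch (member sizes are illustrative): a gang header fits in a
 * single SPA_GANGBLOCKSIZE sector holding SPA_GBH_NBLKPTRS block
 * pointers plus a zio_eck_t tail. A 96K logical block that could not be
 * allocated contiguously might therefore be stored as:
 *
 *	gang bp (BP_IS_GANG)
 *	  -> gang header (one sector)
 *	       zg_blkptr[0] -> 32K data member
 *	       zg_blkptr[1] -> 32K data member
 *	       zg_blkptr[2] -> 32K data member
 */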
2662
2663 static void
zio_gang_issue_func_done(zio_t *zio)
2665 {
2666 abd_free(zio->io_abd);
2667 }
2668
2669 static zio_t *
zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
    uint64_t offset)
2672 {
2673 if (gn != NULL)
2674 return (pio);
2675
2676 return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset),
2677 BP_GET_PSIZE(bp), zio_gang_issue_func_done,
2678 NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
2679 &pio->io_bookmark));
2680 }
2681
2682 static zio_t *
zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
    uint64_t offset)
2685 {
2686 zio_t *zio;
2687
2688 if (gn != NULL) {
2689 abd_t *gbh_abd =
2690 abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2691 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
2692 gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL,
2693 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
2694 &pio->io_bookmark);
2695 /*
2696 * As we rewrite each gang header, the pipeline will compute
2697 * a new gang block header checksum for it; but no one will
2698 * compute a new data checksum, so we do that here. The one
2699 * exception is the gang leader: the pipeline already computed
2700 * its data checksum because that stage precedes gang assembly.
2701 * (Presently, nothing actually uses interior data checksums;
2702 * this is just good hygiene.)
2703 */
2704 if (gn != pio->io_gang_leader->io_gang_tree) {
2705 abd_t *buf = abd_get_offset(data, offset);
2706
2707 zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
2708 buf, BP_GET_PSIZE(bp));
2709
2710 abd_free(buf);
2711 }
2712 /*
2713 * If we are here to damage data for testing purposes,
2714 * leave the GBH alone so that we can detect the damage.
2715 */
2716 if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
2717 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
2718 } else {
2719 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
2720 abd_get_offset(data, offset), BP_GET_PSIZE(bp),
2721 zio_gang_issue_func_done, NULL, pio->io_priority,
2722 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
2723 }
2724
2725 return (zio);
2726 }
2727
2728 static zio_t *
zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
    uint64_t offset)
2731 {
2732 (void) gn, (void) data, (void) offset;
2733
2734 zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
2735 ZIO_GANG_CHILD_FLAGS(pio));
2736 if (zio == NULL) {
2737 zio = zio_null(pio, pio->io_spa,
2738 NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio));
2739 }
2740 return (zio);
2741 }
2742
2743 static zio_t *
zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
    uint64_t offset)
2746 {
2747 (void) gn, (void) data, (void) offset;
2748 return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
2749 NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
2750 }
2751
2752 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
2753 NULL,
2754 zio_read_gang,
2755 zio_rewrite_gang,
2756 zio_free_gang,
2757 zio_claim_gang,
2758 NULL
2759 };
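
/*
 * The table above is indexed by io_type; only read, write (rewrite),
 * free, and claim ever operate on gang blocks, so the remaining I/O
 * types map to NULL and are never reached by zio_gang_tree_issue().
 */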
2760
2761 static void zio_gang_tree_assemble_done(zio_t *zio);
2762
2763 static zio_gang_node_t *
zio_gang_node_alloc(zio_gang_node_t **gnpp)
2765 {
2766 zio_gang_node_t *gn;
2767
2768 ASSERT(*gnpp == NULL);
2769
2770 gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
2771 gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
2772 *gnpp = gn;
2773
2774 return (gn);
2775 }
2776
2777 static void
zio_gang_node_free(zio_gang_node_t **gnpp)
2779 {
2780 zio_gang_node_t *gn = *gnpp;
2781
2782 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
2783 ASSERT(gn->gn_child[g] == NULL);
2784
2785 zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2786 kmem_free(gn, sizeof (*gn));
2787 *gnpp = NULL;
2788 }
2789
2790 static void
zio_gang_tree_free(zio_gang_node_t **gnpp)
2792 {
2793 zio_gang_node_t *gn = *gnpp;
2794
2795 if (gn == NULL)
2796 return;
2797
2798 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
2799 zio_gang_tree_free(&gn->gn_child[g]);
2800
2801 zio_gang_node_free(gnpp);
2802 }
2803
2804 static void
zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
2806 {
2807 zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
2808 abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2809
2810 ASSERT(gio->io_gang_leader == gio);
2811 ASSERT(BP_IS_GANG(bp));
2812
2813 zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE,
2814 zio_gang_tree_assemble_done, gn, gio->io_priority,
2815 ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
2816 }
2817
2818 static void
zio_gang_tree_assemble_done(zio_t *zio)
2820 {
2821 zio_t *gio = zio->io_gang_leader;
2822 zio_gang_node_t *gn = zio->io_private;
2823 blkptr_t *bp = zio->io_bp;
2824
2825 ASSERT(gio == zio_unique_parent(zio));
2826 ASSERT(list_is_empty(&zio->io_child_list));
2827
2828 if (zio->io_error)
2829 return;
2830
2831 /* this ABD was created from a linear buf in zio_gang_tree_assemble */
2832 if (BP_SHOULD_BYTESWAP(bp))
2833 byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size);
2834
2835 ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh);
2836 ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
2837 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
2838
2839 abd_free(zio->io_abd);
2840
2841 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
2842 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
2843 if (!BP_IS_GANG(gbp))
2844 continue;
2845 zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
2846 }
2847 }
2848
2849 static void
zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
    uint64_t offset)
2852 {
2853 zio_t *gio = pio->io_gang_leader;
2854 zio_t *zio;
2855
2856 ASSERT(BP_IS_GANG(bp) == !!gn);
2857 ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
2858 ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);
2859
2860 /*
2861 * If you're a gang header, your data is in gn->gn_gbh.
2862 * If you're a gang member, your data is in 'data' and gn == NULL.
2863 */
2864 zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset);
2865
2866 if (gn != NULL) {
2867 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
2868
2869 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
2870 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
2871 if (BP_IS_HOLE(gbp))
2872 continue;
2873 zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data,
2874 offset);
2875 offset += BP_GET_PSIZE(gbp);
2876 }
2877 }
2878
2879 if (gn == gio->io_gang_tree)
2880 ASSERT3U(gio->io_size, ==, offset);
2881
2882 if (zio != pio)
2883 zio_nowait(zio);
2884 }
2885
2886 static zio_t *
zio_gang_assemble(zio_t *zio)
2888 {
2889 blkptr_t *bp = zio->io_bp;
2890
2891 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
2892 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2893
2894 zio->io_gang_leader = zio;
2895
2896 zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
2897
2898 return (zio);
2899 }
2900
2901 static zio_t *
zio_gang_issue(zio_t *zio)
2903 {
2904 blkptr_t *bp = zio->io_bp;
2905
2906 if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
2907 return (NULL);
2908 }
2909
2910 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
2911 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2912
2913 if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
2914 zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd,
2915 0);
2916 else
2917 zio_gang_tree_free(&zio->io_gang_tree);
2918
2919 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2920
2921 return (zio);
2922 }
2923
2924 static void
zio_gang_inherit_allocator(zio_t *pio, zio_t *cio)
2926 {
2927 cio->io_allocator = pio->io_allocator;
2928 }
2929
2930 static void
zio_write_gang_member_ready(zio_t *zio)
2932 {
2933 zio_t *pio = zio_unique_parent(zio);
2934 dva_t *cdva = zio->io_bp->blk_dva;
2935 dva_t *pdva = pio->io_bp->blk_dva;
2936 uint64_t asize;
2937 zio_t *gio __maybe_unused = zio->io_gang_leader;
2938
2939 if (BP_IS_HOLE(zio->io_bp))
2940 return;
2941
2942 ASSERT(BP_IS_HOLE(&zio->io_bp_orig));
2943
2944 ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
2945 ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
2946 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
2947 ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
2948 VERIFY3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
2949
2950 mutex_enter(&pio->io_lock);
2951 for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
2952 ASSERT(DVA_GET_GANG(&pdva[d]));
2953 asize = DVA_GET_ASIZE(&pdva[d]);
2954 asize += DVA_GET_ASIZE(&cdva[d]);
2955 DVA_SET_ASIZE(&pdva[d], asize);
2956 }
2957 mutex_exit(&pio->io_lock);
2958 }
2959
2960 static void
zio_write_gang_done(zio_t *zio)
2962 {
2963 /*
2964 * The io_abd field will be NULL for a zio with no data. The io_flags
2965 * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't
2966 * check for it here as it is cleared in zio_ready.
2967 */
2968 if (zio->io_abd != NULL)
2969 abd_free(zio->io_abd);
2970 }
2971
2972 static zio_t *
zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
2974 {
2975 spa_t *spa = pio->io_spa;
2976 blkptr_t *bp = pio->io_bp;
2977 zio_t *gio = pio->io_gang_leader;
2978 zio_t *zio;
2979 zio_gang_node_t *gn, **gnpp;
2980 zio_gbh_phys_t *gbh;
2981 abd_t *gbh_abd;
2982 uint64_t txg = pio->io_txg;
2983 uint64_t resid = pio->io_size;
2984 uint64_t lsize;
2985 int copies = gio->io_prop.zp_copies;
2986 zio_prop_t zp;
2987 int error;
2988 boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA);
2989
2990 /*
2991 * If one copy was requested, store 2 copies of the GBH, so that we
2992 * can still traverse all the data (e.g. to free or scrub) even if a
2993 * block is damaged. Note that we can't store 3 copies of the GBH in
2994 * all cases, e.g. with encryption, which uses DVA[2] for the IV+salt.
2995 */
2996 int gbh_copies = copies;
2997 if (gbh_copies == 1) {
2998 gbh_copies = MIN(2, spa_max_replication(spa));
2999 }
3000
3001 ASSERT(ZIO_HAS_ALLOCATOR(pio));
3002 int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
3003 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3004 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
3005 ASSERT(has_data);
3006
3007 flags |= METASLAB_ASYNC_ALLOC;
3008 VERIFY(zfs_refcount_held(&mc->mc_allocator[pio->io_allocator].
3009 mca_alloc_slots, pio));
3010
3011 /*
3012 * The logical zio has already placed a reservation for
3013 * 'copies' allocation slots but gang blocks may require
3014 * additional copies. These additional copies
3015 * (i.e. gbh_copies - copies) are guaranteed to succeed
3016 * since metaslab_class_throttle_reserve() always allows
3017 * additional reservations for gang blocks.
3018 */
3019 VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
3020 pio->io_allocator, pio, flags));
3021 }
3022
3023 error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
3024 bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
3025 &pio->io_alloc_list, pio, pio->io_allocator);
3026 if (error) {
3027 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3028 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
3029 ASSERT(has_data);
3030
3031 /*
3032 * If we failed to allocate the gang block header then
3033 * we remove any additional allocation reservations that
3034 * we placed here. The original reservation will
3035 * be removed when the logical I/O goes to the ready
3036 * stage.
3037 */
3038 metaslab_class_throttle_unreserve(mc,
3039 gbh_copies - copies, pio->io_allocator, pio);
3040 }
3041
3042 pio->io_error = error;
3043 return (pio);
3044 }
3045
3046 if (pio == gio) {
3047 gnpp = &gio->io_gang_tree;
3048 } else {
3049 gnpp = pio->io_private;
3050 ASSERT(pio->io_ready == zio_write_gang_member_ready);
3051 }
3052
3053 gn = zio_gang_node_alloc(gnpp);
3054 gbh = gn->gn_gbh;
3055 memset(gbh, 0, SPA_GANGBLOCKSIZE);
3056 gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE);
3057
3058 /*
3059 * Create the gang header.
3060 */
3061 zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE,
3062 zio_write_gang_done, NULL, pio->io_priority,
3063 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
3064
3065 zio_gang_inherit_allocator(pio, zio);
3066
3067 /*
3068 * Create and nowait the gang children.
3069 */
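	/*
	 * Worked example with illustrative numbers: for resid = 102400
	 * bytes and three remaining bp slots, the members are sized
	 * P2ROUNDUP(102400 / 3, 512) = 34304, then
	 * P2ROUNDUP(68096 / 2, 512) = 34304, and finally the remaining
	 * 33792 bytes, which together cover the original 102400 bytes.
	 */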
3070 for (int g = 0; resid != 0; resid -= lsize, g++) {
3071 lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
3072 SPA_MINBLOCKSIZE);
3073 ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
3074
3075 zp.zp_checksum = gio->io_prop.zp_checksum;
3076 zp.zp_compress = ZIO_COMPRESS_OFF;
3077 zp.zp_complevel = gio->io_prop.zp_complevel;
3078 zp.zp_type = DMU_OT_NONE;
3079 zp.zp_level = 0;
3080 zp.zp_copies = gio->io_prop.zp_copies;
3081 zp.zp_dedup = B_FALSE;
3082 zp.zp_dedup_verify = B_FALSE;
3083 zp.zp_nopwrite = B_FALSE;
3084 zp.zp_encrypt = gio->io_prop.zp_encrypt;
3085 zp.zp_byteorder = gio->io_prop.zp_byteorder;
3086 memset(zp.zp_salt, 0, ZIO_DATA_SALT_LEN);
3087 memset(zp.zp_iv, 0, ZIO_DATA_IV_LEN);
3088 memset(zp.zp_mac, 0, ZIO_DATA_MAC_LEN);
3089
3090 zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
3091 has_data ? abd_get_offset(pio->io_abd, pio->io_size -
3092 resid) : NULL, lsize, lsize, &zp,
3093 zio_write_gang_member_ready, NULL,
3094 zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
3095 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
3096
3097 zio_gang_inherit_allocator(zio, cio);
3098
3099 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3100 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
3101 ASSERT(has_data);
3102
3103 /*
3104 * Gang children won't throttle but we should
3105 * account for their work, so reserve an allocation
3106 * slot for them here.
3107 */
3108 VERIFY(metaslab_class_throttle_reserve(mc,
3109 zp.zp_copies, cio->io_allocator, cio, flags));
3110 }
3111 zio_nowait(cio);
3112 }
3113
3114 /*
3115 * Set pio's pipeline to just wait for zio to finish.
3116 */
3117 pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3118
3119 zio_nowait(zio);
3120
3121 return (pio);
3122 }
3123
3124 /*
3125 * The zio_nop_write stage in the pipeline determines if allocating a
3126 * new bp is necessary. The nopwrite feature can handle writes in
3127 * either syncing or open context (i.e. zil writes) and as a result is
3128 * mutually exclusive with dedup.
3129 *
3130 * By leveraging a cryptographically secure checksum, such as SHA256, we
3131 * can compare the checksums of the new data and the old to determine if
3132 * allocating a new block is required. Note that our requirements for
3133 * cryptographic strength are fairly weak: there can't be any accidental
3134 * hash collisions, but we don't need to be secure against intentional
3135 * (malicious) collisions. To trigger a nopwrite, you have to be able
3136 * to write the file to begin with, and triggering an incorrect (hash
3137 * collision) nopwrite is no worse than simply writing to the file.
3138 * That said, there are no known attacks against the checksum algorithms
3139 * used for nopwrite, assuming that the salt and the checksums
3140 * themselves remain secret.
3141 */
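
/*
 * For example (assuming a dataset configured with a nopwrite-capable
 * checksum such as sha256 and with compression enabled): when an
 * application rewrites a block with byte-identical contents, the stage
 * below sees matching checksums and block properties, copies bp_orig
 * back into bp, and short-circuits the pipeline so that no new
 * allocation or device write is issued.
 */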
3142 static zio_t *
zio_nop_write(zio_t *zio)
3144 {
3145 blkptr_t *bp = zio->io_bp;
3146 blkptr_t *bp_orig = &zio->io_bp_orig;
3147 zio_prop_t *zp = &zio->io_prop;
3148
3149 ASSERT(BP_IS_HOLE(bp));
3150 ASSERT(BP_GET_LEVEL(bp) == 0);
3151 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
3152 ASSERT(zp->zp_nopwrite);
3153 ASSERT(!zp->zp_dedup);
3154 ASSERT(zio->io_bp_override == NULL);
3155 ASSERT(IO_IS_ALLOCATING(zio));
3156
3157 /*
3158 * Check to see if the original bp and the new bp have matching
3159 * characteristics (i.e. same checksum, compression algorithms, etc).
3160 * If they don't then just continue with the pipeline which will
3161 * allocate a new bp.
3162 */
3163 if (BP_IS_HOLE(bp_orig) ||
3164 !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
3165 ZCHECKSUM_FLAG_NOPWRITE) ||
3166 BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) ||
3167 BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
3168 BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
3169 BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
3170 zp->zp_copies != BP_GET_NDVAS(bp_orig))
3171 return (zio);
3172
3173 /*
3174 * If the checksums match then reset the pipeline so that we
3175 * avoid allocating a new bp and issuing any I/O.
3176 */
3177 if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
3178 ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
3179 ZCHECKSUM_FLAG_NOPWRITE);
3180 ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
3181 ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
3182 ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
3183 ASSERT3U(bp->blk_prop, ==, bp_orig->blk_prop);
3184
3185 /*
3186 * If we're overwriting a block that is currently on an
3187 * indirect vdev, then ignore the nopwrite request and
3188 * allow a new block to be allocated on a concrete vdev.
3189 */
3190 spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER);
3191 for (int d = 0; d < BP_GET_NDVAS(bp_orig); d++) {
3192 vdev_t *tvd = vdev_lookup_top(zio->io_spa,
3193 DVA_GET_VDEV(&bp_orig->blk_dva[d]));
3194 if (tvd->vdev_ops == &vdev_indirect_ops) {
3195 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
3196 return (zio);
3197 }
3198 }
3199 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
3200
3201 *bp = *bp_orig;
3202 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3203 zio->io_flags |= ZIO_FLAG_NOPWRITE;
3204 }
3205
3206 return (zio);
3207 }
3208
3209 /*
3210 * ==========================================================================
3211 * Block Reference Table
3212 * ==========================================================================
3213 */
3214 static zio_t *
zio_brt_free(zio_t *zio)
3216 {
3217 blkptr_t *bp;
3218
3219 bp = zio->io_bp;
3220
3221 if (BP_GET_LEVEL(bp) > 0 ||
3222 BP_IS_METADATA(bp) ||
3223 !brt_maybe_exists(zio->io_spa, bp)) {
3224 return (zio);
3225 }
3226
3227 if (!brt_entry_decref(zio->io_spa, bp)) {
3228 /*
3229 * This isn't the last reference, so we cannot free
3230 * the data yet.
3231 */
3232 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3233 }
3234
3235 return (zio);
3236 }
3237
3238 /*
3239 * ==========================================================================
3240 * Dedup
3241 * ==========================================================================
3242 */
3243 static void
zio_ddt_child_read_done(zio_t *zio)
3245 {
3246 blkptr_t *bp = zio->io_bp;
3247 ddt_entry_t *dde = zio->io_private;
3248 ddt_phys_t *ddp;
3249 zio_t *pio = zio_unique_parent(zio);
3250
3251 mutex_enter(&pio->io_lock);
3252 ddp = ddt_phys_select(dde, bp);
3253 if (zio->io_error == 0)
3254 ddt_phys_clear(ddp); /* this ddp doesn't need repair */
3255
3256 if (zio->io_error == 0 && dde->dde_repair_abd == NULL)
3257 dde->dde_repair_abd = zio->io_abd;
3258 else
3259 abd_free(zio->io_abd);
3260 mutex_exit(&pio->io_lock);
3261 }
3262
3263 static zio_t *
zio_ddt_read_start(zio_t *zio)
3265 {
3266 blkptr_t *bp = zio->io_bp;
3267
3268 ASSERT(BP_GET_DEDUP(bp));
3269 ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
3270 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3271
3272 if (zio->io_child_error[ZIO_CHILD_DDT]) {
3273 ddt_t *ddt = ddt_select(zio->io_spa, bp);
3274 ddt_entry_t *dde = ddt_repair_start(ddt, bp);
3275 ddt_phys_t *ddp = dde->dde_phys;
3276 ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
3277 blkptr_t blk;
3278
3279 ASSERT(zio->io_vsd == NULL);
3280 zio->io_vsd = dde;
3281
3282 if (ddp_self == NULL)
3283 return (zio);
3284
3285 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
3286 if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
3287 continue;
3288 ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
3289 &blk);
3290 zio_nowait(zio_read(zio, zio->io_spa, &blk,
3291 abd_alloc_for_io(zio->io_size, B_TRUE),
3292 zio->io_size, zio_ddt_child_read_done, dde,
3293 zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
3294 ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
3295 }
3296 return (zio);
3297 }
3298
3299 zio_nowait(zio_read(zio, zio->io_spa, bp,
3300 zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
3301 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
3302
3303 return (zio);
3304 }
3305
3306 static zio_t *
zio_ddt_read_done(zio_t *zio)
3308 {
3309 blkptr_t *bp = zio->io_bp;
3310
3311 if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
3312 return (NULL);
3313 }
3314
3315 ASSERT(BP_GET_DEDUP(bp));
3316 ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
3317 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3318
3319 if (zio->io_child_error[ZIO_CHILD_DDT]) {
3320 ddt_t *ddt = ddt_select(zio->io_spa, bp);
3321 ddt_entry_t *dde = zio->io_vsd;
3322 if (ddt == NULL) {
3323 ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
3324 return (zio);
3325 }
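		/*
		 * No repair entry was set up (io_vsd is NULL), so go back
		 * and rerun the DDT read start stage to issue the repair
		 * reads.
		 */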
3326 if (dde == NULL) {
3327 zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
3328 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
3329 return (NULL);
3330 }
3331 if (dde->dde_repair_abd != NULL) {
3332 abd_copy(zio->io_abd, dde->dde_repair_abd,
3333 zio->io_size);
3334 zio->io_child_error[ZIO_CHILD_DDT] = 0;
3335 }
3336 ddt_repair_done(ddt, dde);
3337 zio->io_vsd = NULL;
3338 }
3339
3340 ASSERT(zio->io_vsd == NULL);
3341
3342 return (zio);
3343 }
3344
3345 static boolean_t
3346 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
3347 {
3348 spa_t *spa = zio->io_spa;
3349 boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW);
3350
3351 ASSERT(!(zio->io_bp_override && do_raw));
3352
3353 /*
3354 * Note: we compare the original data, not the transformed data,
3355 * because when zio->io_bp is an override bp, we will not have
3356 * pushed the I/O transforms. That's an important optimization
3357 * because otherwise we'd compress/encrypt all dmu_sync() data twice.
3358 * However, we should never get a raw, override zio so in these
3359 * cases we can compare the io_abd directly. This is useful because
3360 * it allows us to do dedup verification even if we don't have access
3361 * to the original data (for instance, if the encryption keys aren't
3362 * loaded).
3363 */
3364
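	/*
	 * First check against any in-flight lead zio for this entry: if
	 * another write with the same checksum is in progress, compare
	 * our data against it directly.
	 */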
3365 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
3366 zio_t *lio = dde->dde_lead_zio[p];
3367
3368 if (lio != NULL && do_raw) {
3369 return (lio->io_size != zio->io_size ||
3370 abd_cmp(zio->io_abd, lio->io_abd) != 0);
3371 } else if (lio != NULL) {
3372 return (lio->io_orig_size != zio->io_orig_size ||
3373 abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0);
3374 }
3375 }
3376
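	/*
	 * Otherwise, compare against each copy that already exists on
	 * disk, reading it back (raw, or through the ARC) and comparing
	 * it with the data being written.
	 */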
3377 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
3378 ddt_phys_t *ddp = &dde->dde_phys[p];
3379
3380 if (ddp->ddp_phys_birth != 0 && do_raw) {
3381 blkptr_t blk = *zio->io_bp;
3382 uint64_t psize;
3383 abd_t *tmpabd;
3384 int error;
3385
3386 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
3387 psize = BP_GET_PSIZE(&blk);
3388
3389 if (psize != zio->io_size)
3390 return (B_TRUE);
3391
3392 ddt_exit(ddt);
3393
3394 tmpabd = abd_alloc_for_io(psize, B_TRUE);
3395
3396 error = zio_wait(zio_read(NULL, spa, &blk, tmpabd,
3397 psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
3398 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
3399 ZIO_FLAG_RAW, &zio->io_bookmark));
3400
3401 if (error == 0) {
3402 if (abd_cmp(tmpabd, zio->io_abd) != 0)
3403 error = SET_ERROR(ENOENT);
3404 }
3405
3406 abd_free(tmpabd);
3407 ddt_enter(ddt);
3408 return (error != 0);
3409 } else if (ddp->ddp_phys_birth != 0) {
3410 arc_buf_t *abuf = NULL;
3411 arc_flags_t aflags = ARC_FLAG_WAIT;
3412 blkptr_t blk = *zio->io_bp;
3413 int error;
3414
3415 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
3416
3417 if (BP_GET_LSIZE(&blk) != zio->io_orig_size)
3418 return (B_TRUE);
3419
3420 ddt_exit(ddt);
3421
3422 error = arc_read(NULL, spa, &blk,
3423 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
3424 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3425 &aflags, &zio->io_bookmark);
3426
3427 if (error == 0) {
3428 if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data,
3429 zio->io_orig_size) != 0)
3430 error = SET_ERROR(ENOENT);
3431 arc_buf_destroy(abuf, &abuf);
3432 }
3433
3434 ddt_enter(ddt);
3435 return (error != 0);
3436 }
3437 }
3438
3439 return (B_FALSE);
3440 }
3441
3442 static void
3443 zio_ddt_child_write_ready(zio_t *zio)
3444 {
3445 int p = zio->io_prop.zp_copies;
3446 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
3447 ddt_entry_t *dde = zio->io_private;
3448 ddt_phys_t *ddp = &dde->dde_phys[p];
3449 zio_t *pio;
3450
3451 if (zio->io_error)
3452 return;
3453
3454 ddt_enter(ddt);
3455
3456 ASSERT(dde->dde_lead_zio[p] == zio);
3457
3458 ddt_phys_fill(ddp, zio->io_bp);
3459
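	/*
	 * Propagate the newly filled DDT phys into every parent's bp so
	 * they all reference the same on-disk copy.
	 */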
3460 zio_link_t *zl = NULL;
3461 while ((pio = zio_walk_parents(zio, &zl)) != NULL)
3462 ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);
3463
3464 ddt_exit(ddt);
3465 }
3466
3467 static void
3468 zio_ddt_child_write_done(zio_t *zio)
3469 {
3470 int p = zio->io_prop.zp_copies;
3471 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
3472 ddt_entry_t *dde = zio->io_private;
3473 ddt_phys_t *ddp = &dde->dde_phys[p];
3474
3475 ddt_enter(ddt);
3476
3477 ASSERT(ddp->ddp_refcnt == 0);
3478 ASSERT(dde->dde_lead_zio[p] == zio);
3479 dde->dde_lead_zio[p] = NULL;
3480
3481 if (zio->io_error == 0) {
3482 zio_link_t *zl = NULL;
3483 while (zio_walk_parents(zio, &zl) != NULL)
3484 ddt_phys_addref(ddp);
3485 } else {
3486 ddt_phys_clear(ddp);
3487 }
3488
3489 ddt_exit(ddt);
3490 }
3491
3492 static zio_t *
3493 zio_ddt_write(zio_t *zio)
3494 {
3495 spa_t *spa = zio->io_spa;
3496 blkptr_t *bp = zio->io_bp;
3497 uint64_t txg = zio->io_txg;
3498 zio_prop_t *zp = &zio->io_prop;
3499 int p = zp->zp_copies;
3500 zio_t *cio = NULL;
3501 ddt_t *ddt = ddt_select(spa, bp);
3502 ddt_entry_t *dde;
3503 ddt_phys_t *ddp;
3504
3505 ASSERT(BP_GET_DEDUP(bp));
3506 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
3507 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
3508 ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));
3509
3510 ddt_enter(ddt);
3511 dde = ddt_lookup(ddt, bp, B_TRUE);
3512 ddp = &dde->dde_phys[p];
3513
3514 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
3515 /*
3516 * If we're using a weak checksum, upgrade to a strong checksum
3517 * and try again. If we're already using a strong checksum,
3518 * we can't resolve it, so just convert to an ordinary write.
3519 * (And automatically e-mail a paper to Nature?)
3520 */
3521 if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
3522 ZCHECKSUM_FLAG_DEDUP)) {
3523 zp->zp_checksum = spa_dedup_checksum(spa);
3524 zio_pop_transforms(zio);
3525 zio->io_stage = ZIO_STAGE_OPEN;
3526 BP_ZERO(bp);
3527 } else {
3528 zp->zp_dedup = B_FALSE;
3529 BP_SET_DEDUP(bp, B_FALSE);
3530 }
3531 ASSERT(!BP_GET_DEDUP(bp));
3532 zio->io_pipeline = ZIO_WRITE_PIPELINE;
3533 ddt_exit(ddt);
3534 return (zio);
3535 }
3536
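	/*
	 * Three cases follow: the entry already has an on-disk copy
	 * and/or an in-flight leader for this copies count, in which case
	 * we fill our bp from the DDT and either chain onto the leader or
	 * take a reference; an override bp already names the block, so we
	 * just record it in the DDT; otherwise we issue a child write
	 * that fills in the DDT entry when it becomes ready.
	 */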
3537 if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
3538 if (ddp->ddp_phys_birth != 0)
3539 ddt_bp_fill(ddp, bp, txg);
3540 if (dde->dde_lead_zio[p] != NULL)
3541 zio_add_child(zio, dde->dde_lead_zio[p]);
3542 else
3543 ddt_phys_addref(ddp);
3544 } else if (zio->io_bp_override) {
3545 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == txg);
3546 ASSERT(BP_EQUAL(bp, zio->io_bp_override));
3547 ddt_phys_fill(ddp, bp);
3548 ddt_phys_addref(ddp);
3549 } else {
3550 cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
3551 zio->io_orig_size, zio->io_orig_size, zp,
3552 zio_ddt_child_write_ready, NULL,
3553 zio_ddt_child_write_done, dde, zio->io_priority,
3554 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
3555
3556 zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);
3557 dde->dde_lead_zio[p] = cio;
3558 }
3559
3560 ddt_exit(ddt);
3561
3562 zio_nowait(cio);
3563
3564 return (zio);
3565 }
3566
3567 static ddt_entry_t *freedde; /* for debugging */
3568
3569 static zio_t *
3570 zio_ddt_free(zio_t *zio)
3571 {
3572 spa_t *spa = zio->io_spa;
3573 blkptr_t *bp = zio->io_bp;
3574 ddt_t *ddt = ddt_select(spa, bp);
3575 ddt_entry_t *dde;
3576 ddt_phys_t *ddp;
3577
3578 ASSERT(BP_GET_DEDUP(bp));
3579 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3580
3581 ddt_enter(ddt);
3582 freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
3583 if (dde) {
3584 ddp = ddt_phys_select(dde, bp);
3585 if (ddp)
3586 ddt_phys_decref(ddp);
3587 }
3588 ddt_exit(ddt);
3589
3590 return (zio);
3591 }
3592
3593 /*
3594 * ==========================================================================
3595 * Allocate and free blocks
3596 * ==========================================================================
3597 */
3598
3599 static zio_t *
3600 zio_io_to_allocate(spa_t *spa, int allocator)
3601 {
3602 zio_t *zio;
3603
3604 ASSERT(MUTEX_HELD(&spa->spa_allocs[allocator].spaa_lock));
3605
3606 zio = avl_first(&spa->spa_allocs[allocator].spaa_tree);
3607 if (zio == NULL)
3608 return (NULL);
3609
3610 ASSERT(IO_IS_ALLOCATING(zio));
3611 ASSERT(ZIO_HAS_ALLOCATOR(zio));
3612
3613 /*
3614 * Try to place a reservation for this zio. If we're unable to
3615 * reserve then we throttle.
3616 */
3617 ASSERT3U(zio->io_allocator, ==, allocator);
3618 if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
3619 zio->io_prop.zp_copies, allocator, zio, 0)) {
3620 return (NULL);
3621 }
3622
3623 avl_remove(&spa->spa_allocs[allocator].spaa_tree, zio);
3624 ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
3625
3626 return (zio);
3627 }
3628
3629 static zio_t *
3630 zio_dva_throttle(zio_t *zio)
3631 {
3632 spa_t *spa = zio->io_spa;
3633 zio_t *nio;
3634 metaslab_class_t *mc;
3635
3636 /* locate an appropriate allocation class */
3637 mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type,
3638 zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk);
3639
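	/*
	 * Sync writes, gang children, NODATA writes, and classes without
	 * the allocation throttle enabled bypass the throttle entirely.
	 */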
3640 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
3641 !mc->mc_alloc_throttle_enabled ||
3642 zio->io_child_type == ZIO_CHILD_GANG ||
3643 zio->io_flags & ZIO_FLAG_NODATA) {
3644 return (zio);
3645 }
3646
3647 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
3648 ASSERT(ZIO_HAS_ALLOCATOR(zio));
3649 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
3650 ASSERT3U(zio->io_queued_timestamp, >, 0);
3651 ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
3652
3653 int allocator = zio->io_allocator;
3654 zio->io_metaslab_class = mc;
3655 mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
3656 avl_add(&spa->spa_allocs[allocator].spaa_tree, zio);
3657 nio = zio_io_to_allocate(spa, allocator);
3658 mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
3659 return (nio);
3660 }
3661
3662 static void
3663 zio_allocate_dispatch(spa_t *spa, int allocator)
3664 {
3665 zio_t *zio;
3666
3667 mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
3668 zio = zio_io_to_allocate(spa, allocator);
3669 mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
3670 if (zio == NULL)
3671 return;
3672
3673 ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
3674 ASSERT0(zio->io_error);
3675 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
3676 }
3677
3678 static zio_t *
3679 zio_dva_allocate(zio_t *zio)
3680 {
3681 spa_t *spa = zio->io_spa;
3682 metaslab_class_t *mc;
3683 blkptr_t *bp = zio->io_bp;
3684 int error;
3685 int flags = 0;
3686
3687 if (zio->io_gang_leader == NULL) {
3688 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
3689 zio->io_gang_leader = zio;
3690 }
3691
3692 ASSERT(BP_IS_HOLE(bp));
3693 ASSERT0(BP_GET_NDVAS(bp));
3694 ASSERT3U(zio->io_prop.zp_copies, >, 0);
3695 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
3696 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
3697
3698 if (zio->io_flags & ZIO_FLAG_NODATA)
3699 flags |= METASLAB_DONT_THROTTLE;
3700 if (zio->io_flags & ZIO_FLAG_GANG_CHILD)
3701 flags |= METASLAB_GANG_CHILD;
3702 if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE)
3703 flags |= METASLAB_ASYNC_ALLOC;
3704
3705 /*
3706 * if not already chosen, locate an appropriate allocation class
3707 */
3708 mc = zio->io_metaslab_class;
3709 if (mc == NULL) {
3710 mc = spa_preferred_class(spa, zio->io_size,
3711 zio->io_prop.zp_type, zio->io_prop.zp_level,
3712 zio->io_prop.zp_zpl_smallblk);
3713 zio->io_metaslab_class = mc;
3714 }
3715
3716 /*
3717 * Try allocating the block in the usual metaslab class.
3718 * If that's full, allocate it in the normal class.
3719 * If that's full, allocate as a gang block,
3720 * and if all are full, the allocation fails (which shouldn't happen).
3721 *
3722 * Note that we do not fall back on embedded slog (ZIL) space, to
3723 * preserve unfragmented slog space, which is critical for decent
3724 * sync write performance. If a log allocation fails, we will fall
3725 * back to spa_sync() which is abysmal for performance.
3726 */
3727 ASSERT(ZIO_HAS_ALLOCATOR(zio));
3728 error = metaslab_alloc(spa, mc, zio->io_size, bp,
3729 zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
3730 &zio->io_alloc_list, zio, zio->io_allocator);
3731
3732 /*
3733 * Fallback to normal class when an alloc class is full
3734 */
3735 if (error == ENOSPC && mc != spa_normal_class(spa)) {
3736 /*
3737 * If throttling, transfer reservation over to normal class.
3738 * The io_allocator slot can remain the same even though we
3739 * are switching classes.
3740 */
3741 if (mc->mc_alloc_throttle_enabled &&
3742 (zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) {
3743 metaslab_class_throttle_unreserve(mc,
3744 zio->io_prop.zp_copies, zio->io_allocator, zio);
3745 zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING;
3746
3747 VERIFY(metaslab_class_throttle_reserve(
3748 spa_normal_class(spa),
3749 zio->io_prop.zp_copies, zio->io_allocator, zio,
3750 flags | METASLAB_MUST_RESERVE));
3751 }
3752 zio->io_metaslab_class = mc = spa_normal_class(spa);
3753 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
3754 zfs_dbgmsg("%s: metaslab allocation failure, "
3755 "trying normal class: zio %px, size %llu, error %d",
3756 spa_name(spa), zio, (u_longlong_t)zio->io_size,
3757 error);
3758 }
3759
3760 error = metaslab_alloc(spa, mc, zio->io_size, bp,
3761 zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
3762 &zio->io_alloc_list, zio, zio->io_allocator);
3763 }
3764
3765 if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) {
3766 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
3767 zfs_dbgmsg("%s: metaslab allocation failure, "
3768 "trying ganging: zio %px, size %llu, error %d",
3769 spa_name(spa), zio, (u_longlong_t)zio->io_size,
3770 error);
3771 }
3772 return (zio_write_gang_block(zio, mc));
3773 }
3774 if (error != 0) {
3775 if (error != ENOSPC ||
3776 (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) {
3777 zfs_dbgmsg("%s: metaslab allocation failure: zio %px, "
3778 "size %llu, error %d",
3779 spa_name(spa), zio, (u_longlong_t)zio->io_size,
3780 error);
3781 }
3782 zio->io_error = error;
3783 }
3784
3785 return (zio);
3786 }
3787
3788 static zio_t *
3789 zio_dva_free(zio_t *zio)
3790 {
3791 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
3792
3793 return (zio);
3794 }
3795
3796 static zio_t *
3797 zio_dva_claim(zio_t *zio)
3798 {
3799 int error;
3800
3801 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
3802 if (error)
3803 zio->io_error = error;
3804
3805 return (zio);
3806 }
3807
3808 /*
3809 * Undo an allocation. This is used by zio_done() when an I/O fails
3810 * and we want to give back the block we just allocated.
3811 * This handles both normal blocks and gang blocks.
3812 */
3813 static void
3814 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
3815 {
3816 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg || BP_IS_HOLE(bp));
3817 ASSERT(zio->io_bp_override == NULL);
3818
3819 if (!BP_IS_HOLE(bp)) {
3820 metaslab_free(zio->io_spa, bp, BP_GET_LOGICAL_BIRTH(bp),
3821 B_TRUE);
3822 }
3823
3824 if (gn != NULL) {
3825 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
3826 zio_dva_unallocate(zio, gn->gn_child[g],
3827 &gn->gn_gbh->zg_blkptr[g]);
3828 }
3829 }
3830 }
3831
3832 /*
3833 * Try to allocate an intent log block. Return 0 on success, errno on failure.
3834 */
3835 int
3836 zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
3837 uint64_t size, boolean_t *slog)
3838 {
3839 int error = 1;
3840 zio_alloc_list_t io_alloc_list;
3841
3842 ASSERT(txg > spa_syncing_txg(spa));
3843
3844 metaslab_trace_init(&io_alloc_list);
3845
3846 /*
3847 * Block pointer fields are useful to metaslabs for stats and debugging.
3848 * Fill in the obvious ones before calling into metaslab_alloc().
3849 */
3850 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
3851 BP_SET_PSIZE(new_bp, size);
3852 BP_SET_LEVEL(new_bp, 0);
3853
3854 /*
3855 * When allocating a zil block, we don't have information about
3856 * the final destination of the block except the objset it's part
3857 * of, so we just hash the objset ID to pick the allocator to get
3858 * some parallelism.
3859 */
3860 int flags = METASLAB_ZIL;
3861 int allocator = (uint_t)cityhash4(0, 0, 0,
3862 os->os_dsl_dataset->ds_object) % spa->spa_alloc_count;
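	/*
	 * Try the dedicated log (SLOG) class first, then fall back to the
	 * embedded log class, and finally the normal class.  *slog records
	 * whether the block landed on a dedicated log device.
	 */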
3863 error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
3864 txg, NULL, flags, &io_alloc_list, NULL, allocator);
3865 *slog = (error == 0);
3866 if (error != 0) {
3867 error = metaslab_alloc(spa, spa_embedded_log_class(spa), size,
3868 new_bp, 1, txg, NULL, flags,
3869 &io_alloc_list, NULL, allocator);
3870 }
3871 if (error != 0) {
3872 error = metaslab_alloc(spa, spa_normal_class(spa), size,
3873 new_bp, 1, txg, NULL, flags,
3874 &io_alloc_list, NULL, allocator);
3875 }
3876 metaslab_trace_fini(&io_alloc_list);
3877
3878 if (error == 0) {
3879 BP_SET_LSIZE(new_bp, size);
3880 BP_SET_PSIZE(new_bp, size);
3881 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
3882 BP_SET_CHECKSUM(new_bp,
3883 spa_version(spa) >= SPA_VERSION_SLIM_ZIL
3884 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
3885 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
3886 BP_SET_LEVEL(new_bp, 0);
3887 BP_SET_DEDUP(new_bp, 0);
3888 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
3889
3890 /*
3891 * encrypted blocks will require an IV and salt. We generate
3892 * these now since we will not be rewriting the bp at
3893 * rewrite time.
3894 */
3895 if (os->os_encrypted) {
3896 uint8_t iv[ZIO_DATA_IV_LEN];
3897 uint8_t salt[ZIO_DATA_SALT_LEN];
3898
3899 BP_SET_CRYPT(new_bp, B_TRUE);
3900 VERIFY0(spa_crypt_get_salt(spa,
3901 dmu_objset_id(os), salt));
3902 VERIFY0(zio_crypt_generate_iv(iv));
3903
3904 zio_crypt_encode_params_bp(new_bp, salt, iv);
3905 }
3906 } else {
3907 zfs_dbgmsg("%s: zil block allocation failure: "
3908 "size %llu, error %d", spa_name(spa), (u_longlong_t)size,
3909 error);
3910 }
3911
3912 return (error);
3913 }
3914
3915 /*
3916 * ==========================================================================
3917 * Read and write to physical devices
3918 * ==========================================================================
3919 */
3920
3921 /*
3922 * Issue an I/O to the underlying vdev. Typically the issue pipeline
3923 * stops after this stage and will resume upon I/O completion.
3924 * However, there are instances where the vdev layer may need to
3925 * continue the pipeline when an I/O was not issued. Since the I/O
3926 * that was sent to the vdev layer might be different than the one
3927 * currently active in the pipeline (see vdev_queue_io()), we explicitly
3928 * force the underlying vdev layers to call either zio_execute() or
3929 * zio_interrupt() to ensure that the pipeline continues with the correct I/O.
3930 */
3931 static zio_t *
3932 zio_vdev_io_start(zio_t *zio)
3933 {
3934 vdev_t *vd = zio->io_vd;
3935 uint64_t align;
3936 spa_t *spa = zio->io_spa;
3937
3938 zio->io_delay = 0;
3939
3940 ASSERT(zio->io_error == 0);
3941 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
3942
3943 if (vd == NULL) {
3944 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
3945 spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
3946
3947 /*
3948 * The mirror_ops handle multiple DVAs in a single BP.
3949 */
3950 vdev_mirror_ops.vdev_op_io_start(zio);
3951 return (NULL);
3952 }
3953
3954 ASSERT3P(zio->io_logical, !=, zio);
3955 if (zio->io_type == ZIO_TYPE_WRITE) {
3956 ASSERT(spa->spa_trust_config);
3957
3958 /*
3959 * Note: the code can handle other kinds of writes,
3960 * but we don't expect them.
3961 */
3962 if (zio->io_vd->vdev_noalloc) {
3963 ASSERT(zio->io_flags &
3964 (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL |
3965 ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE));
3966 }
3967 }
3968
3969 align = 1ULL << vd->vdev_top->vdev_ashift;
3970
3971 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
3972 P2PHASE(zio->io_size, align) != 0) {
3973 /* Transform logical writes to be a full physical block size. */
3974 uint64_t asize = P2ROUNDUP(zio->io_size, align);
3975 abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize);
3976 ASSERT(vd == vd->vdev_top);
3977 if (zio->io_type == ZIO_TYPE_WRITE) {
3978 abd_copy(abuf, zio->io_abd, zio->io_size);
3979 abd_zero_off(abuf, zio->io_size, asize - zio->io_size);
3980 }
3981 zio_push_transform(zio, abuf, asize, asize, zio_subblock);
3982 }
3983
3984 /*
3985 * If this is not a physical io, make sure that it is properly aligned
3986 * before proceeding.
3987 */
3988 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
3989 ASSERT0(P2PHASE(zio->io_offset, align));
3990 ASSERT0(P2PHASE(zio->io_size, align));
3991 } else {
3992 /*
3993 * For physical writes, we allow 512b aligned writes and assume
3994 * the device will perform a read-modify-write as necessary.
3995 */
3996 ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
3997 ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
3998 }
3999
4000 VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
4001
4002 /*
4003 * If this is a repair I/O, and there's no self-healing involved --
4004 * that is, we're just resilvering what we expect to resilver --
4005 * then don't do the I/O unless zio's txg is actually in vd's DTL.
4006 * This prevents spurious resilvering.
4007 *
4008 * There are a few ways that we can end up creating these spurious
4009 * resilver i/os:
4010 *
4011 * 1. A resilver i/o will be issued if any DVA in the BP has a
4012 * dirty DTL. The mirror code will issue resilver writes to
4013 * each DVA, including the one(s) that are not on vdevs with dirty
4014 * DTLs.
4015 *
4016 * 2. With nested replication, which happens when we have a
4017 * "replacing" or "spare" vdev that's a child of a mirror or raidz.
4018 * For example, given mirror(replacing(A+B), C), it's likely that
4019 * only A is out of date (it's the new device). In this case, we'll
4020 * read from C, then use the data to resilver A+B -- but we don't
4021 * actually want to resilver B, just A. The top-level mirror has no
4022 * way to know this, so instead we just discard unnecessary repairs
4023 * as we work our way down the vdev tree.
4024 *
4025 * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc.
4026 * The same logic applies to any form of nested replication: ditto
4027 * + mirror, RAID-Z + replacing, etc.
4028 *
4029 * However, indirect vdevs point off to other vdevs which may have
4030 * DTL's, so we never bypass them. The child i/os on concrete vdevs
4031 * will be properly bypassed instead.
4032 *
4033 * Leaf DTL_PARTIAL can be empty when a legitimate write comes from
4034 * a dRAID spare vdev. For example, when a dRAID spare is first
4035 * used, its spare blocks need to be written to but the leaf vdevs
4036 * of such blocks can have empty DTL_PARTIAL.
4037 *
4038 * There seemed no clean way to allow such writes while bypassing
4039 * spurious ones. At this point, just avoid all bypassing for dRAID
4040 * for correctness.
4041 */
4042 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
4043 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
4044 zio->io_txg != 0 && /* not a delegated i/o */
4045 vd->vdev_ops != &vdev_indirect_ops &&
4046 vd->vdev_top->vdev_ops != &vdev_draid_ops &&
4047 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
4048 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
4049 zio_vdev_io_bypass(zio);
4050 return (zio);
4051 }
4052
4053 /*
4054 * Select the next best leaf I/O to process. Distributed spares are
4055 * excluded since they dispatch the I/O directly to a leaf vdev after
4056 * applying the dRAID mapping.
4057 */
4058 if (vd->vdev_ops->vdev_op_leaf &&
4059 vd->vdev_ops != &vdev_draid_spare_ops &&
4060 (zio->io_type == ZIO_TYPE_READ ||
4061 zio->io_type == ZIO_TYPE_WRITE ||
4062 zio->io_type == ZIO_TYPE_TRIM)) {
4063
4064 if (zio_handle_device_injection(vd, zio, ENOSYS) != 0) {
4065 /*
4066 * "no-op" injections return success, but do no actual
4067 * work. Just skip the remaining vdev stages.
4068 */
4069 zio_vdev_io_bypass(zio);
4070 zio_interrupt(zio);
4071 return (NULL);
4072 }
4073
4074 if ((zio = vdev_queue_io(zio)) == NULL)
4075 return (NULL);
4076
4077 if (!vdev_accessible(vd, zio)) {
4078 zio->io_error = SET_ERROR(ENXIO);
4079 zio_interrupt(zio);
4080 return (NULL);
4081 }
4082 zio->io_delay = gethrtime();
4083 }
4084
4085 vd->vdev_ops->vdev_op_io_start(zio);
4086 return (NULL);
4087 }
4088
4089 static zio_t *
4090 zio_vdev_io_done(zio_t *zio)
4091 {
4092 vdev_t *vd = zio->io_vd;
4093 vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
4094 boolean_t unexpected_error = B_FALSE;
4095
4096 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
4097 return (NULL);
4098 }
4099
4100 ASSERT(zio->io_type == ZIO_TYPE_READ ||
4101 zio->io_type == ZIO_TYPE_WRITE ||
4102 zio->io_type == ZIO_TYPE_FLUSH ||
4103 zio->io_type == ZIO_TYPE_TRIM);
4104
4105 if (zio->io_delay)
4106 zio->io_delay = gethrtime() - zio->io_delay;
4107
4108 if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
4109 vd->vdev_ops != &vdev_draid_spare_ops) {
4110 if (zio->io_type != ZIO_TYPE_FLUSH)
4111 vdev_queue_io_done(zio);
4112
4113 if (zio_injection_enabled && zio->io_error == 0)
4114 zio->io_error = zio_handle_device_injections(vd, zio,
4115 EIO, EILSEQ);
4116
4117 if (zio_injection_enabled && zio->io_error == 0)
4118 zio->io_error = zio_handle_label_injection(zio, EIO);
4119
4120 if (zio->io_error && zio->io_type != ZIO_TYPE_FLUSH &&
4121 zio->io_type != ZIO_TYPE_TRIM) {
4122 if (!vdev_accessible(vd, zio)) {
4123 zio->io_error = SET_ERROR(ENXIO);
4124 } else {
4125 unexpected_error = B_TRUE;
4126 }
4127 }
4128 }
4129
4130 ops->vdev_op_io_done(zio);
4131
4132 if (unexpected_error && vd->vdev_remove_wanted == B_FALSE)
4133 VERIFY(vdev_probe(vd, zio) == NULL);
4134
4135 return (zio);
4136 }
4137
4138 /*
4139 * This function is used to change the priority of an existing zio that is
4140 * currently in-flight. This is used by the arc to upgrade priority in the
4141 * event that a demand read is made for a block that is currently queued
4142 * as a scrub or async read IO. Otherwise, the high priority read request
4143 * would end up having to wait for the lower priority IO.
4144 */
4145 void
4146 zio_change_priority(zio_t *pio, zio_priority_t priority)
4147 {
4148 zio_t *cio, *cio_next;
4149 zio_link_t *zl = NULL;
4150
4151 ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
4152
4153 if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) {
4154 vdev_queue_change_io_priority(pio, priority);
4155 } else {
4156 pio->io_priority = priority;
4157 }
4158
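	/*
	 * Recursively propagate the new priority to all children; the
	 * parent's io_lock keeps the child list stable during the walk.
	 */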
4159 mutex_enter(&pio->io_lock);
4160 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
4161 cio_next = zio_walk_children(pio, &zl);
4162 zio_change_priority(cio, priority);
4163 }
4164 mutex_exit(&pio->io_lock);
4165 }
4166
4167 /*
4168 * For non-raidz ZIOs, we can just copy aside the bad data read from the
4169 * disk, and use that to finish the checksum ereport later.
4170 */
4171 static void
4172 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
4173 const abd_t *good_buf)
4174 {
4175 /* no processing needed */
4176 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
4177 }
4178
4179 void
4180 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr)
4181 {
4182 void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size);
4183
4184 abd_copy(abd, zio->io_abd, zio->io_size);
4185
4186 zcr->zcr_cbinfo = zio->io_size;
4187 zcr->zcr_cbdata = abd;
4188 zcr->zcr_finish = zio_vsd_default_cksum_finish;
4189 zcr->zcr_free = zio_abd_free;
4190 }
4191
4192 static zio_t *
4193 zio_vdev_io_assess(zio_t *zio)
4194 {
4195 vdev_t *vd = zio->io_vd;
4196
4197 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
4198 return (NULL);
4199 }
4200
4201 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
4202 spa_config_exit(zio->io_spa, SCL_ZIO, zio);
4203
4204 if (zio->io_vsd != NULL) {
4205 zio->io_vsd_ops->vsd_free(zio);
4206 zio->io_vsd = NULL;
4207 }
4208
4209 if (zio_injection_enabled && zio->io_error == 0)
4210 zio->io_error = zio_handle_fault_injection(zio, EIO);
4211
4212 /*
4213 * If the I/O failed, determine whether we should attempt to retry it.
4214 *
4215 * On retry, we cut in line in the issue queue, since we don't want
4216 * compression/checksumming/etc. work to prevent our (cheap) IO reissue.
4217 */
4218 if (zio->io_error && vd == NULL &&
4219 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
4220 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */
4221 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */
4222 zio->io_error = 0;
4223 zio->io_flags |= ZIO_FLAG_IO_RETRY | ZIO_FLAG_DONT_AGGREGATE;
4224 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
4225 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
4226 zio_requeue_io_start_cut_in_line);
4227 return (NULL);
4228 }
4229
4230 /*
4231 * If we got an error on a leaf device, convert it to ENXIO
4232 * if the device is not accessible at all.
4233 */
4234 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
4235 !vdev_accessible(vd, zio))
4236 zio->io_error = SET_ERROR(ENXIO);
4237
4238 /*
4239 * If we can't write to an interior vdev (mirror or RAID-Z),
4240 * set vdev_cant_write so that we stop trying to allocate from it.
4241 */
4242 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
4243 vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
4244 vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting "
4245 "cant_write=TRUE due to write failure with ENXIO",
4246 zio);
4247 vd->vdev_cant_write = B_TRUE;
4248 }
4249
4250 /*
4251 * If a cache flush returns ENOTSUP or ENOTTY, we know that no future
4252 * attempts will ever succeed. In this case we set a persistent
4253 * boolean flag so that we don't bother with it in the future.
4254 */
4255 if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) &&
4256 zio->io_type == ZIO_TYPE_FLUSH && vd != NULL)
4257 vd->vdev_nowritecache = B_TRUE;
4258
4259 if (zio->io_error)
4260 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
4261
4262 return (zio);
4263 }
4264
4265 void
4266 zio_vdev_io_reissue(zio_t *zio)
4267 {
4268 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
4269 ASSERT(zio->io_error == 0);
4270
4271 zio->io_stage >>= 1;
4272 }
4273
4274 void
4275 zio_vdev_io_redone(zio_t *zio)
4276 {
4277 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
4278
4279 zio->io_stage >>= 1;
4280 }
4281
4282 void
4283 zio_vdev_io_bypass(zio_t *zio)
4284 {
4285 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
4286 ASSERT(zio->io_error == 0);
4287
4288 zio->io_flags |= ZIO_FLAG_IO_BYPASS;
4289 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
4290 }
4291
4292 /*
4293 * ==========================================================================
4294 * Encrypt and store encryption parameters
4295 * ==========================================================================
4296 */
4297
4298
4299 /*
4300 * This function is used for ZIO_STAGE_ENCRYPT. It is responsible for
4301 * managing the storage of encryption parameters and passing them to the
4302 * lower-level encryption functions.
4303 */
4304 static zio_t *
4305 zio_encrypt(zio_t *zio)
4306 {
4307 zio_prop_t *zp = &zio->io_prop;
4308 spa_t *spa = zio->io_spa;
4309 blkptr_t *bp = zio->io_bp;
4310 uint64_t psize = BP_GET_PSIZE(bp);
4311 uint64_t dsobj = zio->io_bookmark.zb_objset;
4312 dmu_object_type_t ot = BP_GET_TYPE(bp);
4313 void *enc_buf = NULL;
4314 abd_t *eabd = NULL;
4315 uint8_t salt[ZIO_DATA_SALT_LEN];
4316 uint8_t iv[ZIO_DATA_IV_LEN];
4317 uint8_t mac[ZIO_DATA_MAC_LEN];
4318 boolean_t no_crypt = B_FALSE;
4319
4320 /* the root zio already encrypted the data */
4321 if (zio->io_child_type == ZIO_CHILD_GANG)
4322 return (zio);
4323
4324 /* only ZIL blocks are re-encrypted on rewrite */
4325 if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG)
4326 return (zio);
4327
4328 if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) {
4329 BP_SET_CRYPT(bp, B_FALSE);
4330 return (zio);
4331 }
4332
4333 /* if we are doing raw encryption set the provided encryption params */
4334 if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) {
4335 ASSERT0(BP_GET_LEVEL(bp));
4336 BP_SET_CRYPT(bp, B_TRUE);
4337 BP_SET_BYTEORDER(bp, zp->zp_byteorder);
4338 if (ot != DMU_OT_OBJSET)
4339 zio_crypt_encode_mac_bp(bp, zp->zp_mac);
4340
4341 /* dnode blocks must be written out in the provided byteorder */
4342 if (zp->zp_byteorder != ZFS_HOST_BYTEORDER &&
4343 ot == DMU_OT_DNODE) {
4344 void *bswap_buf = zio_buf_alloc(psize);
4345 abd_t *babd = abd_get_from_buf(bswap_buf, psize);
4346
4347 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
4348 abd_copy_to_buf(bswap_buf, zio->io_abd, psize);
4349 dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf,
4350 psize);
4351
4352 abd_take_ownership_of_buf(babd, B_TRUE);
4353 zio_push_transform(zio, babd, psize, psize, NULL);
4354 }
4355
4356 if (DMU_OT_IS_ENCRYPTED(ot))
4357 zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv);
4358 return (zio);
4359 }
4360
4361 /* indirect blocks only maintain a cksum of the lower level MACs */
4362 if (BP_GET_LEVEL(bp) > 0) {
4363 BP_SET_CRYPT(bp, B_TRUE);
4364 VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE,
4365 zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp),
4366 mac));
4367 zio_crypt_encode_mac_bp(bp, mac);
4368 return (zio);
4369 }
4370
4371 /*
4372 * Objset blocks are a special case since they have 2 256-bit MACs
4373 * embedded within them.
4374 */
4375 if (ot == DMU_OT_OBJSET) {
4376 ASSERT0(DMU_OT_IS_ENCRYPTED(ot));
4377 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
4378 BP_SET_CRYPT(bp, B_TRUE);
4379 VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj,
4380 zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp)));
4381 return (zio);
4382 }
4383
4384 /* unencrypted object types are only authenticated with a MAC */
4385 if (!DMU_OT_IS_ENCRYPTED(ot)) {
4386 BP_SET_CRYPT(bp, B_TRUE);
4387 VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj,
4388 zio->io_abd, psize, mac));
4389 zio_crypt_encode_mac_bp(bp, mac);
4390 return (zio);
4391 }
4392
4393 /*
4394 * Later passes of sync-to-convergence may decide to rewrite data
4395 * in place to avoid more disk reallocations. This presents a problem
4396 * for encryption because this constitutes rewriting the new data with
4397 * the same encryption key and IV. However, this only applies to blocks
4398 * in the MOS (particularly the spacemaps) and we do not encrypt the
4399 * MOS. We assert that the zio is allocating or an intent log write
4400 * to enforce this.
4401 */
4402 ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG);
4403 ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG);
4404 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
4405 ASSERT3U(psize, !=, 0);
4406
4407 enc_buf = zio_buf_alloc(psize);
4408 eabd = abd_get_from_buf(enc_buf, psize);
4409 abd_take_ownership_of_buf(eabd, B_TRUE);
4410
4411 /*
4412 * For an explanation of what encryption parameters are stored
4413 * where, see the block comment in zio_crypt.c.
4414 */
4415 if (ot == DMU_OT_INTENT_LOG) {
4416 zio_crypt_decode_params_bp(bp, salt, iv);
4417 } else {
4418 BP_SET_CRYPT(bp, B_TRUE);
4419 }
4420
4421 /* Perform the encryption. This should not fail */
4422 VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark,
4423 BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
4424 salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt));
4425
4426 /* encode encryption metadata into the bp */
4427 if (ot == DMU_OT_INTENT_LOG) {
4428 /*
4429 * ZIL blocks store the MAC in the embedded checksum, so the
4430 * transform must always be applied.
4431 */
4432 zio_crypt_encode_mac_zil(enc_buf, mac);
4433 zio_push_transform(zio, eabd, psize, psize, NULL);
4434 } else {
4435 BP_SET_CRYPT(bp, B_TRUE);
4436 zio_crypt_encode_params_bp(bp, salt, iv);
4437 zio_crypt_encode_mac_bp(bp, mac);
4438
4439 if (no_crypt) {
4440 ASSERT3U(ot, ==, DMU_OT_DNODE);
4441 abd_free(eabd);
4442 } else {
4443 zio_push_transform(zio, eabd, psize, psize, NULL);
4444 }
4445 }
4446
4447 return (zio);
4448 }
4449
4450 /*
4451 * ==========================================================================
4452 * Generate and verify checksums
4453 * ==========================================================================
4454 */
4455 static zio_t *
4456 zio_checksum_generate(zio_t *zio)
4457 {
4458 blkptr_t *bp = zio->io_bp;
4459 enum zio_checksum checksum;
4460
4461 if (bp == NULL) {
4462 /*
4463 * This is zio_write_phys().
4464 * We're either generating a label checksum, or none at all.
4465 */
4466 checksum = zio->io_prop.zp_checksum;
4467
4468 if (checksum == ZIO_CHECKSUM_OFF)
4469 return (zio);
4470
4471 ASSERT(checksum == ZIO_CHECKSUM_LABEL);
4472 } else {
4473 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
4474 ASSERT(!IO_IS_ALLOCATING(zio));
4475 checksum = ZIO_CHECKSUM_GANG_HEADER;
4476 } else {
4477 checksum = BP_GET_CHECKSUM(bp);
4478 }
4479 }
4480
4481 zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
4482
4483 return (zio);
4484 }
4485
4486 static zio_t *
4487 zio_checksum_verify(zio_t *zio)
4488 {
4489 zio_bad_cksum_t info;
4490 blkptr_t *bp = zio->io_bp;
4491 int error;
4492
4493 ASSERT(zio->io_vd != NULL);
4494
4495 if (bp == NULL) {
4496 /*
4497 * This is zio_read_phys().
4498 * We're either verifying a label checksum, or nothing at all.
4499 */
4500 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
4501 return (zio);
4502
4503 ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL);
4504 }
4505
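	/*
	 * If the checksum fails, record the error; for genuine checksum
	 * mismatches (ECKSUM) on non-speculative reads, also bump the
	 * vdev's checksum error count and start an ereport.
	 */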
4506 if ((error = zio_checksum_error(zio, &info)) != 0) {
4507 zio->io_error = error;
4508 if (error == ECKSUM &&
4509 !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
4510 mutex_enter(&zio->io_vd->vdev_stat_lock);
4511 zio->io_vd->vdev_stat.vs_checksum_errors++;
4512 mutex_exit(&zio->io_vd->vdev_stat_lock);
4513 (void) zfs_ereport_start_checksum(zio->io_spa,
4514 zio->io_vd, &zio->io_bookmark, zio,
4515 zio->io_offset, zio->io_size, &info);
4516 }
4517 }
4518
4519 return (zio);
4520 }
4521
4522 /*
4523 * Called by RAID-Z to ensure we don't compute the checksum twice.
4524 */
4525 void
4526 zio_checksum_verified(zio_t *zio)
4527 {
4528 zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
4529 }
4530
4531 /*
4532 * ==========================================================================
4533 * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
4534 * An error of 0 indicates success. ENXIO indicates whole-device failure,
4535 * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO
4536 * indicate errors that are specific to one I/O, and most likely permanent.
4537 * Any other error is presumed to be worse because we weren't expecting it.
4538 * ==========================================================================
4539 */
4540 int
4541 zio_worst_error(int e1, int e2)
4542 {
4543 static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
4544 int r1, r2;
4545
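	/*
	 * Errors not present in the table fall off the end of the loop
	 * and therefore rank as the worst.
	 */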
4546 for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
4547 if (e1 == zio_error_rank[r1])
4548 break;
4549
4550 for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
4551 if (e2 == zio_error_rank[r2])
4552 break;
4553
4554 return (r1 > r2 ? e1 : e2);
4555 }
4556
4557 /*
4558 * ==========================================================================
4559 * I/O completion
4560 * ==========================================================================
4561 */
4562 static zio_t *
4563 zio_ready(zio_t *zio)
4564 {
4565 blkptr_t *bp = zio->io_bp;
4566 zio_t *pio, *pio_next;
4567 zio_link_t *zl = NULL;
4568
4569 if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
4570 ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, ZIO_WAIT_READY)) {
4571 return (NULL);
4572 }
4573
4574 if (zio->io_ready) {
4575 ASSERT(IO_IS_ALLOCATING(zio));
4576 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg ||
4577 BP_IS_HOLE(bp) || (zio->io_flags & ZIO_FLAG_NOPWRITE));
4578 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
4579
4580 zio->io_ready(zio);
4581 }
4582
4583 #ifdef ZFS_DEBUG
4584 if (bp != NULL && bp != &zio->io_bp_copy)
4585 zio->io_bp_copy = *bp;
4586 #endif
4587
4588 if (zio->io_error != 0) {
4589 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
4590
4591 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
4592 ASSERT(IO_IS_ALLOCATING(zio));
4593 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
4594 ASSERT(zio->io_metaslab_class != NULL);
4595 ASSERT(ZIO_HAS_ALLOCATOR(zio));
4596
4597 /*
4598 * We were unable to allocate anything, unreserve and
4599 * issue the next I/O to allocate.
4600 */
4601 metaslab_class_throttle_unreserve(
4602 zio->io_metaslab_class, zio->io_prop.zp_copies,
4603 zio->io_allocator, zio);
4604 zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
4605 }
4606 }
4607
4608 mutex_enter(&zio->io_lock);
4609 zio->io_state[ZIO_WAIT_READY] = 1;
4610 pio = zio_walk_parents(zio, &zl);
4611 mutex_exit(&zio->io_lock);
4612
4613 /*
4614 * As we notify zio's parents, new parents could be added.
4615 * New parents go to the head of zio's io_parent_list, however,
4616 * so we will (correctly) not notify them. The remainder of zio's
4617 * io_parent_list, from 'pio_next' onward, cannot change because
4618 * all parents must wait for us to be done before they can be done.
4619 */
4620 for (; pio != NULL; pio = pio_next) {
4621 pio_next = zio_walk_parents(zio, &zl);
4622 zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL);
4623 }
4624
4625 if (zio->io_flags & ZIO_FLAG_NODATA) {
4626 if (bp != NULL && BP_IS_GANG(bp)) {
4627 zio->io_flags &= ~ZIO_FLAG_NODATA;
4628 } else {
4629 ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
4630 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
4631 }
4632 }
4633
4634 if (zio_injection_enabled &&
4635 zio->io_spa->spa_syncing_txg == zio->io_txg)
4636 zio_handle_ignored_writes(zio);
4637
4638 return (zio);
4639 }
4640
4641 /*
4642 * Update the allocation throttle accounting.
4643 */
4644 static void
4645 zio_dva_throttle_done(zio_t *zio)
4646 {
4647 zio_t *lio __maybe_unused = zio->io_logical;
4648 zio_t *pio = zio_unique_parent(zio);
4649 vdev_t *vd = zio->io_vd;
4650 int flags = METASLAB_ASYNC_ALLOC;
4651
4652 ASSERT3P(zio->io_bp, !=, NULL);
4653 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
4654 ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
4655 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
4656 ASSERT(vd != NULL);
4657 ASSERT3P(vd, ==, vd->vdev_top);
4658 ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY));
4659 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
4660 ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
4661 ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
4662 ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));
4663
4664 /*
4665 * Parents of gang children can have two flavors -- ones that
4666 * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
4667 * and ones that allocated the constituent blocks. The allocation
4668 * throttle needs to know the allocating parent zio so we must find
4669 * it here.
4670 */
4671 if (pio->io_child_type == ZIO_CHILD_GANG) {
4672 /*
4673 * If our parent is a rewrite gang child then our grandparent
4674 * would have been the one that performed the allocation.
4675 */
4676 if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
4677 pio = zio_unique_parent(pio);
4678 flags |= METASLAB_GANG_CHILD;
4679 }
4680
4681 ASSERT(IO_IS_ALLOCATING(pio));
4682 ASSERT(ZIO_HAS_ALLOCATOR(pio));
4683 ASSERT3P(zio, !=, zio->io_logical);
4684 ASSERT(zio->io_logical != NULL);
4685 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
4686 ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
4687 ASSERT(zio->io_metaslab_class != NULL);
4688
4689 mutex_enter(&pio->io_lock);
4690 metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
4691 pio->io_allocator, B_TRUE);
4692 mutex_exit(&pio->io_lock);
4693
4694 metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1,
4695 pio->io_allocator, pio);
4696
4697 /*
4698 * Call into the pipeline to see if there is more work that
4699 * needs to be done. If there is work to be done it will be
4700 * dispatched to another taskq thread.
4701 */
4702 zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
4703 }
4704
4705 static zio_t *
4706 zio_done(zio_t *zio)
4707 {
4708 /*
4709 * Always attempt to keep stack usage minimal here since
4710 * we can be called recursively up to 19 levels deep.
4711 */
4712 const uint64_t psize = zio->io_size;
4713 zio_t *pio, *pio_next;
4714 zio_link_t *zl = NULL;
4715
4716 /*
4717 * If our children haven't all completed,
4718 * wait for them and then repeat this pipeline stage.
4719 */
4720 if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
4721 return (NULL);
4722 }
4723
4724 /*
4725 * If the allocation throttle is enabled, then update the accounting.
4726 * We only track child I/Os that are part of an allocating async
4727 * write. We must do this since the allocation is performed
4728 * by the logical I/O but the actual write is done by child I/Os.
4729 */
4730 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
4731 zio->io_child_type == ZIO_CHILD_VDEV) {
4732 ASSERT(zio->io_metaslab_class != NULL);
4733 ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);
4734 zio_dva_throttle_done(zio);
4735 }
4736
4737 /*
4738 * If the allocation throttle is enabled, verify that
4739 * we have decremented the refcounts for every I/O that was throttled.
4740 */
4741 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
4742 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
4743 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
4744 ASSERT(zio->io_bp != NULL);
4745 ASSERT(ZIO_HAS_ALLOCATOR(zio));
4746
4747 metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio,
4748 zio->io_allocator);
4749 VERIFY(zfs_refcount_not_held(&zio->io_metaslab_class->
4750 mc_allocator[zio->io_allocator].mca_alloc_slots, zio));
4751 }
4752
4753
4754 for (int c = 0; c < ZIO_CHILD_TYPES; c++)
4755 for (int w = 0; w < ZIO_WAIT_TYPES; w++)
4756 ASSERT(zio->io_children[c][w] == 0);
4757
4758 if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
4759 ASSERT(zio->io_bp->blk_pad[0] == 0);
4760 ASSERT(zio->io_bp->blk_pad[1] == 0);
4761 ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy,
4762 sizeof (blkptr_t)) == 0 ||
4763 (zio->io_bp == zio_unique_parent(zio)->io_bp));
4764 if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
4765 zio->io_bp_override == NULL &&
4766 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
4767 ASSERT3U(zio->io_prop.zp_copies, <=,
4768 BP_GET_NDVAS(zio->io_bp));
4769 ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 ||
4770 (BP_COUNT_GANG(zio->io_bp) ==
4771 BP_GET_NDVAS(zio->io_bp)));
4772 }
4773 if (zio->io_flags & ZIO_FLAG_NOPWRITE)
4774 VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
4775 }
4776
4777 /*
4778 * If there were child vdev/gang/ddt errors, they apply to us now.
4779 */
4780 zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
4781 zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
4782 zio_inherit_child_errors(zio, ZIO_CHILD_DDT);
4783
4784 /*
4785 * If the I/O on the transformed data was successful, generate any
4786 * checksum reports now while we still have the transformed data.
4787 */
4788 if (zio->io_error == 0) {
4789 while (zio->io_cksum_report != NULL) {
4790 zio_cksum_report_t *zcr = zio->io_cksum_report;
4791 uint64_t align = zcr->zcr_align;
4792 uint64_t asize = P2ROUNDUP(psize, align);
4793 abd_t *adata = zio->io_abd;
4794
4795 if (adata != NULL && asize != psize) {
4796 adata = abd_alloc(asize, B_TRUE);
4797 abd_copy(adata, zio->io_abd, psize);
4798 abd_zero_off(adata, psize, asize - psize);
4799 }
4800
4801 zio->io_cksum_report = zcr->zcr_next;
4802 zcr->zcr_next = NULL;
4803 zcr->zcr_finish(zcr, adata);
4804 zfs_ereport_free_checksum(zcr);
4805
4806 if (adata != NULL && asize != psize)
4807 abd_free(adata);
4808 }
4809 }
4810
4811 zio_pop_transforms(zio); /* note: may set zio->io_error */
4812
4813 vdev_stat_update(zio, psize);
4814
4815 /*
4816 * If this I/O is attached to a particular vdev and is slow, exceeding
4817 * zio_slow_io_ms (30 seconds by default), post an error describing the I/O delay.
4818 * We ignore these errors if the device is currently unavailable.
4819 */
4820 if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) {
4821 if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) {
4822 /*
4823 * We want to only increment our slow IO counters if
4824 * the IO is valid (i.e. not if the drive is removed).
4825 *
4826 * zfs_ereport_post() will also do these checks, but
4827 * it can also ratelimit and have other failures, so we
4828 * need to increment the slow_io counters independently of it.
4829 * of it.
4830 */
4831 if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY,
4832 zio->io_spa, zio->io_vd, zio)) {
4833 mutex_enter(&zio->io_vd->vdev_stat_lock);
4834 zio->io_vd->vdev_stat.vs_slow_ios++;
4835 mutex_exit(&zio->io_vd->vdev_stat_lock);
4836
4837 (void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
4838 zio->io_spa, zio->io_vd, &zio->io_bookmark,
4839 zio, 0);
4840 }
4841 }
4842 }
4843
4844 if (zio->io_error) {
4845 /*
4846 * If this I/O is attached to a particular vdev,
4847 * generate an error message describing the I/O failure
4848 * at the block level. We ignore these errors if the
4849 * device is currently unavailable.
4850 */
4851 if (zio->io_error != ECKSUM && zio->io_vd != NULL &&
4852 !vdev_is_dead(zio->io_vd)) {
4853 int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO,
4854 zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
4855 if (ret != EALREADY) {
4856 mutex_enter(&zio->io_vd->vdev_stat_lock);
4857 if (zio->io_type == ZIO_TYPE_READ)
4858 zio->io_vd->vdev_stat.vs_read_errors++;
4859 else if (zio->io_type == ZIO_TYPE_WRITE)
4860 zio->io_vd->vdev_stat.vs_write_errors++;
4861 mutex_exit(&zio->io_vd->vdev_stat_lock);
4862 }
4863 }
4864
4865 if ((zio->io_error == EIO || !(zio->io_flags &
4866 (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
4867 zio == zio->io_logical) {
4868 /*
4869 * For logical I/O requests, tell the SPA to log the
4870 * error and generate a logical data ereport.
4871 */
4872 spa_log_error(zio->io_spa, &zio->io_bookmark,
4873 BP_GET_LOGICAL_BIRTH(zio->io_bp));
4874 (void) zfs_ereport_post(FM_EREPORT_ZFS_DATA,
4875 zio->io_spa, NULL, &zio->io_bookmark, zio, 0);
4876 }
4877 }
4878
4879 if (zio->io_error && zio == zio->io_logical) {
4880 /*
4881 * Determine whether zio should be reexecuted. This will
4882 * propagate all the way to the root via zio_notify_parent().
4883 */
4884 ASSERT(zio->io_vd == NULL && zio->io_bp != NULL);
4885 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
4886
4887 if (IO_IS_ALLOCATING(zio) &&
4888 !(zio->io_flags & ZIO_FLAG_CANFAIL)) {
4889 if (zio->io_error != ENOSPC)
4890 zio->io_reexecute |= ZIO_REEXECUTE_NOW;
4891 else
4892 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
4893 }
4894
4895 if ((zio->io_type == ZIO_TYPE_READ ||
4896 zio->io_type == ZIO_TYPE_FREE) &&
4897 !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
4898 zio->io_error == ENXIO &&
4899 spa_load_state(zio->io_spa) == SPA_LOAD_NONE &&
4900 spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE)
4901 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
4902
4903 if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
4904 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
4905
4906 /*
4907 * Here is a possibly good place to attempt to do
4908 * either combinatorial reconstruction or error correction
4909 * based on checksums. It also might be a good place
4910 * to send out preliminary ereports before we suspend
4911 * processing.
4912 */
4913 }
4914
4915 /*
4916 * If there were logical child errors, they apply to us now.
4917 * We defer this until now to avoid conflating logical child
4918 * errors with errors that happened to the zio itself when
4919 * updating vdev stats and reporting FMA events above.
4920 */
4921 zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);
4922
4923 if ((zio->io_error || zio->io_reexecute) &&
4924 IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
4925 !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
4926 zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp);
4927
4928 zio_gang_tree_free(&zio->io_gang_tree);
4929
4930 /*
4931 * Godfather I/Os should never suspend.
4932 */
4933 if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
4934 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
4935 zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND;
4936
4937 if (zio->io_reexecute) {
4938 /*
4939 * This is a logical I/O that wants to reexecute.
4940 *
		 * Reexecute is top-down. When an i/o fails, if it's not
		 * the root, it simply notifies its parent and sticks around.
		 * The parent, seeing that it still has children in zio_done(),
		 * does the same. This percolates all the way up to the root.
		 * The root i/o will reexecute or suspend the entire tree.
		 *
		 * This approach ensures that zio_reexecute() honors
		 * all the original i/o dependency relationships, e.g.
		 * parents not executing until children are ready.
		 */
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		zio->io_gang_leader = NULL;

		mutex_enter(&zio->io_lock);
		zio->io_state[ZIO_WAIT_DONE] = 1;
		mutex_exit(&zio->io_lock);

		/*
		 * "The Godfather" I/O monitors its children but is
		 * not a true parent to them. It will track them through
		 * the pipeline but severs its ties whenever they get into
		 * trouble (e.g. suspended). This allows "The Godfather"
		 * I/O to return status without blocking.
		 */
		zl = NULL;
		for (pio = zio_walk_parents(zio, &zl); pio != NULL;
		    pio = pio_next) {
			zio_link_t *remove_zl = zl;
			pio_next = zio_walk_parents(zio, &zl);

			if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
			    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
				zio_remove_child(pio, zio, remove_zl);
				/*
				 * This is a rare code path, so we don't
				 * bother with "next_to_execute".
				 */
				zio_notify_parent(pio, zio, ZIO_WAIT_DONE,
				    NULL);
			}
		}

		if ((pio = zio_unique_parent(zio)) != NULL) {
			/*
			 * We're not a root i/o, so there's nothing to do
			 * but notify our parent. Don't propagate errors
			 * upward since we haven't permanently failed yet.
			 */
			ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
			zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
			/*
			 * This is a rare code path, so we don't bother with
			 * "next_to_execute".
			 */
			zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL);
		} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
			/*
			 * We'd fail again if we reexecuted now, so suspend
			 * until conditions improve (e.g. device comes online).
			 */
			zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
		} else {
			/*
			 * Reexecution is potentially a huge amount of work.
			 * Hand it off to the otherwise-unused claim taskq.
			 */
			ASSERT(taskq_empty_ent(&zio->io_tqent));
			spa_taskq_dispatch_ent(zio->io_spa,
			    ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
			    zio_reexecute, zio, 0, &zio->io_tqent, NULL);
		}
		return (NULL);
	}

	ASSERT(list_is_empty(&zio->io_child_list));
	ASSERT(zio->io_reexecute == 0);
	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));

	/*
	 * Report any checksum errors, since the I/O is complete.
	 */
	while (zio->io_cksum_report != NULL) {
		zio_cksum_report_t *zcr = zio->io_cksum_report;
		zio->io_cksum_report = zcr->zcr_next;
		zcr->zcr_next = NULL;
		zcr->zcr_finish(zcr, NULL);
		zfs_ereport_free_checksum(zcr);
	}

	/*
	 * It is the responsibility of the done callback to ensure that this
	 * particular zio is no longer discoverable for adoption, and as
	 * such, cannot acquire any new parents.
	 */
	if (zio->io_done)
		zio->io_done(zio);

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_DONE] = 1;
	mutex_exit(&zio->io_lock);

	/*
	 * We are done executing this zio. We may want to execute a parent
	 * next. See the comment in zio_notify_parent().
	 */
	zio_t *next_to_execute = NULL;
	zl = NULL;
	for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
		zio_link_t *remove_zl = zl;
		pio_next = zio_walk_parents(zio, &zl);
		zio_remove_child(pio, zio, remove_zl);
		zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute);
	}

	if (zio->io_waiter != NULL) {
		mutex_enter(&zio->io_lock);
		zio->io_executor = NULL;
		cv_broadcast(&zio->io_cv);
		mutex_exit(&zio->io_lock);
	} else {
		zio_destroy(zio);
	}

	return (next_to_execute);
}

/*
 * ==========================================================================
 * I/O pipeline definition
 * ==========================================================================
 */
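/*
 * Note: the order of the stages below is assumed to match the bit order of
 * the ZIO_STAGE_* definitions in zio_impl.h; __zio_execute() picks the next
 * stage by bit position and uses that position to index this table.
 */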
static zio_pipe_stage_t *zio_pipeline[] = {
	NULL,
	zio_read_bp_init,
	zio_write_bp_init,
	zio_free_bp_init,
	zio_issue_async,
	zio_write_compress,
	zio_encrypt,
	zio_checksum_generate,
	zio_nop_write,
	zio_brt_free,
	zio_ddt_read_start,
	zio_ddt_read_done,
	zio_ddt_write,
	zio_ddt_free,
	zio_gang_assemble,
	zio_gang_issue,
	zio_dva_throttle,
	zio_dva_allocate,
	zio_dva_free,
	zio_dva_claim,
	zio_ready,
	zio_vdev_io_start,
	zio_vdev_io_done,
	zio_vdev_io_assess,
	zio_checksum_verify,
	zio_done
};

/*
 * Compare two zbookmark_phys_t's to see which we would reach first in a
 * pre-order traversal of the object tree.
 *
 * This is simple in every case aside from the meta-dnode object. For all other
 * objects, we traverse them in order (object 1 before object 2, and so on).
 * However, all of these objects are traversed while traversing object 0, since
 * the data it points to is the list of objects. Thus, we need to convert to a
 * canonical representation so we can compare meta-dnode bookmarks to
 * non-meta-dnode bookmarks.
 *
 * We do this by calculating "equivalents" for each field of the zbookmark.
 * zbookmarks outside of the meta-dnode use their own object and level, and
 * calculate the level 0 equivalent (the first L0 blkid that is contained in
 * the blocks this bookmark refers to) by multiplying their blkid by their
 * span (the number of L0 blocks contained within one block at their level).
 * zbookmarks inside the meta-dnode calculate their object equivalent
 * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
 * level + 1<<31 (any value larger than a level could ever be) for their level.
 * This causes them to always compare before a bookmark in their object
 * equivalent, compare appropriately to bookmarks in other objects, and to
 * compare appropriately to other bookmarks in the meta-dnode.
 */
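/*
 * For example (illustrative numbers only): with 128K indirect blocks
 * (indblkshift == 17), BP_SPANB(17, 1) == 1024, so a level-1 bookmark with
 * blkid 3 in object 5 gets L0equiv 3072, while a level-0 bookmark with
 * blkid 3000 in the same object gets L0equiv 3000 and therefore compares
 * first -- matching a pre-order traversal, which visits L0 blocks 3072-4095
 * only after L0 block 3000. Similarly, with 16K dnode blocks (dbss == 32),
 * a level-0 meta-dnode bookmark with blkid 7 gets object equivalent 224, so
 * it compares before every bookmark in objects 224-255 (the objects stored
 * in that dnode block) and after bookmarks in earlier objects.
 */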
int
zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
    const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
{
	/*
	 * These variables represent the "equivalent" values for the zbookmark,
	 * after converting zbookmarks inside the meta dnode to their
	 * normal-object equivalents.
	 */
	uint64_t zb1obj, zb2obj;
	uint64_t zb1L0, zb2L0;
	uint64_t zb1level, zb2level;

	if (zb1->zb_object == zb2->zb_object &&
	    zb1->zb_level == zb2->zb_level &&
	    zb1->zb_blkid == zb2->zb_blkid)
		return (0);

	IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT);
	IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT);

	/*
	 * BP_SPANB calculates the span in blocks.
	 */
	zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
	zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);

	if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
		zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
		zb1L0 = 0;
		zb1level = zb1->zb_level + COMPARE_META_LEVEL;
	} else {
		zb1obj = zb1->zb_object;
		zb1level = zb1->zb_level;
	}

	if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
		zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
		zb2L0 = 0;
		zb2level = zb2->zb_level + COMPARE_META_LEVEL;
	} else {
		zb2obj = zb2->zb_object;
		zb2level = zb2->zb_level;
	}

	/* Now that we have a canonical representation, do the comparison. */
	if (zb1obj != zb2obj)
		return (zb1obj < zb2obj ? -1 : 1);
	else if (zb1L0 != zb2L0)
		return (zb1L0 < zb2L0 ? -1 : 1);
	else if (zb1level != zb2level)
		return (zb1level > zb2level ? -1 : 1);
	/*
	 * This can (theoretically) happen if the bookmarks have the same
	 * object and level but different blkids, which would require the two
	 * bookmarks to have been created with different indirect block sizes.
	 * There is presently no way to change the indirect block size, so
	 * this should not occur in practice.
	 */
	return (0);
}

/*
 * This function answers the following question: given that last_block is the
 * place where our traversal stopped last time, are we guaranteed to have
 * visited every node under subtree_root?  Because subtree_root describes an
 * entire subtree rather than a single block, we can't just use the raw output
 * of zbookmark_compare.  Instead we pass in a modified version of
 * subtree_root: by incrementing its block id and then checking whether
 * last_block is at or after that point, we can tell whether having visited
 * last_block implies that all of subtree_root's children have been visited.
 */
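/*
 * For example (illustrative numbers only): with 128K indirect blocks, a
 * level-1 subtree_root with blkid 2 spans L0 blocks 2048-3071.  Incrementing
 * its blkid yields a bookmark whose first L0 equivalent is 3072, so the
 * subtree is reported as completed once last_block has reached L0 block 3072
 * or beyond, and as not completed if last_block is, say, L0 block 2500.
 */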
boolean_t
zbookmark_subtree_completed(const dnode_phys_t *dnp,
    const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
	zbookmark_phys_t mod_zb = *subtree_root;
	mod_zb.zb_blkid++;
	ASSERT0(last_block->zb_level);

	/* The objset_phys_t isn't before anything. */
	if (dnp == NULL)
		return (B_FALSE);

	/*
	 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
	 * data block size in sectors, because that variable is only used if
	 * the bookmark refers to a block in the meta-dnode. Since we don't
	 * know what object it refers to without examining it, and there's no
	 * harm in passing in this value in other cases, we always pass it in.
	 *
	 * We pass in 0 for the indirect block size shift because zb2 must be
	 * level 0. The indirect block size is only used to calculate the span
	 * of the bookmark, but since the bookmark must be level 0, the span is
	 * always 1, so the math works out.
	 *
	 * If you make changes to how the zbookmark_compare code works, be sure
	 * that this code still works afterwards.
	 */
	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
	    last_block) <= 0);
}

/*
 * This function is similar to zbookmark_subtree_completed(), but returns true
 * if subtree_root is equal to or ahead of last_block, i.e. is still to be
 * done.
 */
boolean_t
zbookmark_subtree_tbd(const dnode_phys_t *dnp,
    const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
	ASSERT0(last_block->zb_level);
	if (dnp == NULL)
		return (B_FALSE);
	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, subtree_root,
	    last_block) >= 0);
}
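
/*
 * Both predicates above are intended for resuming a traversal from a saved
 * bookmark (e.g. by the scan code in dsl_scan.c): zbookmark_subtree_completed()
 * identifies subtrees that can be skipped because they were already visited,
 * while zbookmark_subtree_tbd() identifies subtrees that still need work.
 */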

EXPORT_SYMBOL(zio_type_name);
EXPORT_SYMBOL(zio_buf_alloc);
EXPORT_SYMBOL(zio_data_buf_alloc);
EXPORT_SYMBOL(zio_buf_free);
EXPORT_SYMBOL(zio_data_buf_free);

ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, UINT, ZMOD_RW,
	"Max I/O completion time (milliseconds) before marking it as slow");

ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
	"Prioritize requeued I/O");

ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, UINT, ZMOD_RW,
	"Defer frees starting in this pass");

ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, UINT, ZMOD_RW,
	"Don't compress starting in this pass");

ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, UINT, ZMOD_RW,
	"Rewrite new bps starting in this pass");

ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
	"Throttle block allocations in the ZIO pipeline");

ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW,
	"Log all slow ZIOs, not just those with vdevs");