1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2022 by Delphix. All rights reserved.
24 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2017, Intel Corporation.
26 * Copyright (c) 2019, 2023, 2024, Klara Inc.
27 * Copyright (c) 2019, Allan Jude
28 * Copyright (c) 2021, Datto, Inc.
29 */
30
31 #include <sys/sysmacros.h>
32 #include <sys/zfs_context.h>
33 #include <sys/fm/fs/zfs.h>
34 #include <sys/spa.h>
35 #include <sys/txg.h>
36 #include <sys/spa_impl.h>
37 #include <sys/vdev_impl.h>
38 #include <sys/vdev_trim.h>
39 #include <sys/zio_impl.h>
40 #include <sys/zio_compress.h>
41 #include <sys/zio_checksum.h>
42 #include <sys/dmu_objset.h>
43 #include <sys/arc.h>
44 #include <sys/brt.h>
45 #include <sys/ddt.h>
46 #include <sys/blkptr.h>
47 #include <sys/zfeature.h>
48 #include <sys/dsl_scan.h>
49 #include <sys/metaslab_impl.h>
50 #include <sys/time.h>
51 #include <sys/trace_zfs.h>
52 #include <sys/abd.h>
53 #include <sys/dsl_crypt.h>
54 #include <cityhash.h>
55
56 /*
57 * ==========================================================================
58 * I/O type descriptions
59 * ==========================================================================
60 */
61 const char *const zio_type_name[ZIO_TYPES] = {
62 /*
63 * Note: Linux kernel thread name length is limited
64 * so these names will differ from upstream OpenZFS.
65 */
66 "z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_flush", "z_trim"
67 };
68
69 int zio_dva_throttle_enabled = B_TRUE;
70 static int zio_deadman_log_all = B_FALSE;
71
72 /*
73 * ==========================================================================
74 * I/O kmem caches
75 * ==========================================================================
76 */
77 static kmem_cache_t *zio_cache;
78 static kmem_cache_t *zio_link_cache;
79 kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
80 kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
81 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
82 static uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
83 static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
84 #endif
85
86 /* Mark IOs as "slow" if they take longer than 30 seconds */
87 static uint_t zio_slow_io_ms = (30 * MILLISEC);
88
89 #define BP_SPANB(indblkshift, level) \
90 (((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
91 #define COMPARE_META_LEVEL 0x80000000ul
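/*
 * Worked example for BP_SPANB above (a sketch; it assumes SPA_BLKPTRSHIFT
 * is 7, i.e. a 128-byte block pointer): with 128K indirect blocks
 * (indblkshift = 17) each indirect block holds 2^(17 - 7) = 1024 block
 * pointers, so BP_SPANB(17, 1) == 1024 and BP_SPANB(17, 2) == 1024 * 1024
 * data blocks.
 */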
92 /*
93 * The following actions directly affect the spa's sync-to-convergence logic.
94 * The values below define the sync pass when we start performing the action.
95 * Care should be taken when changing these values as they directly impact
96 * spa_sync() performance. Tuning these values may introduce subtle performance
97 * pathologies and should only be done in the context of performance analysis.
98 * These tunables will eventually be removed and replaced with #defines once
99 * enough analysis has been done to determine optimal values.
100 *
101 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
102 * regular blocks are not deferred.
103 *
104 * Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
105 * compression (including of metadata). In practice, we don't have this
106 * many sync passes, so this has no effect.
107 *
108 * The original intent was that disabling compression would help the sync
109 * passes to converge. However, in practice disabling compression increases
110 * the average number of sync passes, because when we turn compression off, a
111 * lot of blocks' sizes will change and thus we have to re-allocate (not
112 * overwrite) them. It also increases the number of 128KB allocations (e.g.
113 * for indirect blocks and spacemaps) because these will not be compressed.
114 * The 128K allocations are especially detrimental to performance on highly
115 * fragmented systems, which may have very few free segments of this size,
116 * and may need to load new metaslabs to satisfy 128K allocations.
117 */
118
119 /* defer frees starting in this pass */
120 uint_t zfs_sync_pass_deferred_free = 2;
121
122 /* don't compress starting in this pass */
123 static uint_t zfs_sync_pass_dont_compress = 8;
124
125 /* rewrite new bps starting in this pass */
126 static uint_t zfs_sync_pass_rewrite = 2;
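/*
 * Illustrative reading of the defaults above (hedged; the exact behavior
 * depends on how spa_sync() iterates): a txg that converges in three sync
 * passes defers frees during passes 2 and 3 (zfs_sync_pass_deferred_free = 2)
 * and never reaches pass 8, so compression is never disabled
 * (zfs_sync_pass_dont_compress = 8).
 */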
127
128 /*
129 * An allocating zio is one that either currently has the DVA allocate
130 * stage set or will have it later in its lifetime.
131 */
132 #define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)
133
134 /*
135 * Enable smaller cores by excluding metadata
136 * allocations as well.
137 */
138 int zio_exclude_metadata = 0;
139 static int zio_requeue_io_start_cut_in_line = 1;
140
141 #ifdef ZFS_DEBUG
142 static const int zio_buf_debug_limit = 16384;
143 #else
144 static const int zio_buf_debug_limit = 0;
145 #endif
146
147 static inline void __zio_execute(zio_t *zio);
148
149 static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);
150
151 void
152 zio_init(void)
153 {
154 size_t c;
155
156 zio_cache = kmem_cache_create("zio_cache",
157 sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
158 zio_link_cache = kmem_cache_create("zio_link_cache",
159 sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
160
161 for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
162 size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
163 size_t align, cflags, data_cflags;
164 char name[32];
165
166 /*
167 * Create cache for each half-power of 2 size, starting from
168 * SPA_MINBLOCKSIZE. It should give us memory space efficiency
169 * of ~7/8, sufficient for transient allocations mostly using
170 * these caches.
171 */
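		/*
		 * For example (assuming SPA_MINBLOCKSIZE is 512), the sizes
		 * kept by the check below are 512, 1024, 1536, 2048, 3072,
		 * 4096, 6144, 8192, ...: every power of two plus the midpoint
		 * between successive powers of two. Skipped sizes are
		 * backfilled with the next larger cache after the loop.
		 */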
172 size_t p2 = size;
173 while (!ISP2(p2))
174 p2 &= p2 - 1;
175 if (!IS_P2ALIGNED(size, p2 / 2))
176 continue;
177
178 #ifndef _KERNEL
179 /*
180 * If we are using watchpoints, put each buffer on its own page,
181 * to eliminate the performance overhead of trapping to the
182 * kernel when modifying a non-watched buffer that shares the
183 * page with a watched buffer.
184 */
185 if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
186 continue;
187 #endif
188
189 if (IS_P2ALIGNED(size, PAGESIZE))
190 align = PAGESIZE;
191 else
192 align = 1 << (highbit64(size ^ (size - 1)) - 1);
193
194 cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
195 KMC_NODEBUG : 0;
196 data_cflags = KMC_NODEBUG;
197 if (cflags == data_cflags) {
198 /*
199 * Resulting kmem caches would be identical.
200 * Save memory by creating only one.
201 */
202 (void) snprintf(name, sizeof (name),
203 "zio_buf_comb_%lu", (ulong_t)size);
204 zio_buf_cache[c] = kmem_cache_create(name, size, align,
205 NULL, NULL, NULL, NULL, NULL, cflags);
206 zio_data_buf_cache[c] = zio_buf_cache[c];
207 continue;
208 }
209 (void) snprintf(name, sizeof (name), "zio_buf_%lu",
210 (ulong_t)size);
211 zio_buf_cache[c] = kmem_cache_create(name, size, align,
212 NULL, NULL, NULL, NULL, NULL, cflags);
213
214 (void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
215 (ulong_t)size);
216 zio_data_buf_cache[c] = kmem_cache_create(name, size, align,
217 NULL, NULL, NULL, NULL, NULL, data_cflags);
218 }
219
220 while (--c != 0) {
221 ASSERT(zio_buf_cache[c] != NULL);
222 if (zio_buf_cache[c - 1] == NULL)
223 zio_buf_cache[c - 1] = zio_buf_cache[c];
224
225 ASSERT(zio_data_buf_cache[c] != NULL);
226 if (zio_data_buf_cache[c - 1] == NULL)
227 zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
228 }
229
230 zio_inject_init();
231
232 lz4_init();
233 }
234
235 void
236 zio_fini(void)
237 {
238 size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;
239
240 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
241 for (size_t i = 0; i < n; i++) {
242 if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
243 (void) printf("zio_fini: [%d] %llu != %llu\n",
244 (int)((i + 1) << SPA_MINBLOCKSHIFT),
245 (long long unsigned)zio_buf_cache_allocs[i],
246 (long long unsigned)zio_buf_cache_frees[i]);
247 }
248 #endif
249
250 /*
251 * The same kmem cache can show up multiple times in both zio_buf_cache
252 * and zio_data_buf_cache. Do a wasteful but trivially correct scan to
253 * sort it out.
254 */
255 for (size_t i = 0; i < n; i++) {
256 kmem_cache_t *cache = zio_buf_cache[i];
257 if (cache == NULL)
258 continue;
259 for (size_t j = i; j < n; j++) {
260 if (cache == zio_buf_cache[j])
261 zio_buf_cache[j] = NULL;
262 if (cache == zio_data_buf_cache[j])
263 zio_data_buf_cache[j] = NULL;
264 }
265 kmem_cache_destroy(cache);
266 }
267
268 for (size_t i = 0; i < n; i++) {
269 kmem_cache_t *cache = zio_data_buf_cache[i];
270 if (cache == NULL)
271 continue;
272 for (size_t j = i; j < n; j++) {
273 if (cache == zio_data_buf_cache[j])
274 zio_data_buf_cache[j] = NULL;
275 }
276 kmem_cache_destroy(cache);
277 }
278
279 for (size_t i = 0; i < n; i++) {
280 VERIFY3P(zio_buf_cache[i], ==, NULL);
281 VERIFY3P(zio_data_buf_cache[i], ==, NULL);
282 }
283
284 kmem_cache_destroy(zio_link_cache);
285 kmem_cache_destroy(zio_cache);
286
287 zio_inject_fini();
288
289 lz4_fini();
290 }
291
292 /*
293 * ==========================================================================
294 * Allocate and free I/O buffers
295 * ==========================================================================
296 */
297
298 #ifdef ZFS_DEBUG
299 static const ulong_t zio_buf_canary = (ulong_t)0xdeadc0dedead210b;
300 #endif
301
302 /*
303 * Use empty space after the buffer to detect overflows.
304 *
305 * Since zio_init() creates kmem caches only for a certain set of buffer sizes,
306 * allocations of different sizes may have some unused space after the data.
307 * Filling part of that space with a known pattern on allocation and checking
308 * it on free should allow us to detect some buffer overflows.
309 */
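/*
 * For example (a hedged sketch assuming SPA_MINBLOCKSIZE is 512): a 1600-byte
 * zio_buf_alloc() is served from the 2048-byte cache, so the 448 bytes
 * between the (rounded-up) end of the data and the end of the cached buffer
 * are filled with the canary pattern on allocation and re-checked on free.
 */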
310 static void
311 zio_buf_put_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
312 {
313 #ifdef ZFS_DEBUG
314 size_t off = P2ROUNDUP(size, sizeof (ulong_t));
315 ulong_t *canary = p + off / sizeof (ulong_t);
316 size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
317 if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
318 cache[c] == cache[c + 1])
319 asize = (c + 2) << SPA_MINBLOCKSHIFT;
320 for (; off < asize; canary++, off += sizeof (ulong_t))
321 *canary = zio_buf_canary;
322 #endif
323 }
324
325 static void
326 zio_buf_check_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
327 {
328 #ifdef ZFS_DEBUG
329 size_t off = P2ROUNDUP(size, sizeof (ulong_t));
330 ulong_t *canary = p + off / sizeof (ulong_t);
331 size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
332 if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
333 cache[c] == cache[c + 1])
334 asize = (c + 2) << SPA_MINBLOCKSHIFT;
335 for (; off < asize; canary++, off += sizeof (ulong_t)) {
336 if (unlikely(*canary != zio_buf_canary)) {
337 PANIC("ZIO buffer overflow %p (%zu) + %zu %#lx != %#lx",
338 p, size, (canary - p) * sizeof (ulong_t),
339 *canary, zio_buf_canary);
340 }
341 }
342 #endif
343 }
344
345 /*
346 * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
347 * crashdump if the kernel panics, so use it judiciously. Obviously, it's
348 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
349 * excess / transient data in-core during a crashdump.
350 */
351 void *
352 zio_buf_alloc(size_t size)
353 {
354 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
355
356 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
357 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
358 atomic_add_64(&zio_buf_cache_allocs[c], 1);
359 #endif
360
361 void *p = kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE);
362 zio_buf_put_canary(p, size, zio_buf_cache, c);
363 return (p);
364 }
365
366 /*
367 * Use zio_data_buf_alloc to allocate data. The data will not appear in a
368 * crashdump if the kernel panics. This exists so that we will limit the amount
369 * of ZFS data that shows up in a kernel crashdump, thus reducing the amount
370 * of kernel heap dumped to disk when the kernel panics.
371 */
372 void *
373 zio_data_buf_alloc(size_t size)
374 {
375 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
376
377 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
378
379 void *p = kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE);
380 zio_buf_put_canary(p, size, zio_data_buf_cache, c);
381 return (p);
382 }
383
384 void
385 zio_buf_free(void *buf, size_t size)
386 {
387 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
388
389 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
390 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
391 atomic_add_64(&zio_buf_cache_frees[c], 1);
392 #endif
393
394 zio_buf_check_canary(buf, size, zio_buf_cache, c);
395 kmem_cache_free(zio_buf_cache[c], buf);
396 }
397
398 void
399 zio_data_buf_free(void *buf, size_t size)
400 {
401 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
402
403 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
404
405 zio_buf_check_canary(buf, size, zio_data_buf_cache, c);
406 kmem_cache_free(zio_data_buf_cache[c], buf);
407 }
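/*
 * A minimal usage sketch for the zio_buf/zio_data_buf allocators above
 * (hedged; 'lsize' is only an illustrative size): a buffer must be freed with
 * the matching free routine and the same size it was allocated with, since
 * the size selects both the kmem cache and the canary range.
 *
 *	void *mbuf = zio_buf_alloc(lsize);
 *	void *dbuf = zio_data_buf_alloc(lsize);
 *	...
 *	zio_buf_free(mbuf, lsize);
 *	zio_data_buf_free(dbuf, lsize);
 */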
408
409 static void
410 zio_abd_free(void *abd, size_t size)
411 {
412 (void) size;
413 abd_free((abd_t *)abd);
414 }
415
416 /*
417 * ==========================================================================
418 * Push and pop I/O transform buffers
419 * ==========================================================================
420 */
421 void
422 zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
423 zio_transform_func_t *transform)
424 {
425 zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);
426
427 zt->zt_orig_abd = zio->io_abd;
428 zt->zt_orig_size = zio->io_size;
429 zt->zt_bufsize = bufsize;
430 zt->zt_transform = transform;
431
432 zt->zt_next = zio->io_transform_stack;
433 zio->io_transform_stack = zt;
434
435 zio->io_abd = data;
436 zio->io_size = size;
437 }
438
439 void
440 zio_pop_transforms(zio_t *zio)
441 {
442 zio_transform_t *zt;
443
444 while ((zt = zio->io_transform_stack) != NULL) {
445 if (zt->zt_transform != NULL)
446 zt->zt_transform(zio,
447 zt->zt_orig_abd, zt->zt_orig_size);
448
449 if (zt->zt_bufsize != 0)
450 abd_free(zio->io_abd);
451
452 zio->io_abd = zt->zt_orig_abd;
453 zio->io_size = zt->zt_orig_size;
454 zio->io_transform_stack = zt->zt_next;
455
456 kmem_free(zt, sizeof (zio_transform_t));
457 }
458 }
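/*
 * Typical usage sketch (this mirrors zio_read_bp_init() later in this file):
 * a compressed read pushes a transform so the raw (physical) data lands in a
 * temporary abd, and the callback reconstructs the logical data when the
 * transform stack is popped:
 *
 *	zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
 *	    psize, psize, zio_decompress);
 *
 * Because zt_bufsize is non-zero, zio_pop_transforms() frees the temporary
 * abd after running the callback.
 */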
459
460 /*
461 * ==========================================================================
462 * I/O transform callbacks for subblocks, decompression, and decryption
463 * ==========================================================================
464 */
465 static void
466 zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
467 {
468 ASSERT(zio->io_size > size);
469
470 if (zio->io_type == ZIO_TYPE_READ)
471 abd_copy(data, zio->io_abd, size);
472 }
473
474 static void
475 zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
476 {
477 if (zio->io_error == 0) {
478 void *tmp = abd_borrow_buf(data, size);
479 int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
480 zio->io_abd, tmp, zio->io_size, size,
481 &zio->io_prop.zp_complevel);
482 abd_return_buf_copy(data, tmp, size);
483
484 if (zio_injection_enabled && ret == 0)
485 ret = zio_handle_fault_injection(zio, EINVAL);
486
487 if (ret != 0)
488 zio->io_error = SET_ERROR(EIO);
489 }
490 }
491
492 static void
493 zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
494 {
495 int ret;
496 void *tmp;
497 blkptr_t *bp = zio->io_bp;
498 spa_t *spa = zio->io_spa;
499 uint64_t dsobj = zio->io_bookmark.zb_objset;
500 uint64_t lsize = BP_GET_LSIZE(bp);
501 dmu_object_type_t ot = BP_GET_TYPE(bp);
502 uint8_t salt[ZIO_DATA_SALT_LEN];
503 uint8_t iv[ZIO_DATA_IV_LEN];
504 uint8_t mac[ZIO_DATA_MAC_LEN];
505 boolean_t no_crypt = B_FALSE;
506
507 ASSERT(BP_USES_CRYPT(bp));
508 ASSERT3U(size, !=, 0);
509
510 if (zio->io_error != 0)
511 return;
512
513 /*
514 * Verify the cksum of MACs stored in an indirect bp. It will always
515 * be possible to verify this since it does not require an encryption
516 * key.
517 */
518 if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
519 zio_crypt_decode_mac_bp(bp, mac);
520
521 if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
522 /*
523 * We haven't decompressed the data yet, but
524 * zio_crypt_do_indirect_mac_checksum() requires
525 * decompressed data to be able to parse out the MACs
526 * from the indirect block. We decompress it now and
527 * throw away the result after we are finished.
528 */
529 tmp = zio_buf_alloc(lsize);
530 ret = zio_decompress_data(BP_GET_COMPRESS(bp),
531 zio->io_abd, tmp, zio->io_size, lsize,
532 &zio->io_prop.zp_complevel);
533 if (ret != 0) {
534 ret = SET_ERROR(EIO);
535 goto error;
536 }
537 ret = zio_crypt_do_indirect_mac_checksum(B_FALSE,
538 tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac);
539 zio_buf_free(tmp, lsize);
540 } else {
541 ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
542 zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
543 }
544 abd_copy(data, zio->io_abd, size);
545
546 if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
547 ret = zio_handle_decrypt_injection(spa,
548 &zio->io_bookmark, ot, ECKSUM);
549 }
550 if (ret != 0)
551 goto error;
552
553 return;
554 }
555
556 /*
557 * If this is an authenticated block, just check the MAC. It would be
558 * nice to separate this out into its own flag, but when this was done,
559 * we had run out of bits in what is now zio_flag_t. Future cleanup
560 * could make this a flag bit.
561 */
562 if (BP_IS_AUTHENTICATED(bp)) {
563 if (ot == DMU_OT_OBJSET) {
564 ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
565 dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
566 } else {
567 zio_crypt_decode_mac_bp(bp, mac);
568 ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
569 zio->io_abd, size, mac);
570 if (zio_injection_enabled && ret == 0) {
571 ret = zio_handle_decrypt_injection(spa,
572 &zio->io_bookmark, ot, ECKSUM);
573 }
574 }
575 abd_copy(data, zio->io_abd, size);
576
577 if (ret != 0)
578 goto error;
579
580 return;
581 }
582
583 zio_crypt_decode_params_bp(bp, salt, iv);
584
585 if (ot == DMU_OT_INTENT_LOG) {
586 tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
587 zio_crypt_decode_mac_zil(tmp, mac);
588 abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
589 } else {
590 zio_crypt_decode_mac_bp(bp, mac);
591 }
592
593 ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
594 BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
595 zio->io_abd, &no_crypt);
596 if (no_crypt)
597 abd_copy(data, zio->io_abd, size);
598
599 if (ret != 0)
600 goto error;
601
602 return;
603
604 error:
605 /* assert that the key was found unless this was speculative */
606 ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));
607
608 /*
609 * If there was a decryption / authentication error return EIO as
610 * the io_error. If this was not a speculative zio, create an ereport.
611 */
612 if (ret == ECKSUM) {
613 zio->io_error = SET_ERROR(EIO);
614 if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
615 spa_log_error(spa, &zio->io_bookmark,
616 BP_GET_LOGICAL_BIRTH(zio->io_bp));
617 (void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
618 spa, NULL, &zio->io_bookmark, zio, 0);
619 }
620 } else {
621 zio->io_error = ret;
622 }
623 }
624
625 /*
626 * ==========================================================================
627 * I/O parent/child relationships and pipeline interlocks
628 * ==========================================================================
629 */
630 zio_t *
631 zio_walk_parents(zio_t *cio, zio_link_t **zl)
632 {
633 list_t *pl = &cio->io_parent_list;
634
635 *zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
636 if (*zl == NULL)
637 return (NULL);
638
639 ASSERT((*zl)->zl_child == cio);
640 return ((*zl)->zl_parent);
641 }
642
643 zio_t *
644 zio_walk_children(zio_t *pio, zio_link_t **zl)
645 {
646 list_t *cl = &pio->io_child_list;
647
648 ASSERT(MUTEX_HELD(&pio->io_lock));
649
650 *zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
651 if (*zl == NULL)
652 return (NULL);
653
654 ASSERT((*zl)->zl_parent == pio);
655 return ((*zl)->zl_child);
656 }
657
658 zio_t *
659 zio_unique_parent(zio_t *cio)
660 {
661 zio_link_t *zl = NULL;
662 zio_t *pio = zio_walk_parents(cio, &zl);
663
664 VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
665 return (pio);
666 }
667
668 void
669 zio_add_child(zio_t *pio, zio_t *cio)
670 {
671 /*
672 * Logical I/Os can have logical, gang, or vdev children.
673 * Gang I/Os can have gang or vdev children.
674 * Vdev I/Os can only have vdev children.
675 * The following ASSERT captures all of these constraints.
676 */
677 ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
678
679 /* Parent should not have READY stage if child doesn't have it. */
680 IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
681 (cio->io_child_type != ZIO_CHILD_VDEV),
682 (pio->io_pipeline & ZIO_STAGE_READY) == 0);
683
684 zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
685 zl->zl_parent = pio;
686 zl->zl_child = cio;
687
688 mutex_enter(&pio->io_lock);
689 mutex_enter(&cio->io_lock);
690
691 ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
692
693 uint64_t *countp = pio->io_children[cio->io_child_type];
694 for (int w = 0; w < ZIO_WAIT_TYPES; w++)
695 countp[w] += !cio->io_state[w];
696
697 list_insert_head(&pio->io_child_list, zl);
698 list_insert_head(&cio->io_parent_list, zl);
699
700 mutex_exit(&cio->io_lock);
701 mutex_exit(&pio->io_lock);
702 }
703
704 void
705 zio_add_child_first(zio_t *pio, zio_t *cio)
706 {
707 /*
708 * Logical I/Os can have logical, gang, or vdev children.
709 * Gang I/Os can have gang or vdev children.
710 * Vdev I/Os can only have vdev children.
711 * The following ASSERT captures all of these constraints.
712 */
713 ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
714
715 /* Parent should not have READY stage if child doesn't have it. */
716 IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
717 (cio->io_child_type != ZIO_CHILD_VDEV),
718 (pio->io_pipeline & ZIO_STAGE_READY) == 0);
719
720 zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
721 zl->zl_parent = pio;
722 zl->zl_child = cio;
723
724 ASSERT(list_is_empty(&cio->io_parent_list));
725 list_insert_head(&cio->io_parent_list, zl);
726
727 mutex_enter(&pio->io_lock);
728
729 ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
730
731 uint64_t *countp = pio->io_children[cio->io_child_type];
732 for (int w = 0; w < ZIO_WAIT_TYPES; w++)
733 countp[w] += !cio->io_state[w];
734
735 list_insert_head(&pio->io_child_list, zl);
736
737 mutex_exit(&pio->io_lock);
738 }
739
740 static void
741 zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
742 {
743 ASSERT(zl->zl_parent == pio);
744 ASSERT(zl->zl_child == cio);
745
746 mutex_enter(&pio->io_lock);
747 mutex_enter(&cio->io_lock);
748
749 list_remove(&pio->io_child_list, zl);
750 list_remove(&cio->io_parent_list, zl);
751
752 mutex_exit(&cio->io_lock);
753 mutex_exit(&pio->io_lock);
754 kmem_cache_free(zio_link_cache, zl);
755 }
756
757 static boolean_t
758 zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
759 {
760 boolean_t waiting = B_FALSE;
761
762 mutex_enter(&zio->io_lock);
763 ASSERT(zio->io_stall == NULL);
764 for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
765 if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
766 continue;
767
768 uint64_t *countp = &zio->io_children[c][wait];
769 if (*countp != 0) {
770 zio->io_stage >>= 1;
771 ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
772 zio->io_stall = countp;
773 waiting = B_TRUE;
774 break;
775 }
776 }
777 mutex_exit(&zio->io_lock);
778 return (waiting);
779 }
780
781 __attribute__((always_inline))
782 static inline void
783 zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
784 zio_t **next_to_executep)
785 {
786 uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
787 int *errorp = &pio->io_child_error[zio->io_child_type];
788
789 mutex_enter(&pio->io_lock);
790 if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
791 *errorp = zio_worst_error(*errorp, zio->io_error);
792 pio->io_reexecute |= zio->io_reexecute;
793 ASSERT3U(*countp, >, 0);
794
795 (*countp)--;
796
797 if (*countp == 0 && pio->io_stall == countp) {
798 zio_taskq_type_t type =
799 pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
800 ZIO_TASKQ_INTERRUPT;
801 pio->io_stall = NULL;
802 mutex_exit(&pio->io_lock);
803
804 /*
805 * If we can tell the caller to execute this parent next, do
806 * so. We do this if the parent's zio type matches the child's
807 * type, or if it's a zio_null() with no done callback, and so
808 * has no actual work to do. Otherwise dispatch the parent zio
809 * in its own taskq.
810 *
811 * Having the caller execute the parent when possible reduces
812 * locking on the zio taskq's, reduces context switch
813 * overhead, and has no recursion penalty. Note that one
814 * read from disk typically causes at least 3 zio's: a
815 * zio_null(), the logical zio_read(), and then a physical
816 * zio. When the physical ZIO completes, we are able to call
817 * zio_done() on all 3 of these zio's from one invocation of
818 * zio_execute() by returning the parent back to
819 * zio_execute(). Since the parent isn't executed until this
820 * thread returns back to zio_execute(), the caller should do
821 * so promptly.
822 *
823 * In other cases, dispatching the parent prevents
824 * overflowing the stack when we have deeply nested
825 * parent-child relationships, as we do with the "mega zio"
826 * of writes for spa_sync(), and the chain of ZIL blocks.
827 */
828 if (next_to_executep != NULL && *next_to_executep == NULL &&
829 (pio->io_type == zio->io_type ||
830 (pio->io_type == ZIO_TYPE_NULL && !pio->io_done))) {
831 *next_to_executep = pio;
832 } else {
833 zio_taskq_dispatch(pio, type, B_FALSE);
834 }
835 } else {
836 mutex_exit(&pio->io_lock);
837 }
838 }
839
840 static void
841 zio_inherit_child_errors(zio_t *zio, enum zio_child c)
842 {
843 if (zio->io_child_error[c] != 0 && zio->io_error == 0)
844 zio->io_error = zio->io_child_error[c];
845 }
846
847 int
848 zio_bookmark_compare(const void *x1, const void *x2)
849 {
850 const zio_t *z1 = x1;
851 const zio_t *z2 = x2;
852
853 if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
854 return (-1);
855 if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
856 return (1);
857
858 if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
859 return (-1);
860 if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
861 return (1);
862
863 if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
864 return (-1);
865 if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
866 return (1);
867
868 if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
869 return (-1);
870 if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
871 return (1);
872
873 if (z1 < z2)
874 return (-1);
875 if (z1 > z2)
876 return (1);
877
878 return (0);
879 }
880
881 /*
882 * ==========================================================================
883 * Create the various types of I/O (read, write, free, etc)
884 * ==========================================================================
885 */
886 static zio_t *
887 zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
888 abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
889 void *private, zio_type_t type, zio_priority_t priority,
890 zio_flag_t flags, vdev_t *vd, uint64_t offset,
891 const zbookmark_phys_t *zb, enum zio_stage stage,
892 enum zio_stage pipeline)
893 {
894 zio_t *zio;
895
896 IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
897 ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
898 ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
899
900 ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
901 ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
902 ASSERT(vd || stage == ZIO_STAGE_OPEN);
903
904 IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);
905
906 zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
907 memset(zio, 0, sizeof (zio_t));
908
909 mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
910 cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
911
912 list_create(&zio->io_parent_list, sizeof (zio_link_t),
913 offsetof(zio_link_t, zl_parent_node));
914 list_create(&zio->io_child_list, sizeof (zio_link_t),
915 offsetof(zio_link_t, zl_child_node));
916 metaslab_trace_init(&zio->io_alloc_list);
917
918 if (vd != NULL)
919 zio->io_child_type = ZIO_CHILD_VDEV;
920 else if (flags & ZIO_FLAG_GANG_CHILD)
921 zio->io_child_type = ZIO_CHILD_GANG;
922 else if (flags & ZIO_FLAG_DDT_CHILD)
923 zio->io_child_type = ZIO_CHILD_DDT;
924 else
925 zio->io_child_type = ZIO_CHILD_LOGICAL;
926
927 if (bp != NULL) {
928 if (type != ZIO_TYPE_WRITE ||
929 zio->io_child_type == ZIO_CHILD_DDT) {
930 zio->io_bp_copy = *bp;
931 zio->io_bp = &zio->io_bp_copy; /* so caller can free */
932 } else {
933 zio->io_bp = (blkptr_t *)bp;
934 }
935 zio->io_bp_orig = *bp;
936 if (zio->io_child_type == ZIO_CHILD_LOGICAL)
937 zio->io_logical = zio;
938 if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
939 pipeline |= ZIO_GANG_STAGES;
940 }
941
942 zio->io_spa = spa;
943 zio->io_txg = txg;
944 zio->io_done = done;
945 zio->io_private = private;
946 zio->io_type = type;
947 zio->io_priority = priority;
948 zio->io_vd = vd;
949 zio->io_offset = offset;
950 zio->io_orig_abd = zio->io_abd = data;
951 zio->io_orig_size = zio->io_size = psize;
952 zio->io_lsize = lsize;
953 zio->io_orig_flags = zio->io_flags = flags;
954 zio->io_orig_stage = zio->io_stage = stage;
955 zio->io_orig_pipeline = zio->io_pipeline = pipeline;
956 zio->io_pipeline_trace = ZIO_STAGE_OPEN;
957 zio->io_allocator = ZIO_ALLOCATOR_NONE;
958
959 zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY) ||
960 (pipeline & ZIO_STAGE_READY) == 0;
961 zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);
962
963 if (zb != NULL)
964 zio->io_bookmark = *zb;
965
966 if (pio != NULL) {
967 zio->io_metaslab_class = pio->io_metaslab_class;
968 if (zio->io_logical == NULL)
969 zio->io_logical = pio->io_logical;
970 if (zio->io_child_type == ZIO_CHILD_GANG)
971 zio->io_gang_leader = pio->io_gang_leader;
972 zio_add_child_first(pio, zio);
973 }
974
975 taskq_init_ent(&zio->io_tqent);
976
977 return (zio);
978 }
979
980 void
981 zio_destroy(zio_t *zio)
982 {
983 metaslab_trace_fini(&zio->io_alloc_list);
984 list_destroy(&zio->io_parent_list);
985 list_destroy(&zio->io_child_list);
986 mutex_destroy(&zio->io_lock);
987 cv_destroy(&zio->io_cv);
988 kmem_cache_free(zio_cache, zio);
989 }
990
991 /*
992 * A ZIO intended to sit between other ZIOs. It provides synchronization at
993 * the READY and DONE pipeline stages and calls the respective callbacks.
994 */
995 zio_t *
996 zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
997 void *private, zio_flag_t flags)
998 {
999 zio_t *zio;
1000
1001 zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
1002 ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
1003 ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);
1004
1005 return (zio);
1006 }
1007
1008 /*
1009 * A ZIO intended to be the root of a tree. Unlike a null ZIO, it does not
1010 * have a READY pipeline stage (it is ready on creation), so it should not be
1011 * used as a child of any ZIO that may need to wait for its grandchildren's
1012 * READY stage (i.e. any other ZIO type).
1013 */
1014 zio_t *
1015 zio_root(spa_t *spa, zio_done_func_t *done, void *private, zio_flag_t flags)
1016 {
1017 zio_t *zio;
1018
1019 zio = zio_create(NULL, spa, 0, NULL, NULL, 0, 0, done, private,
1020 ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, NULL, 0, NULL,
1021 ZIO_STAGE_OPEN, ZIO_ROOT_PIPELINE);
1022
1023 return (zio);
1024 }
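/*
 * A common usage sketch (hedged; callers differ in flags and callbacks): a
 * root zio gathers the completion of asynchronously issued children:
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	zio_nowait(zio_read(rio, spa, bp, abd, size, done, private,
 *	    ZIO_PRIORITY_SYNC_READ, flags, zb));
 *	error = zio_wait(rio);
 */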
1025
1026 static int
1027 zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
1028 enum blk_verify_flag blk_verify, const char *fmt, ...)
1029 {
1030 va_list adx;
1031 char buf[256];
1032
1033 va_start(adx, fmt);
1034 (void) vsnprintf(buf, sizeof (buf), fmt, adx);
1035 va_end(adx);
1036
1037 zfs_dbgmsg("bad blkptr at %px: "
1038 "DVA[0]=%#llx/%#llx "
1039 "DVA[1]=%#llx/%#llx "
1040 "DVA[2]=%#llx/%#llx "
1041 "prop=%#llx "
1042 "pad=%#llx,%#llx "
1043 "phys_birth=%#llx "
1044 "birth=%#llx "
1045 "fill=%#llx "
1046 "cksum=%#llx/%#llx/%#llx/%#llx",
1047 bp,
1048 (long long)bp->blk_dva[0].dva_word[0],
1049 (long long)bp->blk_dva[0].dva_word[1],
1050 (long long)bp->blk_dva[1].dva_word[0],
1051 (long long)bp->blk_dva[1].dva_word[1],
1052 (long long)bp->blk_dva[2].dva_word[0],
1053 (long long)bp->blk_dva[2].dva_word[1],
1054 (long long)bp->blk_prop,
1055 (long long)bp->blk_pad[0],
1056 (long long)bp->blk_pad[1],
1057 (long long)BP_GET_PHYSICAL_BIRTH(bp),
1058 (long long)BP_GET_LOGICAL_BIRTH(bp),
1059 (long long)bp->blk_fill,
1060 (long long)bp->blk_cksum.zc_word[0],
1061 (long long)bp->blk_cksum.zc_word[1],
1062 (long long)bp->blk_cksum.zc_word[2],
1063 (long long)bp->blk_cksum.zc_word[3]);
1064 switch (blk_verify) {
1065 case BLK_VERIFY_HALT:
1066 zfs_panic_recover("%s: %s", spa_name(spa), buf);
1067 break;
1068 case BLK_VERIFY_LOG:
1069 zfs_dbgmsg("%s: %s", spa_name(spa), buf);
1070 break;
1071 case BLK_VERIFY_ONLY:
1072 break;
1073 }
1074
1075 return (1);
1076 }
1077
1078 /*
1079 * Verify the block pointer fields contain reasonable values. This means
1080 * it only contains known object types, checksum/compression identifiers,
1081 * block sizes within the maximum allowed limits, valid DVAs, etc.
1082 *
1083 * If everything checks out, B_TRUE is returned. The blk_verify
1084 * argument controls the behavior when an invalid field is detected.
1085 *
1086 * Values for blk_verify_flag:
1087 * BLK_VERIFY_ONLY: evaluate the block
1088 * BLK_VERIFY_LOG: evaluate the block and log problems
1089 * BLK_VERIFY_HALT: call zfs_panic_recover on error
1090 *
1091 * Values for blk_config_flag:
1092 * BLK_CONFIG_HELD: caller holds SCL_VDEV for writer
1093 * BLK_CONFIG_NEEDED: caller holds no config lock, SCL_VDEV will be
1094 * obtained for reader
1095 * BLK_CONFIG_SKIP: skip checks which require SCL_VDEV, for better
1096 * performance
1097 */
1098 boolean_t
1099 zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
1100 enum blk_config_flag blk_config, enum blk_verify_flag blk_verify)
1101 {
1102 int errors = 0;
1103
1104 if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
1105 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1106 "blkptr at %px has invalid TYPE %llu",
1107 bp, (longlong_t)BP_GET_TYPE(bp));
1108 }
1109 if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS) {
1110 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1111 "blkptr at %px has invalid CHECKSUM %llu",
1112 bp, (longlong_t)BP_GET_CHECKSUM(bp));
1113 }
1114 if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS) {
1115 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1116 "blkptr at %px has invalid COMPRESS %llu",
1117 bp, (longlong_t)BP_GET_COMPRESS(bp));
1118 }
1119 if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
1120 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1121 "blkptr at %px has invalid LSIZE %llu",
1122 bp, (longlong_t)BP_GET_LSIZE(bp));
1123 }
1124 if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
1125 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1126 "blkptr at %px has invalid PSIZE %llu",
1127 bp, (longlong_t)BP_GET_PSIZE(bp));
1128 }
1129
1130 if (BP_IS_EMBEDDED(bp)) {
1131 if (BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES) {
1132 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1133 "blkptr at %px has invalid ETYPE %llu",
1134 bp, (longlong_t)BPE_GET_ETYPE(bp));
1135 }
1136 }
1137
1138 /*
1139 * Do not verify individual DVAs if the config is not trusted. This
1140 * will be done once the zio is executed in vdev_mirror_map_alloc.
1141 */
1142 if (!spa->spa_trust_config)
1143 return (errors == 0);
1144
1145 switch (blk_config) {
1146 case BLK_CONFIG_HELD:
1147 ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
1148 break;
1149 case BLK_CONFIG_NEEDED:
1150 spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
1151 break;
1152 case BLK_CONFIG_SKIP:
1153 return (errors == 0);
1154 default:
1155 panic("invalid blk_config %u", blk_config);
1156 }
1157
1158 /*
1159 * Pool-specific checks.
1160 *
1161 * Note: it would be nice to verify that the logical birth
1162 * and physical birth are not too large. However,
1163 * spa_freeze() allows the birth time of log blocks (and
1164 * dmu_sync()-ed blocks that are in the log) to be arbitrarily
1165 * large.
1166 */
1167 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
1168 const dva_t *dva = &bp->blk_dva[i];
1169 uint64_t vdevid = DVA_GET_VDEV(dva);
1170
1171 if (vdevid >= spa->spa_root_vdev->vdev_children) {
1172 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1173 "blkptr at %px DVA %u has invalid VDEV %llu",
1174 bp, i, (longlong_t)vdevid);
1175 continue;
1176 }
1177 vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
1178 if (vd == NULL) {
1179 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1180 "blkptr at %px DVA %u has invalid VDEV %llu",
1181 bp, i, (longlong_t)vdevid);
1182 continue;
1183 }
1184 if (vd->vdev_ops == &vdev_hole_ops) {
1185 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1186 "blkptr at %px DVA %u has hole VDEV %llu",
1187 bp, i, (longlong_t)vdevid);
1188 continue;
1189 }
1190 if (vd->vdev_ops == &vdev_missing_ops) {
1191 /*
1192 * "missing" vdevs are valid during import, but we
1193 * don't have their detailed info (e.g. asize), so
1194 * we can't perform any more checks on them.
1195 */
1196 continue;
1197 }
1198 uint64_t offset = DVA_GET_OFFSET(dva);
1199 uint64_t asize = DVA_GET_ASIZE(dva);
1200 if (DVA_GET_GANG(dva))
1201 asize = vdev_gang_header_asize(vd);
1202 if (offset + asize > vd->vdev_asize) {
1203 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1204 "blkptr at %px DVA %u has invalid OFFSET %llu",
1205 bp, i, (longlong_t)offset);
1206 }
1207 }
1208 if (blk_config == BLK_CONFIG_NEEDED)
1209 spa_config_exit(spa, SCL_VDEV, bp);
1210
1211 return (errors == 0);
1212 }
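/*
 * Example call (this mirrors zio_free() below): verify a block pointer before
 * acting on it, taking SCL_VDEV as reader and treating a damaged pointer as
 * fatal:
 *
 *	(void) zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
 */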
1213
1214 boolean_t
1215 zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
1216 {
1217 (void) bp;
1218 uint64_t vdevid = DVA_GET_VDEV(dva);
1219
1220 if (vdevid >= spa->spa_root_vdev->vdev_children)
1221 return (B_FALSE);
1222
1223 vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
1224 if (vd == NULL)
1225 return (B_FALSE);
1226
1227 if (vd->vdev_ops == &vdev_hole_ops)
1228 return (B_FALSE);
1229
1230 if (vd->vdev_ops == &vdev_missing_ops) {
1231 return (B_FALSE);
1232 }
1233
1234 uint64_t offset = DVA_GET_OFFSET(dva);
1235 uint64_t asize = DVA_GET_ASIZE(dva);
1236
1237 if (DVA_GET_GANG(dva))
1238 asize = vdev_gang_header_asize(vd);
1239 if (offset + asize > vd->vdev_asize)
1240 return (B_FALSE);
1241
1242 return (B_TRUE);
1243 }
1244
1245 zio_t *
1246 zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
1247 abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
1248 zio_priority_t priority, zio_flag_t flags, const zbookmark_phys_t *zb)
1249 {
1250 zio_t *zio;
1251
1252 zio = zio_create(pio, spa, BP_GET_BIRTH(bp), bp,
1253 data, size, size, done, private,
1254 ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
1255 ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
1256 ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);
1257
1258 return (zio);
1259 }
1260
1261 zio_t *
1262 zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
1263 abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
1264 zio_done_func_t *ready, zio_done_func_t *children_ready,
1265 zio_done_func_t *done, void *private, zio_priority_t priority,
1266 zio_flag_t flags, const zbookmark_phys_t *zb)
1267 {
1268 zio_t *zio;
1269
1270 ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
1271 zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
1272 zp->zp_compress >= ZIO_COMPRESS_OFF &&
1273 zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
1274 DMU_OT_IS_VALID(zp->zp_type) &&
1275 zp->zp_level < 32 &&
1276 zp->zp_copies > 0 &&
1277 zp->zp_copies <= spa_max_replication(spa));
1278
1279 zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
1280 ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
1281 ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
1282 ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);
1283
1284 zio->io_ready = ready;
1285 zio->io_children_ready = children_ready;
1286 zio->io_prop = *zp;
1287
1288 /*
1289 * Data can be NULL if we are going to call zio_write_override() to
1290 * provide the already-allocated BP. But we may need the data to
1291 * verify a dedup hit (if requested). In this case, don't try to
1292 * dedup (just take the already-allocated BP verbatim). Encrypted
1293 * dedup blocks need data as well so we also disable dedup in this
1294 * case.
1295 */
1296 if (data == NULL &&
1297 (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
1298 zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
1299 }
1300
1301 return (zio);
1302 }
1303
1304 zio_t *
1305 zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
1306 uint64_t size, zio_done_func_t *done, void *private,
1307 zio_priority_t priority, zio_flag_t flags, zbookmark_phys_t *zb)
1308 {
1309 zio_t *zio;
1310
1311 zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
1312 ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
1313 ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);
1314
1315 return (zio);
1316 }
1317
1318 void
1319 zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite,
1320 boolean_t brtwrite)
1321 {
1322 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
1323 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1324 ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
1325 ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
1326 ASSERT(!brtwrite || !nopwrite);
1327
1328 /*
1329 * We must reset the io_prop to match the values that existed
1330 * when the bp was first written by dmu_sync() keeping in mind
1331 * that nopwrite and dedup are mutually exclusive.
1332 */
1333 zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
1334 zio->io_prop.zp_nopwrite = nopwrite;
1335 zio->io_prop.zp_brtwrite = brtwrite;
1336 zio->io_prop.zp_copies = copies;
1337 zio->io_bp_override = bp;
1338 }
1339
1340 void
1341 zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
1342 {
1343
1344 (void) zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
1345
1346 /*
1347 * The check for EMBEDDED is a performance optimization. We
1348 * process the free here (by ignoring it) rather than
1349 * putting it on the list and then processing it in zio_free_sync().
1350 */
1351 if (BP_IS_EMBEDDED(bp))
1352 return;
1353
1354 /*
1355 * Frees that are for the currently-syncing txg, are not going to be
1356 * deferred, and which will not need to do a read (i.e. not GANG or
1357 * DEDUP), can be processed immediately. Otherwise, put them on the
1358 * in-memory list for later processing.
1359 *
1360 * Note that we only defer frees after zfs_sync_pass_deferred_free
1361 * when the log space map feature is disabled. [see relevant comment
1362 * in spa_sync_iterate_to_convergence()]
1363 */
1364 if (BP_IS_GANG(bp) ||
1365 BP_GET_DEDUP(bp) ||
1366 txg != spa->spa_syncing_txg ||
1367 (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
1368 !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) ||
1369 brt_maybe_exists(spa, bp)) {
1370 metaslab_check_free(spa, bp);
1371 bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
1372 } else {
1373 VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
1374 }
1375 }
1376
1377 /*
1378 * To improve performance, this function may return NULL if we were able
1379 * to do the free immediately. This avoids the cost of creating a zio
1380 * (and linking it to the parent, etc).
1381 */
1382 zio_t *
1383 zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
1384 zio_flag_t flags)
1385 {
1386 ASSERT(!BP_IS_HOLE(bp));
1387 ASSERT(spa_syncing_txg(spa) == txg);
1388
1389 if (BP_IS_EMBEDDED(bp))
1390 return (NULL);
1391
1392 metaslab_check_free(spa, bp);
1393 arc_freed(spa, bp);
1394 dsl_scan_freed(spa, bp);
1395
1396 if (BP_IS_GANG(bp) ||
1397 BP_GET_DEDUP(bp) ||
1398 brt_maybe_exists(spa, bp)) {
1399 /*
1400 * GANG, DEDUP and BRT blocks can induce a read (for the gang
1401 * block header, the DDT or the BRT), so issue them
1402 * asynchronously so that this thread is not tied up.
1403 */
1404 enum zio_stage stage =
1405 ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;
1406
1407 return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
1408 BP_GET_PSIZE(bp), NULL, NULL,
1409 ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
1410 flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
1411 } else {
1412 metaslab_free(spa, bp, txg, B_FALSE);
1413 return (NULL);
1414 }
1415 }
1416
1417 zio_t *
1418 zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
1419 zio_done_func_t *done, void *private, zio_flag_t flags)
1420 {
1421 zio_t *zio;
1422
1423 (void) zfs_blkptr_verify(spa, bp, (flags & ZIO_FLAG_CONFIG_WRITER) ?
1424 BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
1425
1426 if (BP_IS_EMBEDDED(bp))
1427 return (zio_null(pio, spa, NULL, NULL, NULL, 0));
1428
1429 /*
1430 * A claim is an allocation of a specific block. Claims are needed
1431 * to support immediate writes in the intent log. The issue is that
1432 * immediate writes contain committed data, but in a txg that was
1433 * *not* committed. Upon opening the pool after an unclean shutdown,
1434 * the intent log claims all blocks that contain immediate write data
1435 * so that the SPA knows they're in use.
1436 *
1437 * All claims *must* be resolved in the first txg -- before the SPA
1438 * starts allocating blocks -- so that nothing is allocated twice.
1439 * If txg == 0 we just verify that the block is claimable.
1440 */
1441 ASSERT3U(BP_GET_LOGICAL_BIRTH(&spa->spa_uberblock.ub_rootbp), <,
1442 spa_min_claim_txg(spa));
1443 ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
1444 ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa)); /* zdb(8) */
1445
1446 zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
1447 BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
1448 flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
1449 ASSERT0(zio->io_queued_timestamp);
1450
1451 return (zio);
1452 }
1453
1454 zio_t *
1455 zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1456 zio_done_func_t *done, void *private, zio_priority_t priority,
1457 zio_flag_t flags, enum trim_flag trim_flags)
1458 {
1459 zio_t *zio;
1460
1461 ASSERT0(vd->vdev_children);
1462 ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
1463 ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
1464 ASSERT3U(size, !=, 0);
1465
1466 zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
1467 private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
1468 vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
1469 zio->io_trim_flags = trim_flags;
1470
1471 return (zio);
1472 }
1473
1474 zio_t *
1475 zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1476 abd_t *data, int checksum, zio_done_func_t *done, void *private,
1477 zio_priority_t priority, zio_flag_t flags, boolean_t labels)
1478 {
1479 zio_t *zio;
1480
1481 ASSERT(vd->vdev_children == 0);
1482 ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
1483 offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
1484 ASSERT3U(offset + size, <=, vd->vdev_psize);
1485
1486 zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
1487 private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
1488 offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);
1489
1490 zio->io_prop.zp_checksum = checksum;
1491
1492 return (zio);
1493 }
1494
1495 zio_t *
1496 zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1497 abd_t *data, int checksum, zio_done_func_t *done, void *private,
1498 zio_priority_t priority, zio_flag_t flags, boolean_t labels)
1499 {
1500 zio_t *zio;
1501
1502 ASSERT(vd->vdev_children == 0);
1503 ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
1504 offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
1505 ASSERT3U(offset + size, <=, vd->vdev_psize);
1506
1507 zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
1508 private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
1509 offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);
1510
1511 zio->io_prop.zp_checksum = checksum;
1512
1513 if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
1514 /*
1515 * zec checksums are necessarily destructive -- they modify
1516 * the end of the write buffer to hold the verifier/checksum.
1517 * Therefore, we must make a local copy in case the data is
1518 * being written to multiple places in parallel.
1519 */
1520 abd_t *wbuf = abd_alloc_sametype(data, size);
1521 abd_copy(wbuf, data, size);
1522
1523 zio_push_transform(zio, wbuf, size, size, NULL);
1524 }
1525
1526 return (zio);
1527 }
1528
1529 /*
1530 * Create a child I/O to do some work for us.
1531 */
1532 zio_t *
1533 zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
1534 abd_t *data, uint64_t size, int type, zio_priority_t priority,
1535 zio_flag_t flags, zio_done_func_t *done, void *private)
1536 {
1537 enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
1538 zio_t *zio;
1539
1540 /*
1541 * vdev child I/Os do not propagate their error to the parent.
1542 * Therefore, for correct operation the caller *must* check for
1543 * and handle the error in the child i/o's done callback.
1544 * The only exceptions are i/os that we don't care about
1545 * (OPTIONAL or REPAIR).
1546 */
1547 ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
1548 done != NULL);
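	/*
	 * A hedged sketch of that contract (the callback and handler names
	 * are hypothetical): the caller-supplied done callback is where a
	 * child error must be observed, e.g.
	 *
	 *	static void
	 *	my_child_done(zio_t *zio)
	 *	{
	 *		if (zio->io_error != 0)
	 *			my_handle_error(zio->io_private, zio->io_error);
	 *	}
	 */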
1549
1550 if (type == ZIO_TYPE_READ && bp != NULL) {
1551 /*
1552 * If we have the bp, then the child should perform the
1553 * checksum and the parent need not. This pushes error
1554 * detection as close to the leaves as possible and
1555 * eliminates redundant checksums in the interior nodes.
1556 */
1557 pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
1558 pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
1559 }
1560
1561 if (vd->vdev_ops->vdev_op_leaf) {
1562 ASSERT0(vd->vdev_children);
1563 offset += VDEV_LABEL_START_SIZE;
1564 }
1565
1566 flags |= ZIO_VDEV_CHILD_FLAGS(pio);
1567
1568 /*
1569 * If we've decided to do a repair, the write is not speculative --
1570 * even if the original read was.
1571 */
1572 if (flags & ZIO_FLAG_IO_REPAIR)
1573 flags &= ~ZIO_FLAG_SPECULATIVE;
1574
1575 /*
1576 * If we're creating a child I/O that is not associated with a
1577 * top-level vdev, then the child zio is not an allocating I/O.
1578 * If this is a retried I/O then we ignore it since we will
1579 * have already processed the original allocating I/O.
1580 */
1581 if (flags & ZIO_FLAG_IO_ALLOCATING &&
1582 (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
1583 ASSERT(pio->io_metaslab_class != NULL);
1584 ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
1585 ASSERT(type == ZIO_TYPE_WRITE);
1586 ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
1587 ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
1588 ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
1589 pio->io_child_type == ZIO_CHILD_GANG);
1590
1591 flags &= ~ZIO_FLAG_IO_ALLOCATING;
1592 }
1593
1594 zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
1595 done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
1596 ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
1597 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
1598
1599 return (zio);
1600 }
1601
1602 zio_t *
1603 zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
1604 zio_type_t type, zio_priority_t priority, zio_flag_t flags,
1605 zio_done_func_t *done, void *private)
1606 {
1607 zio_t *zio;
1608
1609 ASSERT(vd->vdev_ops->vdev_op_leaf);
1610
1611 zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
1612 data, size, size, done, private, type, priority,
1613 flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
1614 vd, offset, NULL,
1615 ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);
1616
1617 return (zio);
1618 }
1619
1620
1621 /*
1622 * Send a flush command to the given vdev. Unlike most zio creation functions,
1623 * the flush zios are issued immediately. You can wait on pio to pause until
1624 * the flushes complete.
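 *
 * A typical pattern (sketch): create a parent with zio_root(), call
 * zio_flush() against it for each vdev of interest, then zio_wait() on
 * the parent to block until every flush has completed.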
1625 */
1626 void
1627 zio_flush(zio_t *pio, vdev_t *vd)
1628 {
1629 const zio_flag_t flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
1630 ZIO_FLAG_DONT_RETRY;
1631
1632 if (vd->vdev_nowritecache)
1633 return;
1634
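	/*
	 * Issue the flush directly to leaf vdevs; for interior vdevs,
	 * recurse and flush each child instead.
	 */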
1635 if (vd->vdev_children == 0) {
1636 zio_nowait(zio_create(pio, vd->vdev_spa, 0, NULL, NULL, 0, 0,
1637 NULL, NULL, ZIO_TYPE_FLUSH, ZIO_PRIORITY_NOW, flags, vd, 0,
1638 NULL, ZIO_STAGE_OPEN, ZIO_FLUSH_PIPELINE));
1639 } else {
1640 for (uint64_t c = 0; c < vd->vdev_children; c++)
1641 zio_flush(pio, vd->vdev_child[c]);
1642 }
1643 }
1644
1645 void
1646 zio_shrink(zio_t *zio, uint64_t size)
1647 {
1648 ASSERT3P(zio->io_executor, ==, NULL);
1649 ASSERT3U(zio->io_orig_size, ==, zio->io_size);
1650 ASSERT3U(size, <=, zio->io_size);
1651
1652 /*
1653 * We don't shrink for raidz because of problems with the
1654 * reconstruction when reading back less than the block size.
1655 * Note, BP_IS_RAIDZ() assumes no compression.
1656 */
1657 ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
1658 if (!BP_IS_RAIDZ(zio->io_bp)) {
1659 /* we are not doing a raw write */
1660 ASSERT3U(zio->io_size, ==, zio->io_lsize);
1661 zio->io_orig_size = zio->io_size = zio->io_lsize = size;
1662 }
1663 }
1664
1665 /*
1666 * Round provided allocation size up to a value that can be allocated
1667 * by at least some vdev(s) in the pool with minimum or no additional
1668 * padding and without extra space usage on others
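 *
 * For example, assuming spa_min_alloc is the smallest and spa_gcd_alloc
 * the greatest common divisor of the vdevs' minimum allocation sizes, a
 * pool containing only ashift=12 vdevs would turn a 3000-byte request
 * into 4096 bytes and a 5000-byte request into 8192 bytes.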
1669 */
1670 static uint64_t
1671 zio_roundup_alloc_size(spa_t *spa, uint64_t size)
1672 {
1673 if (size > spa->spa_min_alloc)
1674 return (roundup(size, spa->spa_gcd_alloc));
1675 return (spa->spa_min_alloc);
1676 }
1677
1678 /*
1679 * ==========================================================================
1680 * Prepare to read and write logical blocks
1681 * ==========================================================================
1682 */
1683
1684 static zio_t *
1685 zio_read_bp_init(zio_t *zio)
1686 {
1687 blkptr_t *bp = zio->io_bp;
1688 uint64_t psize =
1689 BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
1690
1691 ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
1692
1693 if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
1694 zio->io_child_type == ZIO_CHILD_LOGICAL &&
1695 !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
1696 zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
1697 psize, psize, zio_decompress);
1698 }
1699
1700 if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
1701 BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
1702 zio->io_child_type == ZIO_CHILD_LOGICAL) {
1703 zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
1704 psize, psize, zio_decrypt);
1705 }
1706
1707 if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
1708 int psize = BPE_GET_PSIZE(bp);
1709 void *data = abd_borrow_buf(zio->io_abd, psize);
1710
1711 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1712 decode_embedded_bp_compressed(bp, data);
1713 abd_return_buf_copy(zio->io_abd, data, psize);
1714 } else {
1715 ASSERT(!BP_IS_EMBEDDED(bp));
1716 }
1717
1718 if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
1719 zio->io_pipeline = ZIO_DDT_READ_PIPELINE;
1720
1721 return (zio);
1722 }
1723
1724 static zio_t *
1725 zio_write_bp_init(zio_t *zio)
1726 {
1727 if (!IO_IS_ALLOCATING(zio))
1728 return (zio);
1729
1730 ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1731
1732 if (zio->io_bp_override) {
1733 blkptr_t *bp = zio->io_bp;
1734 zio_prop_t *zp = &zio->io_prop;
1735
1736 ASSERT(BP_GET_LOGICAL_BIRTH(bp) != zio->io_txg);
1737
1738 *bp = *zio->io_bp_override;
1739 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1740
1741 if (zp->zp_brtwrite)
1742 return (zio);
1743
1744 ASSERT(!BP_GET_DEDUP(zio->io_bp_override));
1745
1746 if (BP_IS_EMBEDDED(bp))
1747 return (zio);
1748
1749 /*
1750 * If we've been overridden and nopwrite is set then
1751 * set the flag accordingly to indicate that a nopwrite
1752 * has already occurred.
1753 */
1754 if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
1755 ASSERT(!zp->zp_dedup);
1756 ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
1757 zio->io_flags |= ZIO_FLAG_NOPWRITE;
1758 return (zio);
1759 }
1760
1761 ASSERT(!zp->zp_nopwrite);
1762
1763 if (BP_IS_HOLE(bp) || !zp->zp_dedup)
1764 return (zio);
1765
1766 ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
1767 ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);
1768
1769 if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
1770 !zp->zp_encrypt) {
1771 BP_SET_DEDUP(bp, 1);
1772 zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
1773 return (zio);
1774 }
1775
1776 /*
1777 * We were unable to handle this as an override bp, treat
1778 * it as a regular write I/O.
1779 */
1780 zio->io_bp_override = NULL;
1781 *bp = zio->io_bp_orig;
1782 zio->io_pipeline = zio->io_orig_pipeline;
1783 }
1784
1785 return (zio);
1786 }
1787
1788 static zio_t *
1789 zio_write_compress(zio_t *zio)
1790 {
1791 spa_t *spa = zio->io_spa;
1792 zio_prop_t *zp = &zio->io_prop;
1793 enum zio_compress compress = zp->zp_compress;
1794 blkptr_t *bp = zio->io_bp;
1795 uint64_t lsize = zio->io_lsize;
1796 uint64_t psize = zio->io_size;
1797 uint32_t pass = 1;
1798
1799 /*
1800 * If our children haven't all reached the ready stage,
1801 * wait for them and then repeat this pipeline stage.
1802 */
1803 if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
1804 ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
1805 return (NULL);
1806 }
1807
1808 if (!IO_IS_ALLOCATING(zio))
1809 return (zio);
1810
1811 if (zio->io_children_ready != NULL) {
1812 /*
1813 * Now that all our children are ready, run the callback
1814 * associated with this zio in case it wants to modify the
1815 * data to be written.
1816 */
1817 ASSERT3U(zp->zp_level, >, 0);
1818 zio->io_children_ready(zio);
1819 }
1820
1821 ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1822 ASSERT(zio->io_bp_override == NULL);
1823
1824 if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg) {
1825 /*
1826 * We're rewriting an existing block, which means we're
1827 * working on behalf of spa_sync(). For spa_sync() to
1828 * converge, it must eventually be the case that we don't
1829 * have to allocate new blocks. But compression changes
1830 * the blocksize, which forces a reallocate, and makes
1831 * convergence take longer. Therefore, after the first
1832 * few passes, stop compressing to ensure convergence.
1833 */
1834 pass = spa_sync_pass(spa);
1835
1836 ASSERT(zio->io_txg == spa_syncing_txg(spa));
1837 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1838 ASSERT(!BP_GET_DEDUP(bp));
1839
1840 if (pass >= zfs_sync_pass_dont_compress)
1841 compress = ZIO_COMPRESS_OFF;
1842
1843 /* Make sure someone doesn't change their mind on overwrites */
1844 ASSERT(BP_IS_EMBEDDED(bp) || BP_IS_GANG(bp) ||
1845 MIN(zp->zp_copies, spa_max_replication(spa))
1846 == BP_GET_NDVAS(bp));
1847 }
1848
1849 /* If it's a compressed write that is not raw, compress the buffer. */
1850 if (compress != ZIO_COMPRESS_OFF &&
1851 !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
1852 void *cbuf = NULL;
1853 psize = zio_compress_data(compress, zio->io_abd, &cbuf, lsize,
1854 zp->zp_complevel);
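		/*
		 * A psize of zero means the data compressed away entirely
		 * (the block becomes a hole below).  A psize at or above
		 * lsize means compression gained nothing, so the block is
		 * stored uncompressed instead.
		 */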
1855 if (psize == 0) {
1856 compress = ZIO_COMPRESS_OFF;
1857 } else if (psize >= lsize) {
1858 compress = ZIO_COMPRESS_OFF;
1859 if (cbuf != NULL)
1860 zio_buf_free(cbuf, lsize);
1861 } else if (!zp->zp_dedup && !zp->zp_encrypt &&
1862 psize <= BPE_PAYLOAD_SIZE &&
1863 zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
1864 spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
1865 encode_embedded_bp_compressed(bp,
1866 cbuf, compress, lsize, psize);
1867 BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
1868 BP_SET_TYPE(bp, zio->io_prop.zp_type);
1869 BP_SET_LEVEL(bp, zio->io_prop.zp_level);
1870 zio_buf_free(cbuf, lsize);
1871 BP_SET_LOGICAL_BIRTH(bp, zio->io_txg);
1872 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1873 ASSERT(spa_feature_is_active(spa,
1874 SPA_FEATURE_EMBEDDED_DATA));
1875 return (zio);
1876 } else {
1877 /*
1878 * Round compressed size up to the minimum allocation
1879 * size of the smallest-ashift device, and zero the
1880 * tail. This ensures that the compressed size of the
1881 * BP (and thus compressratio property) is correct,
1882 * in that we charge for the padding used to fill out
1883 * the last sector.
1884 */
1885 size_t rounded = (size_t)zio_roundup_alloc_size(spa,
1886 psize);
1887 if (rounded >= lsize) {
1888 compress = ZIO_COMPRESS_OFF;
1889 zio_buf_free(cbuf, lsize);
1890 psize = lsize;
1891 } else {
1892 abd_t *cdata = abd_get_from_buf(cbuf, lsize);
1893 abd_take_ownership_of_buf(cdata, B_TRUE);
1894 abd_zero_off(cdata, psize, rounded - psize);
1895 psize = rounded;
1896 zio_push_transform(zio, cdata,
1897 psize, lsize, NULL);
1898 }
1899 }
1900
1901 /*
1902 * We were unable to handle this as an override bp, treat
1903 * it as a regular write I/O.
1904 */
1905 zio->io_bp_override = NULL;
1906 *bp = zio->io_bp_orig;
1907 zio->io_pipeline = zio->io_orig_pipeline;
1908
1909 } else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
1910 zp->zp_type == DMU_OT_DNODE) {
1911 /*
1912 * The DMU actually relies on the zio layer's compression
1913 * to free metadnode blocks that have had all contained
1914 * dnodes freed. As a result, even when doing a raw
1915 * receive, we must check whether the block can be compressed
1916 * to a hole.
1917 */
1918 psize = zio_compress_data(ZIO_COMPRESS_EMPTY,
1919 zio->io_abd, NULL, lsize, zp->zp_complevel);
1920 if (psize == 0 || psize >= lsize)
1921 compress = ZIO_COMPRESS_OFF;
1922 } else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS &&
1923 !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) {
1924 /*
1925 * If we are raw receiving an encrypted dataset we should not
1926 * take this codepath because it will change the on-disk block
1927 * and decryption will fail.
1928 */
1929 size_t rounded = MIN((size_t)zio_roundup_alloc_size(spa, psize),
1930 lsize);
1931
1932 if (rounded != psize) {
1933 abd_t *cdata = abd_alloc_linear(rounded, B_TRUE);
1934 abd_zero_off(cdata, psize, rounded - psize);
1935 abd_copy_off(cdata, zio->io_abd, 0, 0, psize);
1936 psize = rounded;
1937 zio_push_transform(zio, cdata,
1938 psize, rounded, NULL);
1939 }
1940 } else {
1941 ASSERT3U(psize, !=, 0);
1942 }
1943
1944 /*
1945 * The final pass of spa_sync() must be all rewrites, but the first
1946 * few passes offer a trade-off: allocating blocks defers convergence,
1947 * but newly allocated blocks are sequential, so they can be written
1948 * to disk faster. Therefore, we allow the first few passes of
1949 * spa_sync() to allocate new blocks, but force rewrites after that.
1950 * There should only be a handful of blocks after pass 1 in any case.
1951 */
1952 if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg &&
1953 BP_GET_PSIZE(bp) == psize &&
1954 pass >= zfs_sync_pass_rewrite) {
1955 VERIFY3U(psize, !=, 0);
1956 enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
1957
1958 zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
1959 zio->io_flags |= ZIO_FLAG_IO_REWRITE;
1960 } else {
1961 BP_ZERO(bp);
1962 zio->io_pipeline = ZIO_WRITE_PIPELINE;
1963 }
1964
1965 if (psize == 0) {
1966 if (BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig) != 0 &&
1967 spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
1968 BP_SET_LSIZE(bp, lsize);
1969 BP_SET_TYPE(bp, zp->zp_type);
1970 BP_SET_LEVEL(bp, zp->zp_level);
1971 BP_SET_BIRTH(bp, zio->io_txg, 0);
1972 }
1973 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1974 } else {
1975 ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
1976 BP_SET_LSIZE(bp, lsize);
1977 BP_SET_TYPE(bp, zp->zp_type);
1978 BP_SET_LEVEL(bp, zp->zp_level);
1979 BP_SET_PSIZE(bp, psize);
1980 BP_SET_COMPRESS(bp, compress);
1981 BP_SET_CHECKSUM(bp, zp->zp_checksum);
1982 BP_SET_DEDUP(bp, zp->zp_dedup);
1983 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
1984 if (zp->zp_dedup) {
1985 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1986 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
1987 ASSERT(!zp->zp_encrypt ||
1988 DMU_OT_IS_ENCRYPTED(zp->zp_type));
1989 zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
1990 }
1991 if (zp->zp_nopwrite) {
1992 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1993 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
1994 zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
1995 }
1996 }
1997 return (zio);
1998 }
1999
2000 static zio_t *
2001 zio_free_bp_init(zio_t *zio)
2002 {
2003 blkptr_t *bp = zio->io_bp;
2004
2005 if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
2006 if (BP_GET_DEDUP(bp))
2007 zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
2008 }
2009
2010 ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
2011
2012 return (zio);
2013 }
2014
2015 /*
2016 * ==========================================================================
2017 * Execute the I/O pipeline
2018 * ==========================================================================
2019 */
2020
2021 static void
2022 zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
2023 {
2024 spa_t *spa = zio->io_spa;
2025 zio_type_t t = zio->io_type;
2026
2027 /*
2028 * If we're a config writer or a probe, the normal issue and
2029 * interrupt threads may all be blocked waiting for the config lock.
2030 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
2031 */
2032 if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
2033 t = ZIO_TYPE_NULL;
2034
2035 /*
2036 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
2037 */
2038 if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
2039 t = ZIO_TYPE_NULL;
2040
2041 /*
2042 * If this is a high priority I/O, then use the high priority taskq if
2043 * available or cut the line otherwise.
2044 */
2045 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) {
2046 if (spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
2047 q++;
2048 else
2049 cutinline = B_TRUE;
2050 }
2051
2052 ASSERT3U(q, <, ZIO_TASKQ_TYPES);
2053
2054 spa_taskq_dispatch(spa, t, q, zio_execute, zio, cutinline);
2055 }
2056
2057 static boolean_t
2058 zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
2059 {
2060 spa_t *spa = zio->io_spa;
2061
2062 taskq_t *tq = taskq_of_curthread();
2063
2064 for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
2065 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
2066 uint_t i;
2067 for (i = 0; i < tqs->stqs_count; i++) {
2068 if (tqs->stqs_taskq[i] == tq)
2069 return (B_TRUE);
2070 }
2071 }
2072
2073 return (B_FALSE);
2074 }
2075
2076 static zio_t *
2077 zio_issue_async(zio_t *zio)
2078 {
2079 ASSERT((zio->io_type != ZIO_TYPE_WRITE) || ZIO_HAS_ALLOCATOR(zio));
2080 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
2081 return (NULL);
2082 }
2083
2084 void
2085 zio_interrupt(void *zio)
2086 {
2087 zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
2088 }
2089
2090 void
2091 zio_delay_interrupt(zio_t *zio)
2092 {
2093 /*
2094 * The timeout_generic() function isn't defined in userspace, so
2095 * rather than trying to implement the function, the zio delay
2096 * functionality has been disabled for userspace builds.
2097 */
2098
2099 #ifdef _KERNEL
2100 /*
2101 * If io_target_timestamp is zero, then no delay has been registered
2102 * for this IO, thus jump to the end of this function and "skip" the
2103 * delay, issuing it directly to the zio layer.
2104 */
2105 if (zio->io_target_timestamp != 0) {
2106 hrtime_t now = gethrtime();
2107
2108 if (now >= zio->io_target_timestamp) {
2109 /*
2110 * This IO has already taken longer than the target
2111 * delay to complete, so we don't want to delay it
2112 * any longer; we "miss" the delay and issue it
2113 * directly to the zio layer. This is likely due to
2114 * the target latency being set to a value less than
2115 * the underlying hardware can satisfy (e.g. delay
2116 * set to 1ms, but the disks take 10ms to complete an
2117 * IO request).
2118 */
2119
2120 DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
2121 hrtime_t, now);
2122
2123 zio_interrupt(zio);
2124 } else {
2125 taskqid_t tid;
2126 hrtime_t diff = zio->io_target_timestamp - now;
2127 clock_t expire_at_tick = ddi_get_lbolt() +
2128 NSEC_TO_TICK(diff);
2129
2130 DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
2131 hrtime_t, now, hrtime_t, diff);
2132
2133 if (NSEC_TO_TICK(diff) == 0) {
2134 /* Our delay is less than a jiffy - just spin */
2135 zfs_sleep_until(zio->io_target_timestamp);
2136 zio_interrupt(zio);
2137 } else {
2138 /*
2139 * Use taskq_dispatch_delay() in the place of
2140 * OpenZFS's timeout_generic().
2141 */
2142 tid = taskq_dispatch_delay(system_taskq,
2143 zio_interrupt, zio, TQ_NOSLEEP,
2144 expire_at_tick);
2145 if (tid == TASKQID_INVALID) {
2146 /*
2147 * Couldn't allocate a task. Just
2148 * finish the zio without a delay.
2149 */
2150 zio_interrupt(zio);
2151 }
2152 }
2153 }
2154 return;
2155 }
2156 #endif
2157 DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
2158 zio_interrupt(zio);
2159 }
2160
2161 static void
2162 zio_deadman_impl(zio_t *pio, int ziodepth)
2163 {
2164 zio_t *cio, *cio_next;
2165 zio_link_t *zl = NULL;
2166 vdev_t *vd = pio->io_vd;
2167
2168 if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) {
2169 vdev_queue_t *vq = vd ? &vd->vdev_queue : NULL;
2170 zbookmark_phys_t *zb = &pio->io_bookmark;
2171 uint64_t delta = gethrtime() - pio->io_timestamp;
2172 uint64_t failmode = spa_get_deadman_failmode(pio->io_spa);
2173
2174 zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu "
2175 "delta=%llu queued=%llu io=%llu "
2176 "path=%s "
2177 "last=%llu type=%d "
2178 "priority=%d flags=0x%llx stage=0x%x "
2179 "pipeline=0x%x pipeline-trace=0x%x "
2180 "objset=%llu object=%llu "
2181 "level=%llu blkid=%llu "
2182 "offset=%llu size=%llu "
2183 "error=%d",
2184 ziodepth, pio, pio->io_timestamp,
2185 (u_longlong_t)delta, pio->io_delta, pio->io_delay,
2186 vd ? vd->vdev_path : "NULL",
2187 vq ? vq->vq_io_complete_ts : 0, pio->io_type,
2188 pio->io_priority, (u_longlong_t)pio->io_flags,
2189 pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace,
2190 (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
2191 (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid,
2192 (u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size,
2193 pio->io_error);
2194 (void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN,
2195 pio->io_spa, vd, zb, pio, 0);
2196
2197 if (failmode == ZIO_FAILURE_MODE_CONTINUE &&
2198 taskq_empty_ent(&pio->io_tqent)) {
2199 zio_interrupt(pio);
2200 }
2201 }
2202
2203 mutex_enter(&pio->io_lock);
2204 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
2205 cio_next = zio_walk_children(pio, &zl);
2206 zio_deadman_impl(cio, ziodepth + 1);
2207 }
2208 mutex_exit(&pio->io_lock);
2209 }
2210
2211 /*
2212 * Log the critical information describing this zio and all of its children
2213 * using the zfs_dbgmsg() interface, then post a deadman event for the ZED.
2214 */
2215 void
2216 zio_deadman(zio_t *pio, const char *tag)
2217 {
2218 spa_t *spa = pio->io_spa;
2219 char *name = spa_name(spa);
2220
2221 if (!zfs_deadman_enabled || spa_suspended(spa))
2222 return;
2223
2224 zio_deadman_impl(pio, 0);
2225
2226 switch (spa_get_deadman_failmode(spa)) {
2227 case ZIO_FAILURE_MODE_WAIT:
2228 zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name);
2229 break;
2230
2231 case ZIO_FAILURE_MODE_CONTINUE:
2232 zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name);
2233 break;
2234
2235 case ZIO_FAILURE_MODE_PANIC:
2236 fm_panic("%s determined I/O to pool '%s' is hung.", tag, name);
2237 break;
2238 }
2239 }
2240
2241 /*
2242 * Execute the I/O pipeline until one of the following occurs:
2243 * (1) the I/O completes; (2) the pipeline stalls waiting for
2244 * dependent child I/Os; (3) the I/O issues, so we're waiting
2245 * for an I/O completion interrupt; (4) the I/O is delegated by
2246 * vdev-level caching or aggregation; (5) the I/O is deferred
2247 * due to vdev-level queueing; (6) the I/O is handed off to
2248 * another thread. In all cases, the pipeline stops whenever
2249 * there's no CPU work; it never burns a thread in cv_wait_io().
2250 *
2251 * There's no locking on io_stage because there's no legitimate way
2252 * for multiple threads to be attempting to process the same I/O.
2253 */
2254 static zio_pipe_stage_t *zio_pipeline[];
2255
2256 /*
2257 * zio_execute() is a wrapper around the static function
2258 * __zio_execute() so that we can force __zio_execute() to be
2259 * inlined. This reduces stack overhead which is important
2260 * because __zio_execute() is called recursively in several zio
2261 * code paths. zio_execute() itself cannot be inlined because
2262 * it is externally visible.
2263 */
2264 void
2265 zio_execute(void *zio)
2266 {
2267 fstrans_cookie_t cookie;
2268
2269 cookie = spl_fstrans_mark();
2270 __zio_execute(zio);
2271 spl_fstrans_unmark(cookie);
2272 }
2273
2274 /*
2275 * Used to determine if in the current context the stack is sized large
2276 * enough to allow zio_execute() to be called recursively. A minimum
2277 * stack size of 16K is required to avoid needing to re-dispatch the zio.
2278 */
2279 static boolean_t
2280 zio_execute_stack_check(zio_t *zio)
2281 {
2282 #if !defined(HAVE_LARGE_STACKS)
2283 dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
2284
2285 /* Executing in txg_sync_thread() context. */
2286 if (dp && curthread == dp->dp_tx.tx_sync_thread)
2287 return (B_TRUE);
2288
2289 /* Pool initialization outside of zio_taskq context. */
2290 if (dp && spa_is_initializing(dp->dp_spa) &&
2291 !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
2292 !zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))
2293 return (B_TRUE);
2294 #else
2295 (void) zio;
2296 #endif /* HAVE_LARGE_STACKS */
2297
2298 return (B_FALSE);
2299 }
2300
2301 __attribute__((always_inline))
2302 static inline void
2303 __zio_execute(zio_t *zio)
2304 {
2305 ASSERT3U(zio->io_queued_timestamp, >, 0);
2306
2307 while (zio->io_stage < ZIO_STAGE_DONE) {
2308 enum zio_stage pipeline = zio->io_pipeline;
2309 enum zio_stage stage = zio->io_stage;
2310
2311 zio->io_executor = curthread;
2312
2313 ASSERT(!MUTEX_HELD(&zio->io_lock));
2314 ASSERT(ISP2(stage));
2315 ASSERT(zio->io_stall == NULL);
2316
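		/*
		 * Advance to the next stage that is actually enabled in
		 * this zio's pipeline; each stage is a single bit, set in
		 * pipeline order.
		 */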
2317 do {
2318 stage <<= 1;
2319 } while ((stage & pipeline) == 0);
2320
2321 ASSERT(stage <= ZIO_STAGE_DONE);
2322
2323 /*
2324 * If we are in interrupt context and this pipeline stage
2325 * will grab a config lock that is held across I/O,
2326 * or may wait for an I/O that needs an interrupt thread
2327 * to complete, issue async to avoid deadlock.
2328 *
2329 * For VDEV_IO_START, we cut in line so that the io will
2330 * be sent to disk promptly.
2331 */
2332 if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
2333 zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
2334 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
2335 zio_requeue_io_start_cut_in_line : B_FALSE;
2336 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
2337 return;
2338 }
2339
2340 /*
2341 * If the current context doesn't have large enough stacks
2342 * the zio must be issued asynchronously to prevent overflow.
2343 */
2344 if (zio_execute_stack_check(zio)) {
2345 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
2346 zio_requeue_io_start_cut_in_line : B_FALSE;
2347 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
2348 return;
2349 }
2350
2351 zio->io_stage = stage;
2352 zio->io_pipeline_trace |= zio->io_stage;
2353
2354 /*
2355 * The zio pipeline stage returns the next zio to execute
2356 * (typically the same as this one), or NULL if we should
2357 * stop.
2358 */
2359 zio = zio_pipeline[highbit64(stage) - 1](zio);
2360
2361 if (zio == NULL)
2362 return;
2363 }
2364 }
2365
2366
2367 /*
2368 * ==========================================================================
2369 * Initiate I/O, either sync or async
2370 * ==========================================================================
2371 */
2372 int
2373 zio_wait(zio_t *zio)
2374 {
2375 /*
2376 * Some routines, like zio_free_sync(), may return a NULL zio
2377 * to avoid the performance overhead of creating and then destroying
2378 * an unneeded zio. For the callers' simplicity, we accept a NULL
2379 * zio and ignore it.
2380 */
2381 if (zio == NULL)
2382 return (0);
2383
2384 long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms);
2385 int error;
2386
2387 ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN);
2388 ASSERT3P(zio->io_executor, ==, NULL);
2389
2390 zio->io_waiter = curthread;
2391 ASSERT0(zio->io_queued_timestamp);
2392 zio->io_queued_timestamp = gethrtime();
2393
2394 if (zio->io_type == ZIO_TYPE_WRITE) {
2395 spa_select_allocator(zio);
2396 }
2397 __zio_execute(zio);
2398
2399 mutex_enter(&zio->io_lock);
2400 while (zio->io_executor != NULL) {
2401 error = cv_timedwait_io(&zio->io_cv, &zio->io_lock,
2402 ddi_get_lbolt() + timeout);
2403
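		/*
		 * cv_timedwait_io() returns -1 on timeout.  If this zio has
		 * been outstanding longer than the deadman threshold, report
		 * it (and, depending on the failmode, restart it) and then
		 * keep waiting.
		 */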
2404 if (zfs_deadman_enabled && error == -1 &&
2405 gethrtime() - zio->io_queued_timestamp >
2406 spa_deadman_ziotime(zio->io_spa)) {
2407 mutex_exit(&zio->io_lock);
2408 timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms);
2409 zio_deadman(zio, FTAG);
2410 mutex_enter(&zio->io_lock);
2411 }
2412 }
2413 mutex_exit(&zio->io_lock);
2414
2415 error = zio->io_error;
2416 zio_destroy(zio);
2417
2418 return (error);
2419 }
2420
2421 void
2422 zio_nowait(zio_t *zio)
2423 {
2424 /*
2425 * See comment in zio_wait().
2426 */
2427 if (zio == NULL)
2428 return;
2429
2430 ASSERT3P(zio->io_executor, ==, NULL);
2431
2432 if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
2433 list_is_empty(&zio->io_parent_list)) {
2434 zio_t *pio;
2435
2436 /*
2437 * This is a logical async I/O with no parent to wait for it.
2438 * We add it to the spa_async_root_zio "Godfather" I/O which
2439 * will ensure they complete prior to unloading the pool.
2440 */
2441 spa_t *spa = zio->io_spa;
2442 pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE];
2443
2444 zio_add_child(pio, zio);
2445 }
2446
2447 ASSERT0(zio->io_queued_timestamp);
2448 zio->io_queued_timestamp = gethrtime();
2449 if (zio->io_type == ZIO_TYPE_WRITE) {
2450 spa_select_allocator(zio);
2451 }
2452 __zio_execute(zio);
2453 }
2454
2455 /*
2456 * ==========================================================================
2457 * Reexecute, cancel, or suspend/resume failed I/O
2458 * ==========================================================================
2459 */
2460
2461 static void
2462 zio_reexecute(void *arg)
2463 {
2464 zio_t *pio = arg;
2465 zio_t *cio, *cio_next, *gio;
2466
2467 ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
2468 ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
2469 ASSERT(pio->io_gang_leader == NULL);
2470 ASSERT(pio->io_gang_tree == NULL);
2471
2472 mutex_enter(&pio->io_lock);
2473 pio->io_flags = pio->io_orig_flags;
2474 pio->io_stage = pio->io_orig_stage;
2475 pio->io_pipeline = pio->io_orig_pipeline;
2476 pio->io_reexecute = 0;
2477 pio->io_flags |= ZIO_FLAG_REEXECUTED;
2478 pio->io_pipeline_trace = 0;
2479 pio->io_error = 0;
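	/*
	 * Recompute which wait states (ready/done) this zio has already
	 * satisfied, and re-register the outstanding ones with each parent.
	 */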
2480 pio->io_state[ZIO_WAIT_READY] = (pio->io_stage >= ZIO_STAGE_READY) ||
2481 (pio->io_pipeline & ZIO_STAGE_READY) == 0;
2482 pio->io_state[ZIO_WAIT_DONE] = (pio->io_stage >= ZIO_STAGE_DONE);
2483 zio_link_t *zl = NULL;
2484 while ((gio = zio_walk_parents(pio, &zl)) != NULL) {
2485 for (int w = 0; w < ZIO_WAIT_TYPES; w++) {
2486 gio->io_children[pio->io_child_type][w] +=
2487 !pio->io_state[w];
2488 }
2489 }
2490 for (int c = 0; c < ZIO_CHILD_TYPES; c++)
2491 pio->io_child_error[c] = 0;
2492
2493 if (IO_IS_ALLOCATING(pio))
2494 BP_ZERO(pio->io_bp);
2495
2496 /*
2497 * As we reexecute pio's children, new children could be created.
2498 * New children go to the head of pio's io_child_list, however,
2499 * so we will (correctly) not reexecute them. The key is that
2500 * the remainder of pio's io_child_list, from 'cio_next' onward,
2501 * cannot be affected by any side effects of reexecuting 'cio'.
2502 */
2503 zl = NULL;
2504 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
2505 cio_next = zio_walk_children(pio, &zl);
2506 mutex_exit(&pio->io_lock);
2507 zio_reexecute(cio);
2508 mutex_enter(&pio->io_lock);
2509 }
2510 mutex_exit(&pio->io_lock);
2511
2512 /*
2513 * Now that all children have been reexecuted, execute the parent.
2514 * We don't reexecute "The Godfather" I/O here as it's the
2515 * responsibility of the caller to wait on it.
2516 */
2517 if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
2518 pio->io_queued_timestamp = gethrtime();
2519 __zio_execute(pio);
2520 }
2521 }
2522
2523 void
2524 zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
2525 {
2526 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
2527 fm_panic("Pool '%s' has encountered an uncorrectable I/O "
2528 "failure and the failure mode property for this pool "
2529 "is set to panic.", spa_name(spa));
2530
2531 if (reason != ZIO_SUSPEND_MMP) {
2532 cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable "
2533 "I/O failure and has been suspended.\n", spa_name(spa));
2534 }
2535
2536 (void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL,
2537 NULL, NULL, 0);
2538
2539 mutex_enter(&spa->spa_suspend_lock);
2540
2541 if (spa->spa_suspend_zio_root == NULL)
2542 spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
2543 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
2544 ZIO_FLAG_GODFATHER);
2545
2546 spa->spa_suspended = reason;
2547
2548 if (zio != NULL) {
2549 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
2550 ASSERT(zio != spa->spa_suspend_zio_root);
2551 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2552 ASSERT(zio_unique_parent(zio) == NULL);
2553 ASSERT(zio->io_stage == ZIO_STAGE_DONE);
2554 zio_add_child(spa->spa_suspend_zio_root, zio);
2555 }
2556
2557 mutex_exit(&spa->spa_suspend_lock);
2558 }
2559
2560 int
2561 zio_resume(spa_t *spa)
2562 {
2563 zio_t *pio;
2564
2565 /*
2566 * Reexecute all previously suspended i/o.
2567 */
2568 mutex_enter(&spa->spa_suspend_lock);
2569 spa->spa_suspended = ZIO_SUSPEND_NONE;
2570 cv_broadcast(&spa->spa_suspend_cv);
2571 pio = spa->spa_suspend_zio_root;
2572 spa->spa_suspend_zio_root = NULL;
2573 mutex_exit(&spa->spa_suspend_lock);
2574
2575 if (pio == NULL)
2576 return (0);
2577
2578 zio_reexecute(pio);
2579 return (zio_wait(pio));
2580 }
2581
2582 void
2583 zio_resume_wait(spa_t *spa)
2584 {
2585 mutex_enter(&spa->spa_suspend_lock);
2586 while (spa_suspended(spa))
2587 cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
2588 mutex_exit(&spa->spa_suspend_lock);
2589 }
2590
2591 /*
2592 * ==========================================================================
2593 * Gang blocks.
2594 *
2595 * A gang block is a collection of small blocks that looks to the DMU
2596 * like one large block. When zio_dva_allocate() cannot find a block
2597 * of the requested size, due to either severe fragmentation or the pool
2598 * being nearly full, it calls zio_write_gang_block() to construct the
2599 * block from smaller fragments.
2600 *
2601 * A gang block consists of a gang header (zio_gbh_phys_t) and up to
2602 * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like
2603 * an indirect block: it's an array of block pointers. It consumes
2604 * only one sector and hence is allocatable regardless of fragmentation.
2605 * The gang header's bps point to its gang members, which hold the data.
2606 *
2607 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
2608 * as the verifier to ensure uniqueness of the SHA256 checksum.
2609 * Critically, the gang block bp's blk_cksum is the checksum of the data,
2610 * not the gang header. This ensures that data block signatures (needed for
2611 * deduplication) are independent of how the block is physically stored.
2612 *
2613 * Gang blocks can be nested: a gang member may itself be a gang block.
2614 * Thus every gang block is a tree in which root and all interior nodes are
2615 * gang headers, and the leaves are normal blocks that contain user data.
2616 * The root of the gang tree is called the gang leader.
2617 *
2618 * To perform any operation (read, rewrite, free, claim) on a gang block,
2619 * zio_gang_assemble() first assembles the gang tree (minus data leaves)
2620 * in the io_gang_tree field of the original logical i/o by recursively
2621 * reading the gang leader and all gang headers below it. This yields
2622 * an in-core tree containing the contents of every gang header and the
2623 * bps for every constituent of the gang block.
2624 *
2625 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
2626 * and invokes a callback on each bp. To free a gang block, zio_gang_issue()
2627 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
2628 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
2629 * zio_read_gang() is a wrapper around zio_read() that omits reading gang
2630 * headers, since we already have those in io_gang_tree. zio_rewrite_gang()
2631 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
2632 * of the gang header plus zio_checksum_compute() of the data to update the
2633 * gang header's blk_cksum as described above.
2634 *
2635 * The two-phase assemble/issue model solves the problem of partial failure --
2636 * what if you'd freed part of a gang block but then couldn't read the
2637 * gang header for another part? Assembling the entire gang tree first
2638 * ensures that all the necessary gang header I/O has succeeded before
2639 * starting the actual work of free, claim, or write. Once the gang tree
2640 * is assembled, free and claim are in-memory operations that cannot fail.
2641 *
2642 * In the event that a gang write fails, zio_dva_unallocate() walks the
2643 * gang tree to immediately free (i.e. insert back into the space map)
2644 * everything we've allocated. This ensures that we don't get ENOSPC
2645 * errors during repeated suspend/resume cycles due to a flaky device.
2646 *
2647 * Gang rewrites only happen during sync-to-convergence. If we can't assemble
2648 * the gang tree, we won't modify the block, so we can safely defer the free
2649 * (knowing that the block is still intact). If we *can* assemble the gang
2650 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
2651 * each constituent bp and we can allocate a new block on the next sync pass.
2652 *
2653 * In all cases, the gang tree allows complete recovery from partial failure.
2654 * ==========================================================================
2655 */
2656
2657 static void
2658 zio_gang_issue_func_done(zio_t *zio)
2659 {
2660 abd_free(zio->io_abd);
2661 }
2662
2663 static zio_t *
2664 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2665 uint64_t offset)
2666 {
2667 if (gn != NULL)
2668 return (pio);
2669
2670 return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset),
2671 BP_GET_PSIZE(bp), zio_gang_issue_func_done,
2672 NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
2673 &pio->io_bookmark));
2674 }
2675
2676 static zio_t *
2677 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2678 uint64_t offset)
2679 {
2680 zio_t *zio;
2681
2682 if (gn != NULL) {
2683 abd_t *gbh_abd =
2684 abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2685 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
2686 gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL,
2687 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
2688 &pio->io_bookmark);
2689 /*
2690 * As we rewrite each gang header, the pipeline will compute
2691 * a new gang block header checksum for it; but no one will
2692 * compute a new data checksum, so we do that here. The one
2693 * exception is the gang leader: the pipeline already computed
2694 * its data checksum because that stage precedes gang assembly.
2695 * (Presently, nothing actually uses interior data checksums;
2696 * this is just good hygiene.)
2697 */
2698 if (gn != pio->io_gang_leader->io_gang_tree) {
2699 abd_t *buf = abd_get_offset(data, offset);
2700
2701 zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
2702 buf, BP_GET_PSIZE(bp));
2703
2704 abd_free(buf);
2705 }
2706 /*
2707 * If we are here to damage data for testing purposes,
2708 * leave the GBH alone so that we can detect the damage.
2709 */
2710 if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
2711 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
2712 } else {
2713 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
2714 abd_get_offset(data, offset), BP_GET_PSIZE(bp),
2715 zio_gang_issue_func_done, NULL, pio->io_priority,
2716 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
2717 }
2718
2719 return (zio);
2720 }
2721
2722 static zio_t *
2723 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2724 uint64_t offset)
2725 {
2726 (void) gn, (void) data, (void) offset;
2727
2728 zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
2729 ZIO_GANG_CHILD_FLAGS(pio));
2730 if (zio == NULL) {
2731 zio = zio_null(pio, pio->io_spa,
2732 NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio));
2733 }
2734 return (zio);
2735 }
2736
2737 static zio_t *
2738 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2739 uint64_t offset)
2740 {
2741 (void) gn, (void) data, (void) offset;
2742 return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
2743 NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
2744 }
2745
2746 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
2747 NULL,
2748 zio_read_gang,
2749 zio_rewrite_gang,
2750 zio_free_gang,
2751 zio_claim_gang,
2752 NULL
2753 };
2754
2755 static void zio_gang_tree_assemble_done(zio_t *zio);
2756
2757 static zio_gang_node_t *
2758 zio_gang_node_alloc(zio_gang_node_t **gnpp)
2759 {
2760 zio_gang_node_t *gn;
2761
2762 ASSERT(*gnpp == NULL);
2763
2764 gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
2765 gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
2766 *gnpp = gn;
2767
2768 return (gn);
2769 }
2770
2771 static void
2772 zio_gang_node_free(zio_gang_node_t **gnpp)
2773 {
2774 zio_gang_node_t *gn = *gnpp;
2775
2776 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
2777 ASSERT(gn->gn_child[g] == NULL);
2778
2779 zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2780 kmem_free(gn, sizeof (*gn));
2781 *gnpp = NULL;
2782 }
2783
2784 static void
2785 zio_gang_tree_free(zio_gang_node_t **gnpp)
2786 {
2787 zio_gang_node_t *gn = *gnpp;
2788
2789 if (gn == NULL)
2790 return;
2791
2792 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
2793 zio_gang_tree_free(&gn->gn_child[g]);
2794
2795 zio_gang_node_free(gnpp);
2796 }
2797
2798 static void
2799 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
2800 {
2801 zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
2802 abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2803
2804 ASSERT(gio->io_gang_leader == gio);
2805 ASSERT(BP_IS_GANG(bp));
2806
2807 zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE,
2808 zio_gang_tree_assemble_done, gn, gio->io_priority,
2809 ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
2810 }
2811
2812 static void
2813 zio_gang_tree_assemble_done(zio_t *zio)
2814 {
2815 zio_t *gio = zio->io_gang_leader;
2816 zio_gang_node_t *gn = zio->io_private;
2817 blkptr_t *bp = zio->io_bp;
2818
2819 ASSERT(gio == zio_unique_parent(zio));
2820 ASSERT(list_is_empty(&zio->io_child_list));
2821
2822 if (zio->io_error)
2823 return;
2824
2825 /* this ABD was created from a linear buf in zio_gang_tree_assemble */
2826 if (BP_SHOULD_BYTESWAP(bp))
2827 byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size);
2828
2829 ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh);
2830 ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
2831 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
2832
2833 abd_free(zio->io_abd);
2834
2835 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
2836 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
2837 if (!BP_IS_GANG(gbp))
2838 continue;
2839 zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
2840 }
2841 }
2842
2843 static void
2844 zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
2845 uint64_t offset)
2846 {
2847 zio_t *gio = pio->io_gang_leader;
2848 zio_t *zio;
2849
2850 ASSERT(BP_IS_GANG(bp) == !!gn);
2851 ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
2852 ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);
2853
2854 /*
2855 * If you're a gang header, your data is in gn->gn_gbh.
2856 * If you're a gang member, your data is in 'data' and gn == NULL.
2857 */
2858 zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset);
2859
2860 if (gn != NULL) {
2861 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
2862
2863 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
2864 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
2865 if (BP_IS_HOLE(gbp))
2866 continue;
2867 zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data,
2868 offset);
2869 offset += BP_GET_PSIZE(gbp);
2870 }
2871 }
2872
2873 if (gn == gio->io_gang_tree)
2874 ASSERT3U(gio->io_size, ==, offset);
2875
2876 if (zio != pio)
2877 zio_nowait(zio);
2878 }
2879
2880 static zio_t *
2881 zio_gang_assemble(zio_t *zio)
2882 {
2883 blkptr_t *bp = zio->io_bp;
2884
2885 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
2886 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2887
2888 zio->io_gang_leader = zio;
2889
2890 zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
2891
2892 return (zio);
2893 }
2894
2895 static zio_t *
2896 zio_gang_issue(zio_t *zio)
2897 {
2898 blkptr_t *bp = zio->io_bp;
2899
2900 if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
2901 return (NULL);
2902 }
2903
2904 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
2905 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2906
2907 if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
2908 zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd,
2909 0);
2910 else
2911 zio_gang_tree_free(&zio->io_gang_tree);
2912
2913 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2914
2915 return (zio);
2916 }
2917
2918 static void
2919 zio_gang_inherit_allocator(zio_t *pio, zio_t *cio)
2920 {
2921 cio->io_allocator = pio->io_allocator;
2922 }
2923
2924 static void
2925 zio_write_gang_member_ready(zio_t *zio)
2926 {
2927 zio_t *pio = zio_unique_parent(zio);
2928 dva_t *cdva = zio->io_bp->blk_dva;
2929 dva_t *pdva = pio->io_bp->blk_dva;
2930 uint64_t asize;
2931 zio_t *gio __maybe_unused = zio->io_gang_leader;
2932
2933 if (BP_IS_HOLE(zio->io_bp))
2934 return;
2935
2936 ASSERT(BP_IS_HOLE(&zio->io_bp_orig));
2937
2938 ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
2939 ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
2940 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
2941 ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
2942 VERIFY3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
2943
2944 mutex_enter(&pio->io_lock);
2945 for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
2946 ASSERT(DVA_GET_GANG(&pdva[d]));
2947 asize = DVA_GET_ASIZE(&pdva[d]);
2948 asize += DVA_GET_ASIZE(&cdva[d]);
2949 DVA_SET_ASIZE(&pdva[d], asize);
2950 }
2951 mutex_exit(&pio->io_lock);
2952 }
2953
2954 static void
2955 zio_write_gang_done(zio_t *zio)
2956 {
2957 /*
2958 * The io_abd field will be NULL for a zio with no data. The io_flags
2959 * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't
2960 * check for it here as it is cleared in zio_ready.
2961 */
2962 if (zio->io_abd != NULL)
2963 abd_free(zio->io_abd);
2964 }
2965
2966 static zio_t *
2967 zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
2968 {
2969 spa_t *spa = pio->io_spa;
2970 blkptr_t *bp = pio->io_bp;
2971 zio_t *gio = pio->io_gang_leader;
2972 zio_t *zio;
2973 zio_gang_node_t *gn, **gnpp;
2974 zio_gbh_phys_t *gbh;
2975 abd_t *gbh_abd;
2976 uint64_t txg = pio->io_txg;
2977 uint64_t resid = pio->io_size;
2978 uint64_t lsize;
2979 int copies = gio->io_prop.zp_copies;
2980 zio_prop_t zp;
2981 int error;
2982 boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA);
2983
2984 /*
2985 * If one copy was requested, store 2 copies of the GBH, so that we
2986 * can still traverse all the data (e.g. to free or scrub) even if a
2987 * block is damaged. Note that we can't store 3 copies of the GBH in
2988 * all cases, e.g. with encryption, which uses DVA[2] for the IV+salt.
2989 */
2990 int gbh_copies = copies;
2991 if (gbh_copies == 1) {
2992 gbh_copies = MIN(2, spa_max_replication(spa));
2993 }
2994
2995 ASSERT(ZIO_HAS_ALLOCATOR(pio));
2996 int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
2997 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
2998 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
2999 ASSERT(has_data);
3000
3001 flags |= METASLAB_ASYNC_ALLOC;
3002 VERIFY(zfs_refcount_held(&mc->mc_allocator[pio->io_allocator].
3003 mca_alloc_slots, pio));
3004
3005 /*
3006 * The logical zio has already placed a reservation for
3007 * 'copies' allocation slots but gang blocks may require
3008 * additional copies. These additional copies
3009 * (i.e. gbh_copies - copies) are guaranteed to succeed
3010 * since metaslab_class_throttle_reserve() always allows
3011 * additional reservations for gang blocks.
3012 */
3013 VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
3014 pio->io_allocator, pio, flags));
3015 }
3016
3017 error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
3018 bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
3019 &pio->io_alloc_list, pio, pio->io_allocator);
3020 if (error) {
3021 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3022 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
3023 ASSERT(has_data);
3024
3025 /*
3026 * If we failed to allocate the gang block header then
3027 * we remove any additional allocation reservations that
3028 * we placed here. The original reservation will
3029 * be removed when the logical I/O goes to the ready
3030 * stage.
3031 */
3032 metaslab_class_throttle_unreserve(mc,
3033 gbh_copies - copies, pio->io_allocator, pio);
3034 }
3035
3036 pio->io_error = error;
3037 return (pio);
3038 }
3039
3040 if (pio == gio) {
3041 gnpp = &gio->io_gang_tree;
3042 } else {
3043 gnpp = pio->io_private;
3044 ASSERT(pio->io_ready == zio_write_gang_member_ready);
3045 }
3046
3047 gn = zio_gang_node_alloc(gnpp);
3048 gbh = gn->gn_gbh;
3049 memset(gbh, 0, SPA_GANGBLOCKSIZE);
3050 gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE);
3051
3052 /*
3053 * Create the gang header.
3054 */
3055 zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE,
3056 zio_write_gang_done, NULL, pio->io_priority,
3057 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
3058
3059 zio_gang_inherit_allocator(pio, zio);
3060
3061 /*
3062 * Create and nowait the gang children.
3063 */
3064 for (int g = 0; resid != 0; resid -= lsize, g++) {
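		/*
		 * Split the remaining data evenly across the remaining gang
		 * slots, rounding each member up to SPA_MINBLOCKSIZE.
		 */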
3065 lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
3066 SPA_MINBLOCKSIZE);
3067 ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
3068
3069 zp.zp_checksum = gio->io_prop.zp_checksum;
3070 zp.zp_compress = ZIO_COMPRESS_OFF;
3071 zp.zp_complevel = gio->io_prop.zp_complevel;
3072 zp.zp_type = DMU_OT_NONE;
3073 zp.zp_level = 0;
3074 zp.zp_copies = gio->io_prop.zp_copies;
3075 zp.zp_dedup = B_FALSE;
3076 zp.zp_dedup_verify = B_FALSE;
3077 zp.zp_nopwrite = B_FALSE;
3078 zp.zp_encrypt = gio->io_prop.zp_encrypt;
3079 zp.zp_byteorder = gio->io_prop.zp_byteorder;
3080 memset(zp.zp_salt, 0, ZIO_DATA_SALT_LEN);
3081 memset(zp.zp_iv, 0, ZIO_DATA_IV_LEN);
3082 memset(zp.zp_mac, 0, ZIO_DATA_MAC_LEN);
3083
3084 zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
3085 has_data ? abd_get_offset(pio->io_abd, pio->io_size -
3086 resid) : NULL, lsize, lsize, &zp,
3087 zio_write_gang_member_ready, NULL,
3088 zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
3089 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
3090
3091 zio_gang_inherit_allocator(zio, cio);
3092
3093 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3094 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
3095 ASSERT(has_data);
3096
3097 /*
3098 * Gang children won't throttle but we should
3099 * account for their work, so reserve an allocation
3100 * slot for them here.
3101 */
3102 VERIFY(metaslab_class_throttle_reserve(mc,
3103 zp.zp_copies, cio->io_allocator, cio, flags));
3104 }
3105 zio_nowait(cio);
3106 }
3107
3108 /*
3109 * Set pio's pipeline to just wait for zio to finish.
3110 */
3111 pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3112
3113 zio_nowait(zio);
3114
3115 return (pio);
3116 }
3117
3118 /*
3119 * The zio_nop_write stage in the pipeline determines if allocating a
3120 * new bp is necessary. The nopwrite feature can handle writes in
3121 * either syncing or open context (i.e. zil writes) and as a result is
3122 * mutually exclusive with dedup.
3123 *
3124 * By leveraging a cryptographically secure checksum, such as SHA256, we
3125 * can compare the checksums of the new data and the old to determine if
3126 * allocating a new block is required. Note that our requirements for
3127 * cryptographic strength are fairly weak: there can't be any accidental
3128 * hash collisions, but we don't need to be secure against intentional
3129 * (malicious) collisions. To trigger a nopwrite, you have to be able
3130 * to write the file to begin with, and triggering an incorrect (hash
3131 * collision) nopwrite is no worse than simply writing to the file.
3132 * That said, there are no known attacks against the checksum algorithms
3133 * used for nopwrite, assuming that the salt and the checksums
3134 * themselves remain secret.
3135 */
3136 static zio_t *
3137 zio_nop_write(zio_t *zio)
3138 {
3139 blkptr_t *bp = zio->io_bp;
3140 blkptr_t *bp_orig = &zio->io_bp_orig;
3141 zio_prop_t *zp = &zio->io_prop;
3142
3143 ASSERT(BP_IS_HOLE(bp));
3144 ASSERT(BP_GET_LEVEL(bp) == 0);
3145 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
3146 ASSERT(zp->zp_nopwrite);
3147 ASSERT(!zp->zp_dedup);
3148 ASSERT(zio->io_bp_override == NULL);
3149 ASSERT(IO_IS_ALLOCATING(zio));
3150
3151 /*
3152 * Check to see if the original bp and the new bp have matching
3153 * characteristics (i.e. same checksum, compression algorithms, etc).
3154 * If they don't then just continue with the pipeline which will
3155 * allocate a new bp.
3156 */
3157 if (BP_IS_HOLE(bp_orig) ||
3158 !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
3159 ZCHECKSUM_FLAG_NOPWRITE) ||
3160 BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) ||
3161 BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
3162 BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
3163 BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
3164 zp->zp_copies != BP_GET_NDVAS(bp_orig))
3165 return (zio);
3166
3167 /*
3168 * If the checksums match then reset the pipeline so that we
3169 * avoid allocating a new bp and issuing any I/O.
3170 */
3171 if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
3172 ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
3173 ZCHECKSUM_FLAG_NOPWRITE);
3174 ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
3175 ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
3176 ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
3177 ASSERT3U(bp->blk_prop, ==, bp_orig->blk_prop);
3178
3179 /*
3180 * If we're overwriting a block that is currently on an
3181 * indirect vdev, then ignore the nopwrite request and
3182 * allow a new block to be allocated on a concrete vdev.
3183 */
3184 spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER);
3185 for (int d = 0; d < BP_GET_NDVAS(bp_orig); d++) {
3186 vdev_t *tvd = vdev_lookup_top(zio->io_spa,
3187 DVA_GET_VDEV(&bp_orig->blk_dva[d]));
3188 if (tvd->vdev_ops == &vdev_indirect_ops) {
3189 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
3190 return (zio);
3191 }
3192 }
3193 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
3194
3195 *bp = *bp_orig;
3196 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3197 zio->io_flags |= ZIO_FLAG_NOPWRITE;
3198 }
3199
3200 return (zio);
3201 }
3202
3203 /*
3204 * ==========================================================================
3205 * Block Reference Table
3206 * ==========================================================================
3207 */
3208 static zio_t *
3209 zio_brt_free(zio_t *zio)
3210 {
3211 blkptr_t *bp;
3212
3213 bp = zio->io_bp;
3214
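	/*
	 * Only level-0 data blocks can be block-cloned, so anything else
	 * cannot have a BRT entry and may be freed normally.
	 */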
3215 if (BP_GET_LEVEL(bp) > 0 ||
3216 BP_IS_METADATA(bp) ||
3217 !brt_maybe_exists(zio->io_spa, bp)) {
3218 return (zio);
3219 }
3220
3221 if (!brt_entry_decref(zio->io_spa, bp)) {
3222 /*
3223 * This isn't the last reference, so we cannot free
3224 * the data yet.
3225 */
3226 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3227 }
3228
3229 return (zio);
3230 }
3231
3232 /*
3233 * ==========================================================================
3234 * Dedup
3235 * ==========================================================================
3236 */
3237 static void
3238 zio_ddt_child_read_done(zio_t *zio)
3239 {
3240 blkptr_t *bp = zio->io_bp;
3241 ddt_entry_t *dde = zio->io_private;
3242 ddt_phys_t *ddp;
3243 zio_t *pio = zio_unique_parent(zio);
3244
3245 mutex_enter(&pio->io_lock);
3246 ddp = ddt_phys_select(dde, bp);
3247 if (zio->io_error == 0)
3248 ddt_phys_clear(ddp); /* this ddp doesn't need repair */
3249
3250 if (zio->io_error == 0 && dde->dde_repair_abd == NULL)
3251 dde->dde_repair_abd = zio->io_abd;
3252 else
3253 abd_free(zio->io_abd);
3254 mutex_exit(&pio->io_lock);
3255 }
3256
3257 static zio_t *
3258 zio_ddt_read_start(zio_t *zio)
3259 {
3260 blkptr_t *bp = zio->io_bp;
3261
3262 ASSERT(BP_GET_DEDUP(bp));
3263 ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
3264 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3265
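	/*
	 * A previous DDT child read failed.  Look up the dedup entry and
	 * issue reads of every other copy it records, so that the damaged
	 * copy can later be repaired from a good one.
	 */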
3266 if (zio->io_child_error[ZIO_CHILD_DDT]) {
3267 ddt_t *ddt = ddt_select(zio->io_spa, bp);
3268 ddt_entry_t *dde = ddt_repair_start(ddt, bp);
3269 ddt_phys_t *ddp = dde->dde_phys;
3270 ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
3271 blkptr_t blk;
3272
3273 ASSERT(zio->io_vsd == NULL);
3274 zio->io_vsd = dde;
3275
3276 if (ddp_self == NULL)
3277 return (zio);
3278
3279 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
3280 if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
3281 continue;
3282 ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
3283 &blk);
3284 zio_nowait(zio_read(zio, zio->io_spa, &blk,
3285 abd_alloc_for_io(zio->io_size, B_TRUE),
3286 zio->io_size, zio_ddt_child_read_done, dde,
3287 zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
3288 ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
3289 }
3290 return (zio);
3291 }
3292
3293 zio_nowait(zio_read(zio, zio->io_spa, bp,
3294 zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
3295 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
3296
3297 return (zio);
3298 }
3299
3300 static zio_t *
3301 zio_ddt_read_done(zio_t *zio)
3302 {
3303 blkptr_t *bp = zio->io_bp;
3304
3305 if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
3306 return (NULL);
3307 }
3308
3309 ASSERT(BP_GET_DEDUP(bp));
3310 ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
3311 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3312
3313 if (zio->io_child_error[ZIO_CHILD_DDT]) {
3314 ddt_t *ddt = ddt_select(zio->io_spa, bp);
3315 ddt_entry_t *dde = zio->io_vsd;
3316 if (ddt == NULL) {
3317 ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
3318 return (zio);
3319 }
3320 if (dde == NULL) {
3321 zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
3322 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
3323 return (NULL);
3324 }
3325 if (dde->dde_repair_abd != NULL) {
3326 abd_copy(zio->io_abd, dde->dde_repair_abd,
3327 zio->io_size);
3328 zio->io_child_error[ZIO_CHILD_DDT] = 0;
3329 }
3330 ddt_repair_done(ddt, dde);
3331 zio->io_vsd = NULL;
3332 }
3333
3334 ASSERT(zio->io_vsd == NULL);
3335
3336 return (zio);
3337 }
3338
3339 static boolean_t
3340 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
3341 {
3342 spa_t *spa = zio->io_spa;
3343 boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW);
3344
3345 ASSERT(!(zio->io_bp_override && do_raw));
3346
3347 /*
3348 * Note: we compare the original data, not the transformed data,
3349 * because when zio->io_bp is an override bp, we will not have
3350 * pushed the I/O transforms. That's an important optimization
3351 * because otherwise we'd compress/encrypt all dmu_sync() data twice.
3352 	 * However, we should never get a raw, override zio, so in these
3353 * cases we can compare the io_abd directly. This is useful because
3354 * it allows us to do dedup verification even if we don't have access
3355 * to the original data (for instance, if the encryption keys aren't
3356 * loaded).
3357 */
3358
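	/*
	 * First check any in-flight lead zio for each copy count; if none
	 * is pending, read each on-disk copy recorded in the DDT entry and
	 * compare against it. A size or content mismatch (or a failed
	 * read) is reported as a collision.
	 */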
3359 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
3360 zio_t *lio = dde->dde_lead_zio[p];
3361
3362 if (lio != NULL && do_raw) {
3363 return (lio->io_size != zio->io_size ||
3364 abd_cmp(zio->io_abd, lio->io_abd) != 0);
3365 } else if (lio != NULL) {
3366 return (lio->io_orig_size != zio->io_orig_size ||
3367 abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0);
3368 }
3369 }
3370
3371 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
3372 ddt_phys_t *ddp = &dde->dde_phys[p];
3373
3374 if (ddp->ddp_phys_birth != 0 && do_raw) {
3375 blkptr_t blk = *zio->io_bp;
3376 uint64_t psize;
3377 abd_t *tmpabd;
3378 int error;
3379
3380 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
3381 psize = BP_GET_PSIZE(&blk);
3382
3383 if (psize != zio->io_size)
3384 return (B_TRUE);
3385
3386 ddt_exit(ddt);
3387
3388 tmpabd = abd_alloc_for_io(psize, B_TRUE);
3389
3390 error = zio_wait(zio_read(NULL, spa, &blk, tmpabd,
3391 psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
3392 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
3393 ZIO_FLAG_RAW, &zio->io_bookmark));
3394
3395 if (error == 0) {
3396 if (abd_cmp(tmpabd, zio->io_abd) != 0)
3397 error = SET_ERROR(ENOENT);
3398 }
3399
3400 abd_free(tmpabd);
3401 ddt_enter(ddt);
3402 return (error != 0);
3403 } else if (ddp->ddp_phys_birth != 0) {
3404 arc_buf_t *abuf = NULL;
3405 arc_flags_t aflags = ARC_FLAG_WAIT;
3406 blkptr_t blk = *zio->io_bp;
3407 int error;
3408
3409 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
3410
3411 if (BP_GET_LSIZE(&blk) != zio->io_orig_size)
3412 return (B_TRUE);
3413
3414 ddt_exit(ddt);
3415
3416 error = arc_read(NULL, spa, &blk,
3417 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
3418 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3419 &aflags, &zio->io_bookmark);
3420
3421 if (error == 0) {
3422 if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data,
3423 zio->io_orig_size) != 0)
3424 error = SET_ERROR(ENOENT);
3425 arc_buf_destroy(abuf, &abuf);
3426 }
3427
3428 ddt_enter(ddt);
3429 return (error != 0);
3430 }
3431 }
3432
3433 return (B_FALSE);
3434 }
3435
3436 static void
3437 zio_ddt_child_write_ready(zio_t *zio)
3438 {
3439 int p = zio->io_prop.zp_copies;
3440 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
3441 ddt_entry_t *dde = zio->io_private;
3442 ddt_phys_t *ddp = &dde->dde_phys[p];
3443 zio_t *pio;
3444
3445 if (zio->io_error)
3446 return;
3447
3448 ddt_enter(ddt);
3449
3450 ASSERT(dde->dde_lead_zio[p] == zio);
3451
3452 ddt_phys_fill(ddp, zio->io_bp);
3453
3454 zio_link_t *zl = NULL;
3455 while ((pio = zio_walk_parents(zio, &zl)) != NULL)
3456 ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);
3457
3458 ddt_exit(ddt);
3459 }
3460
3461 static void
3462 zio_ddt_child_write_done(zio_t *zio)
3463 {
3464 int p = zio->io_prop.zp_copies;
3465 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
3466 ddt_entry_t *dde = zio->io_private;
3467 ddt_phys_t *ddp = &dde->dde_phys[p];
3468
3469 ddt_enter(ddt);
3470
3471 ASSERT(ddp->ddp_refcnt == 0);
3472 ASSERT(dde->dde_lead_zio[p] == zio);
3473 dde->dde_lead_zio[p] = NULL;
3474
3475 if (zio->io_error == 0) {
3476 zio_link_t *zl = NULL;
3477 while (zio_walk_parents(zio, &zl) != NULL)
3478 ddt_phys_addref(ddp);
3479 } else {
3480 ddt_phys_clear(ddp);
3481 }
3482
3483 ddt_exit(ddt);
3484 }
3485
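/*
 * Dedup write, in outline: look up (or create) the DDT entry for this bp
 * under the DDT lock. If a copy with the required redundancy already
 * exists on disk, or another zio is already writing one, take a reference
 * (or become a child of the in-flight writer). Otherwise issue a child
 * write of the original data and record it as the lead zio for this copy
 * count. If zp_dedup_verify detects a collision, dedup is abandoned and
 * the zio falls back to an ordinary write.
 */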
3486 static zio_t *
3487 zio_ddt_write(zio_t *zio)
3488 {
3489 spa_t *spa = zio->io_spa;
3490 blkptr_t *bp = zio->io_bp;
3491 uint64_t txg = zio->io_txg;
3492 zio_prop_t *zp = &zio->io_prop;
3493 int p = zp->zp_copies;
3494 zio_t *cio = NULL;
3495 ddt_t *ddt = ddt_select(spa, bp);
3496 ddt_entry_t *dde;
3497 ddt_phys_t *ddp;
3498
3499 ASSERT(BP_GET_DEDUP(bp));
3500 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
3501 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
3502 ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));
3503
3504 ddt_enter(ddt);
3505 dde = ddt_lookup(ddt, bp, B_TRUE);
3506 ddp = &dde->dde_phys[p];
3507
3508 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
3509 /*
3510 * If we're using a weak checksum, upgrade to a strong checksum
3511 * and try again. If we're already using a strong checksum,
3512 * we can't resolve it, so just convert to an ordinary write.
3513 * (And automatically e-mail a paper to Nature?)
3514 */
3515 if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
3516 ZCHECKSUM_FLAG_DEDUP)) {
3517 zp->zp_checksum = spa_dedup_checksum(spa);
3518 zio_pop_transforms(zio);
3519 zio->io_stage = ZIO_STAGE_OPEN;
3520 BP_ZERO(bp);
3521 } else {
3522 zp->zp_dedup = B_FALSE;
3523 BP_SET_DEDUP(bp, B_FALSE);
3524 }
3525 ASSERT(!BP_GET_DEDUP(bp));
3526 zio->io_pipeline = ZIO_WRITE_PIPELINE;
3527 ddt_exit(ddt);
3528 return (zio);
3529 }
3530
3531 if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
3532 if (ddp->ddp_phys_birth != 0)
3533 ddt_bp_fill(ddp, bp, txg);
3534 if (dde->dde_lead_zio[p] != NULL)
3535 zio_add_child(zio, dde->dde_lead_zio[p]);
3536 else
3537 ddt_phys_addref(ddp);
3538 } else if (zio->io_bp_override) {
3539 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == txg);
3540 ASSERT(BP_EQUAL(bp, zio->io_bp_override));
3541 ddt_phys_fill(ddp, bp);
3542 ddt_phys_addref(ddp);
3543 } else {
3544 cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
3545 zio->io_orig_size, zio->io_orig_size, zp,
3546 zio_ddt_child_write_ready, NULL,
3547 zio_ddt_child_write_done, dde, zio->io_priority,
3548 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
3549
3550 zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);
3551 dde->dde_lead_zio[p] = cio;
3552 }
3553
3554 ddt_exit(ddt);
3555
3556 zio_nowait(cio);
3557
3558 return (zio);
3559 }
3560
3561 static ddt_entry_t *freedde; /* for debugging */
3562
3563 static zio_t *
3564 zio_ddt_free(zio_t *zio)
3565 {
3566 spa_t *spa = zio->io_spa;
3567 blkptr_t *bp = zio->io_bp;
3568 ddt_t *ddt = ddt_select(spa, bp);
3569 ddt_entry_t *dde;
3570 ddt_phys_t *ddp;
3571
3572 ASSERT(BP_GET_DEDUP(bp));
3573 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3574
3575 ddt_enter(ddt);
3576 freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
3577 if (dde) {
3578 ddp = ddt_phys_select(dde, bp);
3579 if (ddp)
3580 ddt_phys_decref(ddp);
3581 }
3582 ddt_exit(ddt);
3583
3584 return (zio);
3585 }
3586
3587 /*
3588 * ==========================================================================
3589 * Allocate and free blocks
3590 * ==========================================================================
3591 */
3592
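/*
 * Write allocation throttle: zio_dva_throttle() queues allocating writes
 * on a per-allocator AVL tree (spa_allocs[].spaa_tree) rather than
 * allocating immediately. zio_io_to_allocate() pops the head of that
 * tree only if a reservation against the metaslab class can be placed;
 * otherwise the zio stays queued and zio_allocate_dispatch() re-drives
 * it later, e.g. when a completing zio releases its reservation.
 */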
3593 static zio_t *
3594 zio_io_to_allocate(spa_t *spa, int allocator)
3595 {
3596 zio_t *zio;
3597
3598 ASSERT(MUTEX_HELD(&spa->spa_allocs[allocator].spaa_lock));
3599
3600 zio = avl_first(&spa->spa_allocs[allocator].spaa_tree);
3601 if (zio == NULL)
3602 return (NULL);
3603
3604 ASSERT(IO_IS_ALLOCATING(zio));
3605 ASSERT(ZIO_HAS_ALLOCATOR(zio));
3606
3607 /*
3608 * Try to place a reservation for this zio. If we're unable to
3609 * reserve then we throttle.
3610 */
3611 ASSERT3U(zio->io_allocator, ==, allocator);
3612 if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
3613 zio->io_prop.zp_copies, allocator, zio, 0)) {
3614 return (NULL);
3615 }
3616
3617 avl_remove(&spa->spa_allocs[allocator].spaa_tree, zio);
3618 ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
3619
3620 return (zio);
3621 }
3622
3623 static zio_t *
3624 zio_dva_throttle(zio_t *zio)
3625 {
3626 spa_t *spa = zio->io_spa;
3627 zio_t *nio;
3628 metaslab_class_t *mc;
3629
3630 /* locate an appropriate allocation class */
3631 mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type,
3632 zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk);
3633
3634 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
3635 !mc->mc_alloc_throttle_enabled ||
3636 zio->io_child_type == ZIO_CHILD_GANG ||
3637 zio->io_flags & ZIO_FLAG_NODATA) {
3638 return (zio);
3639 }
3640
3641 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
3642 ASSERT(ZIO_HAS_ALLOCATOR(zio));
3643 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
3644 ASSERT3U(zio->io_queued_timestamp, >, 0);
3645 ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
3646
3647 int allocator = zio->io_allocator;
3648 zio->io_metaslab_class = mc;
3649 mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
3650 avl_add(&spa->spa_allocs[allocator].spaa_tree, zio);
3651 nio = zio_io_to_allocate(spa, allocator);
3652 mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
3653 return (nio);
3654 }
3655
3656 static void
3657 zio_allocate_dispatch(spa_t *spa, int allocator)
3658 {
3659 zio_t *zio;
3660
3661 mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
3662 zio = zio_io_to_allocate(spa, allocator);
3663 mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
3664 if (zio == NULL)
3665 return;
3666
3667 ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
3668 ASSERT0(zio->io_error);
3669 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
3670 }
3671
3672 static zio_t *
3673 zio_dva_allocate(zio_t *zio)
3674 {
3675 spa_t *spa = zio->io_spa;
3676 metaslab_class_t *mc;
3677 blkptr_t *bp = zio->io_bp;
3678 int error;
3679 int flags = 0;
3680
3681 if (zio->io_gang_leader == NULL) {
3682 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
3683 zio->io_gang_leader = zio;
3684 }
3685
3686 ASSERT(BP_IS_HOLE(bp));
3687 ASSERT0(BP_GET_NDVAS(bp));
3688 ASSERT3U(zio->io_prop.zp_copies, >, 0);
3689 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
3690 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
3691
3692 if (zio->io_flags & ZIO_FLAG_NODATA)
3693 flags |= METASLAB_DONT_THROTTLE;
3694 if (zio->io_flags & ZIO_FLAG_GANG_CHILD)
3695 flags |= METASLAB_GANG_CHILD;
3696 if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE)
3697 flags |= METASLAB_ASYNC_ALLOC;
3698
3699 /*
3700 * if not already chosen, locate an appropriate allocation class
3701 */
3702 mc = zio->io_metaslab_class;
3703 if (mc == NULL) {
3704 mc = spa_preferred_class(spa, zio->io_size,
3705 zio->io_prop.zp_type, zio->io_prop.zp_level,
3706 zio->io_prop.zp_zpl_smallblk);
3707 zio->io_metaslab_class = mc;
3708 }
3709
3710 /*
3711 * Try allocating the block in the usual metaslab class.
3712 * If that's full, allocate it in the normal class.
3713 * If that's full, allocate as a gang block,
3714 * and if all are full, the allocation fails (which shouldn't happen).
3715 *
3716 * Note that we do not fall back on embedded slog (ZIL) space, to
3717 * preserve unfragmented slog space, which is critical for decent
3718 * sync write performance. If a log allocation fails, we will fall
3719 * back to spa_sync() which is abysmal for performance.
3720 */
3721 ASSERT(ZIO_HAS_ALLOCATOR(zio));
3722 error = metaslab_alloc(spa, mc, zio->io_size, bp,
3723 zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
3724 &zio->io_alloc_list, zio, zio->io_allocator);
3725
3726 /*
3727 	 * Fall back to the normal class when an alloc class is full
3728 */
3729 if (error == ENOSPC && mc != spa_normal_class(spa)) {
3730 /*
3731 * If throttling, transfer reservation over to normal class.
3732 * The io_allocator slot can remain the same even though we
3733 * are switching classes.
3734 */
3735 if (mc->mc_alloc_throttle_enabled &&
3736 (zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) {
3737 metaslab_class_throttle_unreserve(mc,
3738 zio->io_prop.zp_copies, zio->io_allocator, zio);
3739 zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING;
3740
3741 VERIFY(metaslab_class_throttle_reserve(
3742 spa_normal_class(spa),
3743 zio->io_prop.zp_copies, zio->io_allocator, zio,
3744 flags | METASLAB_MUST_RESERVE));
3745 }
3746 zio->io_metaslab_class = mc = spa_normal_class(spa);
3747 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
3748 zfs_dbgmsg("%s: metaslab allocation failure, "
3749 "trying normal class: zio %px, size %llu, error %d",
3750 spa_name(spa), zio, (u_longlong_t)zio->io_size,
3751 error);
3752 }
3753
3754 error = metaslab_alloc(spa, mc, zio->io_size, bp,
3755 zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
3756 &zio->io_alloc_list, zio, zio->io_allocator);
3757 }
3758
3759 if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) {
3760 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
3761 zfs_dbgmsg("%s: metaslab allocation failure, "
3762 "trying ganging: zio %px, size %llu, error %d",
3763 spa_name(spa), zio, (u_longlong_t)zio->io_size,
3764 error);
3765 }
3766 return (zio_write_gang_block(zio, mc));
3767 }
3768 if (error != 0) {
3769 if (error != ENOSPC ||
3770 (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) {
3771 zfs_dbgmsg("%s: metaslab allocation failure: zio %px, "
3772 "size %llu, error %d",
3773 spa_name(spa), zio, (u_longlong_t)zio->io_size,
3774 error);
3775 }
3776 zio->io_error = error;
3777 }
3778
3779 return (zio);
3780 }
3781
3782 static zio_t *
3783 zio_dva_free(zio_t *zio)
3784 {
3785 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
3786
3787 return (zio);
3788 }
3789
3790 static zio_t *
3791 zio_dva_claim(zio_t *zio)
3792 {
3793 int error;
3794
3795 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
3796 if (error)
3797 zio->io_error = error;
3798
3799 return (zio);
3800 }
3801
3802 /*
3803 * Undo an allocation. This is used by zio_done() when an I/O fails
3804 * and we want to give back the block we just allocated.
3805 * This handles both normal blocks and gang blocks.
3806 */
3807 static void
3808 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
3809 {
3810 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg || BP_IS_HOLE(bp));
3811 ASSERT(zio->io_bp_override == NULL);
3812
3813 if (!BP_IS_HOLE(bp)) {
3814 metaslab_free(zio->io_spa, bp, BP_GET_LOGICAL_BIRTH(bp),
3815 B_TRUE);
3816 }
3817
3818 if (gn != NULL) {
3819 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
3820 zio_dva_unallocate(zio, gn->gn_child[g],
3821 &gn->gn_gbh->zg_blkptr[g]);
3822 }
3823 }
3824 }
3825
3826 /*
3827 * Try to allocate an intent log block. Return 0 on success, errno on failure.
3828 */
3829 int
3830 zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
3831 uint64_t size, boolean_t *slog)
3832 {
3833 int error = 1;
3834 zio_alloc_list_t io_alloc_list;
3835
3836 ASSERT(txg > spa_syncing_txg(spa));
3837
3838 metaslab_trace_init(&io_alloc_list);
3839
3840 /*
3841 * Block pointer fields are useful to metaslabs for stats and debugging.
3842 * Fill in the obvious ones before calling into metaslab_alloc().
3843 */
3844 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
3845 BP_SET_PSIZE(new_bp, size);
3846 BP_SET_LEVEL(new_bp, 0);
3847
3848 /*
3849 * When allocating a zil block, we don't have information about
3850 * the final destination of the block except the objset it's part
3851 * of, so we just hash the objset ID to pick the allocator to get
3852 * some parallelism.
3853 */
3854 int flags = METASLAB_ZIL;
3855 int allocator = (uint_t)cityhash4(0, 0, 0,
3856 os->os_dsl_dataset->ds_object) % spa->spa_alloc_count;
3857 error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
3858 txg, NULL, flags, &io_alloc_list, NULL, allocator);
3859 *slog = (error == 0);
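	/*
	 * Fall back from the dedicated slog class to the embedded log
	 * class, and finally to the normal class, if the preferred class
	 * cannot satisfy the allocation.
	 */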
3860 if (error != 0) {
3861 error = metaslab_alloc(spa, spa_embedded_log_class(spa), size,
3862 new_bp, 1, txg, NULL, flags,
3863 &io_alloc_list, NULL, allocator);
3864 }
3865 if (error != 0) {
3866 error = metaslab_alloc(spa, spa_normal_class(spa), size,
3867 new_bp, 1, txg, NULL, flags,
3868 &io_alloc_list, NULL, allocator);
3869 }
3870 metaslab_trace_fini(&io_alloc_list);
3871
3872 if (error == 0) {
3873 BP_SET_LSIZE(new_bp, size);
3874 BP_SET_PSIZE(new_bp, size);
3875 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
3876 BP_SET_CHECKSUM(new_bp,
3877 spa_version(spa) >= SPA_VERSION_SLIM_ZIL
3878 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
3879 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
3880 BP_SET_LEVEL(new_bp, 0);
3881 BP_SET_DEDUP(new_bp, 0);
3882 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
3883
3884 /*
3885 * encrypted blocks will require an IV and salt. We generate
3886 * these now since we will not be rewriting the bp at
3887 * rewrite time.
3888 */
3889 if (os->os_encrypted) {
3890 uint8_t iv[ZIO_DATA_IV_LEN];
3891 uint8_t salt[ZIO_DATA_SALT_LEN];
3892
3893 BP_SET_CRYPT(new_bp, B_TRUE);
3894 VERIFY0(spa_crypt_get_salt(spa,
3895 dmu_objset_id(os), salt));
3896 VERIFY0(zio_crypt_generate_iv(iv));
3897
3898 zio_crypt_encode_params_bp(new_bp, salt, iv);
3899 }
3900 } else {
3901 zfs_dbgmsg("%s: zil block allocation failure: "
3902 "size %llu, error %d", spa_name(spa), (u_longlong_t)size,
3903 error);
3904 }
3905
3906 return (error);
3907 }
3908
3909 /*
3910 * ==========================================================================
3911 * Read and write to physical devices
3912 * ==========================================================================
3913 */
3914
3915 /*
3916 * Issue an I/O to the underlying vdev. Typically the issue pipeline
3917 * stops after this stage and will resume upon I/O completion.
3918 * However, there are instances where the vdev layer may need to
3919 * continue the pipeline when an I/O was not issued. Since the I/O
3920 * that was sent to the vdev layer might be different than the one
3921 * currently active in the pipeline (see vdev_queue_io()), we explicitly
3922 * force the underlying vdev layers to call either zio_execute() or
3923 * zio_interrupt() to ensure that the pipeline continues with the correct I/O.
3924 */
3925 static zio_t *
3926 zio_vdev_io_start(zio_t *zio)
3927 {
3928 vdev_t *vd = zio->io_vd;
3929 uint64_t align;
3930 spa_t *spa = zio->io_spa;
3931
3932 zio->io_delay = 0;
3933
3934 ASSERT(zio->io_error == 0);
3935 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
3936
3937 if (vd == NULL) {
3938 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
3939 spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
3940
3941 /*
3942 * The mirror_ops handle multiple DVAs in a single BP.
3943 */
3944 vdev_mirror_ops.vdev_op_io_start(zio);
3945 return (NULL);
3946 }
3947
3948 ASSERT3P(zio->io_logical, !=, zio);
3949 if (zio->io_type == ZIO_TYPE_WRITE) {
3950 ASSERT(spa->spa_trust_config);
3951
3952 /*
3953 * Note: the code can handle other kinds of writes,
3954 * but we don't expect them.
3955 */
3956 if (zio->io_vd->vdev_noalloc) {
3957 ASSERT(zio->io_flags &
3958 (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL |
3959 ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE));
3960 }
3961 }
3962
3963 align = 1ULL << vd->vdev_top->vdev_ashift;
3964
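	/*
	 * For example (hypothetical numbers): with a top-level ashift of
	 * 12, align is 4096, so a 6656-byte logical write is copied into
	 * an 8192-byte buffer, zero-padded, and pushed as a transform so
	 * the device sees a full, aligned block.
	 */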
3965 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
3966 P2PHASE(zio->io_size, align) != 0) {
3967 /* Transform logical writes to be a full physical block size. */
3968 uint64_t asize = P2ROUNDUP(zio->io_size, align);
3969 abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize);
3970 ASSERT(vd == vd->vdev_top);
3971 if (zio->io_type == ZIO_TYPE_WRITE) {
3972 abd_copy(abuf, zio->io_abd, zio->io_size);
3973 abd_zero_off(abuf, zio->io_size, asize - zio->io_size);
3974 }
3975 zio_push_transform(zio, abuf, asize, asize, zio_subblock);
3976 }
3977
3978 /*
3979 * If this is not a physical io, make sure that it is properly aligned
3980 * before proceeding.
3981 */
3982 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
3983 ASSERT0(P2PHASE(zio->io_offset, align));
3984 ASSERT0(P2PHASE(zio->io_size, align));
3985 } else {
3986 /*
3987 * For physical writes, we allow 512b aligned writes and assume
3988 * the device will perform a read-modify-write as necessary.
3989 */
3990 ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
3991 ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
3992 }
3993
3994 VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
3995
3996 /*
3997 * If this is a repair I/O, and there's no self-healing involved --
3998 * that is, we're just resilvering what we expect to resilver --
3999 * then don't do the I/O unless zio's txg is actually in vd's DTL.
4000 * This prevents spurious resilvering.
4001 *
4002 * There are a few ways that we can end up creating these spurious
4003 * resilver i/os:
4004 *
4005 * 1. A resilver i/o will be issued if any DVA in the BP has a
4006 * dirty DTL. The mirror code will issue resilver writes to
4007 * each DVA, including the one(s) that are not on vdevs with dirty
4008 * DTLs.
4009 *
4010 * 2. With nested replication, which happens when we have a
4011 * "replacing" or "spare" vdev that's a child of a mirror or raidz.
4012 * For example, given mirror(replacing(A+B), C), it's likely that
4013 * only A is out of date (it's the new device). In this case, we'll
4014 * read from C, then use the data to resilver A+B -- but we don't
4015 * actually want to resilver B, just A. The top-level mirror has no
4016 * way to know this, so instead we just discard unnecessary repairs
4017 * as we work our way down the vdev tree.
4018 *
4019 * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc.
4020 * The same logic applies to any form of nested replication: ditto
4021 * + mirror, RAID-Z + replacing, etc.
4022 *
4023 * However, indirect vdevs point off to other vdevs which may have
4024 * DTL's, so we never bypass them. The child i/os on concrete vdevs
4025 * will be properly bypassed instead.
4026 *
4027 * Leaf DTL_PARTIAL can be empty when a legitimate write comes from
4028 * a dRAID spare vdev. For example, when a dRAID spare is first
4029 	 * used, its spare blocks need to be written to, but the leaf vdevs
4030 	 * of such blocks can have an empty DTL_PARTIAL.
4031 *
4032 * There seemed no clean way to allow such writes while bypassing
4033 * spurious ones. At this point, just avoid all bypassing for dRAID
4034 * for correctness.
4035 */
4036 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
4037 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
4038 zio->io_txg != 0 && /* not a delegated i/o */
4039 vd->vdev_ops != &vdev_indirect_ops &&
4040 vd->vdev_top->vdev_ops != &vdev_draid_ops &&
4041 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
4042 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
4043 zio_vdev_io_bypass(zio);
4044 return (zio);
4045 }
4046
4047 /*
4048 * Select the next best leaf I/O to process. Distributed spares are
4049 * excluded since they dispatch the I/O directly to a leaf vdev after
4050 * applying the dRAID mapping.
4051 */
4052 if (vd->vdev_ops->vdev_op_leaf &&
4053 vd->vdev_ops != &vdev_draid_spare_ops &&
4054 (zio->io_type == ZIO_TYPE_READ ||
4055 zio->io_type == ZIO_TYPE_WRITE ||
4056 zio->io_type == ZIO_TYPE_TRIM)) {
4057
4058 if (zio_handle_device_injection(vd, zio, ENOSYS) != 0) {
4059 /*
4060 * "no-op" injections return success, but do no actual
4061 * work. Just skip the remaining vdev stages.
4062 */
4063 zio_vdev_io_bypass(zio);
4064 zio_interrupt(zio);
4065 return (NULL);
4066 }
4067
4068 if ((zio = vdev_queue_io(zio)) == NULL)
4069 return (NULL);
4070
4071 if (!vdev_accessible(vd, zio)) {
4072 zio->io_error = SET_ERROR(ENXIO);
4073 zio_interrupt(zio);
4074 return (NULL);
4075 }
4076 zio->io_delay = gethrtime();
4077 }
4078
4079 vd->vdev_ops->vdev_op_io_start(zio);
4080 return (NULL);
4081 }
4082
4083 static zio_t *
4084 zio_vdev_io_done(zio_t *zio)
4085 {
4086 vdev_t *vd = zio->io_vd;
4087 vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
4088 boolean_t unexpected_error = B_FALSE;
4089
4090 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
4091 return (NULL);
4092 }
4093
4094 ASSERT(zio->io_type == ZIO_TYPE_READ ||
4095 zio->io_type == ZIO_TYPE_WRITE ||
4096 zio->io_type == ZIO_TYPE_FLUSH ||
4097 zio->io_type == ZIO_TYPE_TRIM);
4098
4099 if (zio->io_delay)
4100 zio->io_delay = gethrtime() - zio->io_delay;
4101
4102 if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
4103 vd->vdev_ops != &vdev_draid_spare_ops) {
4104 if (zio->io_type != ZIO_TYPE_FLUSH)
4105 vdev_queue_io_done(zio);
4106
4107 if (zio_injection_enabled && zio->io_error == 0)
4108 zio->io_error = zio_handle_device_injections(vd, zio,
4109 EIO, EILSEQ);
4110
4111 if (zio_injection_enabled && zio->io_error == 0)
4112 zio->io_error = zio_handle_label_injection(zio, EIO);
4113
4114 if (zio->io_error && zio->io_type != ZIO_TYPE_FLUSH &&
4115 zio->io_type != ZIO_TYPE_TRIM) {
4116 if (!vdev_accessible(vd, zio)) {
4117 zio->io_error = SET_ERROR(ENXIO);
4118 } else {
4119 unexpected_error = B_TRUE;
4120 }
4121 }
4122 }
4123
4124 ops->vdev_op_io_done(zio);
4125
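	/*
	 * An unexpected error on a leaf that is still accessible triggers
	 * a vdev probe (unless the device is already marked for removal)
	 * so the health of the device can be re-evaluated.
	 */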
4126 if (unexpected_error && vd->vdev_remove_wanted == B_FALSE)
4127 VERIFY(vdev_probe(vd, zio) == NULL);
4128
4129 return (zio);
4130 }
4131
4132 /*
4133 * This function is used to change the priority of an existing zio that is
4134 * currently in-flight. This is used by the arc to upgrade priority in the
4135 * event that a demand read is made for a block that is currently queued
4136 * as a scrub or async read IO. Otherwise, the high priority read request
4137 * would end up having to wait for the lower priority IO.
4138 */
4139 void
4140 zio_change_priority(zio_t *pio, zio_priority_t priority)
4141 {
4142 zio_t *cio, *cio_next;
4143 zio_link_t *zl = NULL;
4144
4145 ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
4146
4147 if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) {
4148 vdev_queue_change_io_priority(pio, priority);
4149 } else {
4150 pio->io_priority = priority;
4151 }
4152
4153 mutex_enter(&pio->io_lock);
4154 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
4155 cio_next = zio_walk_children(pio, &zl);
4156 zio_change_priority(cio, priority);
4157 }
4158 mutex_exit(&pio->io_lock);
4159 }
4160
4161 /*
4162 * For non-raidz ZIOs, we can just copy aside the bad data read from the
4163 * disk, and use that to finish the checksum ereport later.
4164 */
4165 static void
4166 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
4167 const abd_t *good_buf)
4168 {
4169 /* no processing needed */
4170 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
4171 }
4172
4173 void
4174 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr)
4175 {
4176 void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size);
4177
4178 abd_copy(abd, zio->io_abd, zio->io_size);
4179
4180 zcr->zcr_cbinfo = zio->io_size;
4181 zcr->zcr_cbdata = abd;
4182 zcr->zcr_finish = zio_vsd_default_cksum_finish;
4183 zcr->zcr_free = zio_abd_free;
4184 }
4185
4186 static zio_t *
4187 zio_vdev_io_assess(zio_t *zio)
4188 {
4189 vdev_t *vd = zio->io_vd;
4190
4191 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
4192 return (NULL);
4193 }
4194
4195 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
4196 spa_config_exit(zio->io_spa, SCL_ZIO, zio);
4197
4198 if (zio->io_vsd != NULL) {
4199 zio->io_vsd_ops->vsd_free(zio);
4200 zio->io_vsd = NULL;
4201 }
4202
4203 if (zio_injection_enabled && zio->io_error == 0)
4204 zio->io_error = zio_handle_fault_injection(zio, EIO);
4205
4206 /*
4207 * If the I/O failed, determine whether we should attempt to retry it.
4208 *
4209 * On retry, we cut in line in the issue queue, since we don't want
4210 * compression/checksumming/etc. work to prevent our (cheap) IO reissue.
4211 */
4212 if (zio->io_error && vd == NULL &&
4213 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
4214 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */
4215 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */
4216 zio->io_error = 0;
4217 zio->io_flags |= ZIO_FLAG_IO_RETRY | ZIO_FLAG_DONT_AGGREGATE;
4218 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
4219 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
4220 zio_requeue_io_start_cut_in_line);
4221 return (NULL);
4222 }
4223
4224 /*
4225 * If we got an error on a leaf device, convert it to ENXIO
4226 * if the device is not accessible at all.
4227 */
4228 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
4229 !vdev_accessible(vd, zio))
4230 zio->io_error = SET_ERROR(ENXIO);
4231
4232 /*
4233 * If we can't write to an interior vdev (mirror or RAID-Z),
4234 * set vdev_cant_write so that we stop trying to allocate from it.
4235 */
4236 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
4237 vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
4238 vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting "
4239 "cant_write=TRUE due to write failure with ENXIO",
4240 zio);
4241 vd->vdev_cant_write = B_TRUE;
4242 }
4243
4244 /*
4245 * If a cache flush returns ENOTSUP or ENOTTY, we know that no future
4246 * attempts will ever succeed. In this case we set a persistent
4247 * boolean flag so that we don't bother with it in the future.
4248 */
4249 if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) &&
4250 zio->io_type == ZIO_TYPE_FLUSH && vd != NULL)
4251 vd->vdev_nowritecache = B_TRUE;
4252
4253 if (zio->io_error)
4254 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
4255
4256 return (zio);
4257 }
4258
4259 void
4260 zio_vdev_io_reissue(zio_t *zio)
4261 {
4262 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
4263 ASSERT(zio->io_error == 0);
4264
4265 zio->io_stage >>= 1;
4266 }
4267
4268 void
4269 zio_vdev_io_redone(zio_t *zio)
4270 {
4271 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
4272
4273 zio->io_stage >>= 1;
4274 }
4275
4276 void
4277 zio_vdev_io_bypass(zio_t *zio)
4278 {
4279 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
4280 ASSERT(zio->io_error == 0);
4281
4282 zio->io_flags |= ZIO_FLAG_IO_BYPASS;
4283 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
4284 }
4285
4286 /*
4287 * ==========================================================================
4288 * Encrypt and store encryption parameters
4289 * ==========================================================================
4290 */
4291
4292
4293 /*
4294 * This function is used for ZIO_STAGE_ENCRYPT. It is responsible for
4295 * managing the storage of encryption parameters and passing them to the
4296 * lower-level encryption functions.
4297 */
4298 static zio_t *
4299 zio_encrypt(zio_t *zio)
4300 {
4301 zio_prop_t *zp = &zio->io_prop;
4302 spa_t *spa = zio->io_spa;
4303 blkptr_t *bp = zio->io_bp;
4304 uint64_t psize = BP_GET_PSIZE(bp);
4305 uint64_t dsobj = zio->io_bookmark.zb_objset;
4306 dmu_object_type_t ot = BP_GET_TYPE(bp);
4307 void *enc_buf = NULL;
4308 abd_t *eabd = NULL;
4309 uint8_t salt[ZIO_DATA_SALT_LEN];
4310 uint8_t iv[ZIO_DATA_IV_LEN];
4311 uint8_t mac[ZIO_DATA_MAC_LEN];
4312 boolean_t no_crypt = B_FALSE;
4313
4314 /* the root zio already encrypted the data */
4315 if (zio->io_child_type == ZIO_CHILD_GANG)
4316 return (zio);
4317
4318 /* only ZIL blocks are re-encrypted on rewrite */
4319 if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG)
4320 return (zio);
4321
4322 if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) {
4323 BP_SET_CRYPT(bp, B_FALSE);
4324 return (zio);
4325 }
4326
4327 /* if we are doing raw encryption set the provided encryption params */
4328 if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) {
4329 ASSERT0(BP_GET_LEVEL(bp));
4330 BP_SET_CRYPT(bp, B_TRUE);
4331 BP_SET_BYTEORDER(bp, zp->zp_byteorder);
4332 if (ot != DMU_OT_OBJSET)
4333 zio_crypt_encode_mac_bp(bp, zp->zp_mac);
4334
4335 /* dnode blocks must be written out in the provided byteorder */
4336 if (zp->zp_byteorder != ZFS_HOST_BYTEORDER &&
4337 ot == DMU_OT_DNODE) {
4338 void *bswap_buf = zio_buf_alloc(psize);
4339 abd_t *babd = abd_get_from_buf(bswap_buf, psize);
4340
4341 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
4342 abd_copy_to_buf(bswap_buf, zio->io_abd, psize);
4343 dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf,
4344 psize);
4345
4346 abd_take_ownership_of_buf(babd, B_TRUE);
4347 zio_push_transform(zio, babd, psize, psize, NULL);
4348 }
4349
4350 if (DMU_OT_IS_ENCRYPTED(ot))
4351 zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv);
4352 return (zio);
4353 }
4354
4355 /* indirect blocks only maintain a cksum of the lower level MACs */
4356 if (BP_GET_LEVEL(bp) > 0) {
4357 BP_SET_CRYPT(bp, B_TRUE);
4358 VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE,
4359 zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp),
4360 mac));
4361 zio_crypt_encode_mac_bp(bp, mac);
4362 return (zio);
4363 }
4364
4365 /*
4366 * Objset blocks are a special case since they have 2 256-bit MACs
4367 * embedded within them.
4368 */
4369 if (ot == DMU_OT_OBJSET) {
4370 ASSERT0(DMU_OT_IS_ENCRYPTED(ot));
4371 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
4372 BP_SET_CRYPT(bp, B_TRUE);
4373 VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj,
4374 zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp)));
4375 return (zio);
4376 }
4377
4378 /* unencrypted object types are only authenticated with a MAC */
4379 if (!DMU_OT_IS_ENCRYPTED(ot)) {
4380 BP_SET_CRYPT(bp, B_TRUE);
4381 VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj,
4382 zio->io_abd, psize, mac));
4383 zio_crypt_encode_mac_bp(bp, mac);
4384 return (zio);
4385 }
4386
4387 /*
4388 * Later passes of sync-to-convergence may decide to rewrite data
4389 * in place to avoid more disk reallocations. This presents a problem
4390 * for encryption because this constitutes rewriting the new data with
4391 * the same encryption key and IV. However, this only applies to blocks
4392 * in the MOS (particularly the spacemaps) and we do not encrypt the
4393 * MOS. We assert that the zio is allocating or an intent log write
4394 * to enforce this.
4395 */
4396 ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG);
4397 ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG);
4398 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
4399 ASSERT3U(psize, !=, 0);
4400
4401 enc_buf = zio_buf_alloc(psize);
4402 eabd = abd_get_from_buf(enc_buf, psize);
4403 abd_take_ownership_of_buf(eabd, B_TRUE);
4404
4405 /*
4406 * For an explanation of what encryption parameters are stored
4407 * where, see the block comment in zio_crypt.c.
4408 */
4409 if (ot == DMU_OT_INTENT_LOG) {
4410 zio_crypt_decode_params_bp(bp, salt, iv);
4411 } else {
4412 BP_SET_CRYPT(bp, B_TRUE);
4413 }
4414
4415 /* Perform the encryption. This should not fail */
4416 VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark,
4417 BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
4418 salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt));
4419
4420 /* encode encryption metadata into the bp */
4421 if (ot == DMU_OT_INTENT_LOG) {
4422 /*
4423 * ZIL blocks store the MAC in the embedded checksum, so the
4424 * transform must always be applied.
4425 */
4426 zio_crypt_encode_mac_zil(enc_buf, mac);
4427 zio_push_transform(zio, eabd, psize, psize, NULL);
4428 } else {
4429 BP_SET_CRYPT(bp, B_TRUE);
4430 zio_crypt_encode_params_bp(bp, salt, iv);
4431 zio_crypt_encode_mac_bp(bp, mac);
4432
4433 if (no_crypt) {
4434 ASSERT3U(ot, ==, DMU_OT_DNODE);
4435 abd_free(eabd);
4436 } else {
4437 zio_push_transform(zio, eabd, psize, psize, NULL);
4438 }
4439 }
4440
4441 return (zio);
4442 }
4443
4444 /*
4445 * ==========================================================================
4446 * Generate and verify checksums
4447 * ==========================================================================
4448 */
4449 static zio_t *
4450 zio_checksum_generate(zio_t *zio)
4451 {
4452 blkptr_t *bp = zio->io_bp;
4453 enum zio_checksum checksum;
4454
4455 if (bp == NULL) {
4456 /*
4457 * This is zio_write_phys().
4458 * We're either generating a label checksum, or none at all.
4459 */
4460 checksum = zio->io_prop.zp_checksum;
4461
4462 if (checksum == ZIO_CHECKSUM_OFF)
4463 return (zio);
4464
4465 ASSERT(checksum == ZIO_CHECKSUM_LABEL);
4466 } else {
4467 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
4468 ASSERT(!IO_IS_ALLOCATING(zio));
4469 checksum = ZIO_CHECKSUM_GANG_HEADER;
4470 } else {
4471 checksum = BP_GET_CHECKSUM(bp);
4472 }
4473 }
4474
4475 zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
4476
4477 return (zio);
4478 }
4479
4480 static zio_t *
4481 zio_checksum_verify(zio_t *zio)
4482 {
4483 zio_bad_cksum_t info;
4484 blkptr_t *bp = zio->io_bp;
4485 int error;
4486
4487 ASSERT(zio->io_vd != NULL);
4488
4489 if (bp == NULL) {
4490 /*
4491 * This is zio_read_phys().
4492 * We're either verifying a label checksum, or nothing at all.
4493 */
4494 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
4495 return (zio);
4496
4497 ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL);
4498 }
4499
4500 if ((error = zio_checksum_error(zio, &info)) != 0) {
4501 zio->io_error = error;
4502 if (error == ECKSUM &&
4503 !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
4504 mutex_enter(&zio->io_vd->vdev_stat_lock);
4505 zio->io_vd->vdev_stat.vs_checksum_errors++;
4506 mutex_exit(&zio->io_vd->vdev_stat_lock);
4507 (void) zfs_ereport_start_checksum(zio->io_spa,
4508 zio->io_vd, &zio->io_bookmark, zio,
4509 zio->io_offset, zio->io_size, &info);
4510 }
4511 }
4512
4513 return (zio);
4514 }
4515
4516 /*
4517 * Called by RAID-Z to ensure we don't compute the checksum twice.
4518 */
4519 void
4520 zio_checksum_verified(zio_t *zio)
4521 {
4522 zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
4523 }
4524
4525 /*
4526 * ==========================================================================
4527 * Error rank. Error are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
4528 * An error of 0 indicates success. ENXIO indicates whole-device failure,
4529 * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO
4530 * indicate errors that are specific to one I/O, and most likely permanent.
4531 * Any other error is presumed to be worse because we weren't expecting it.
4532 * ==========================================================================
4533 */
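/*
 * For example, zio_worst_error(ENXIO, ECKSUM) returns ECKSUM, and an
 * error missing from the rank table (e.g. EINVAL) outranks them all.
 */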
4534 int
4535 zio_worst_error(int e1, int e2)
4536 {
4537 static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
4538 int r1, r2;
4539
4540 for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
4541 if (e1 == zio_error_rank[r1])
4542 break;
4543
4544 for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
4545 if (e2 == zio_error_rank[r2])
4546 break;
4547
4548 return (r1 > r2 ? e1 : e2);
4549 }
4550
4551 /*
4552 * ==========================================================================
4553 * I/O completion
4554 * ==========================================================================
4555 */
4556 static zio_t *
4557 zio_ready(zio_t *zio)
4558 {
4559 blkptr_t *bp = zio->io_bp;
4560 zio_t *pio, *pio_next;
4561 zio_link_t *zl = NULL;
4562
4563 if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
4564 ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, ZIO_WAIT_READY)) {
4565 return (NULL);
4566 }
4567
4568 if (zio->io_ready) {
4569 ASSERT(IO_IS_ALLOCATING(zio));
4570 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg ||
4571 BP_IS_HOLE(bp) || (zio->io_flags & ZIO_FLAG_NOPWRITE));
4572 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
4573
4574 zio->io_ready(zio);
4575 }
4576
4577 #ifdef ZFS_DEBUG
4578 if (bp != NULL && bp != &zio->io_bp_copy)
4579 zio->io_bp_copy = *bp;
4580 #endif
4581
4582 if (zio->io_error != 0) {
4583 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
4584
4585 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
4586 ASSERT(IO_IS_ALLOCATING(zio));
4587 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
4588 ASSERT(zio->io_metaslab_class != NULL);
4589 ASSERT(ZIO_HAS_ALLOCATOR(zio));
4590
4591 /*
4592 * We were unable to allocate anything, unreserve and
4593 * issue the next I/O to allocate.
4594 */
4595 metaslab_class_throttle_unreserve(
4596 zio->io_metaslab_class, zio->io_prop.zp_copies,
4597 zio->io_allocator, zio);
4598 zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
4599 }
4600 }
4601
4602 mutex_enter(&zio->io_lock);
4603 zio->io_state[ZIO_WAIT_READY] = 1;
4604 pio = zio_walk_parents(zio, &zl);
4605 mutex_exit(&zio->io_lock);
4606
4607 /*
4608 * As we notify zio's parents, new parents could be added.
4609 * New parents go to the head of zio's io_parent_list, however,
4610 * so we will (correctly) not notify them. The remainder of zio's
4611 * io_parent_list, from 'pio_next' onward, cannot change because
4612 * all parents must wait for us to be done before they can be done.
4613 */
4614 for (; pio != NULL; pio = pio_next) {
4615 pio_next = zio_walk_parents(zio, &zl);
4616 zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL);
4617 }
4618
4619 if (zio->io_flags & ZIO_FLAG_NODATA) {
4620 if (bp != NULL && BP_IS_GANG(bp)) {
4621 zio->io_flags &= ~ZIO_FLAG_NODATA;
4622 } else {
4623 ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
4624 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
4625 }
4626 }
4627
4628 if (zio_injection_enabled &&
4629 zio->io_spa->spa_syncing_txg == zio->io_txg)
4630 zio_handle_ignored_writes(zio);
4631
4632 return (zio);
4633 }
4634
4635 /*
4636 * Update the allocation throttle accounting.
4637 */
4638 static void
4639 zio_dva_throttle_done(zio_t *zio)
4640 {
4641 zio_t *lio __maybe_unused = zio->io_logical;
4642 zio_t *pio = zio_unique_parent(zio);
4643 vdev_t *vd = zio->io_vd;
4644 int flags = METASLAB_ASYNC_ALLOC;
4645
4646 ASSERT3P(zio->io_bp, !=, NULL);
4647 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
4648 ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
4649 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
4650 ASSERT(vd != NULL);
4651 ASSERT3P(vd, ==, vd->vdev_top);
4652 ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY));
4653 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
4654 ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
4655 ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
4656 ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));
4657
4658 /*
4659 * Parents of gang children can have two flavors -- ones that
4660 * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
4661 * and ones that allocated the constituent blocks. The allocation
4662 * throttle needs to know the allocating parent zio so we must find
4663 * it here.
4664 */
4665 if (pio->io_child_type == ZIO_CHILD_GANG) {
4666 /*
4667 * If our parent is a rewrite gang child then our grandparent
4668 * would have been the one that performed the allocation.
4669 */
4670 if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
4671 pio = zio_unique_parent(pio);
4672 flags |= METASLAB_GANG_CHILD;
4673 }
4674
4675 ASSERT(IO_IS_ALLOCATING(pio));
4676 ASSERT(ZIO_HAS_ALLOCATOR(pio));
4677 ASSERT3P(zio, !=, zio->io_logical);
4678 ASSERT(zio->io_logical != NULL);
4679 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
4680 ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
4681 ASSERT(zio->io_metaslab_class != NULL);
4682
4683 mutex_enter(&pio->io_lock);
4684 metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
4685 pio->io_allocator, B_TRUE);
4686 mutex_exit(&pio->io_lock);
4687
4688 metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1,
4689 pio->io_allocator, pio);
4690
4691 /*
4692 * Call into the pipeline to see if there is more work that
4693 * needs to be done. If there is work to be done it will be
4694 * dispatched to another taskq thread.
4695 */
4696 zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
4697 }
4698
4699 static zio_t *
4700 zio_done(zio_t *zio)
4701 {
4702 /*
4703 * Always attempt to keep stack usage minimal here since
4704 * we can be called recursively up to 19 levels deep.
4705 */
4706 const uint64_t psize = zio->io_size;
4707 zio_t *pio, *pio_next;
4708 zio_link_t *zl = NULL;
4709
4710 /*
4711 * If our children haven't all completed,
4712 * wait for them and then repeat this pipeline stage.
4713 */
4714 if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
4715 return (NULL);
4716 }
4717
4718 /*
4719 * If the allocation throttle is enabled, then update the accounting.
4720 * We only track child I/Os that are part of an allocating async
4721 * write. We must do this since the allocation is performed
4722 * by the logical I/O but the actual write is done by child I/Os.
4723 */
4724 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
4725 zio->io_child_type == ZIO_CHILD_VDEV) {
4726 ASSERT(zio->io_metaslab_class != NULL);
4727 ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);
4728 zio_dva_throttle_done(zio);
4729 }
4730
4731 /*
4732 * If the allocation throttle is enabled, verify that
4733 * we have decremented the refcounts for every I/O that was throttled.
4734 */
4735 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
4736 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
4737 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
4738 ASSERT(zio->io_bp != NULL);
4739 ASSERT(ZIO_HAS_ALLOCATOR(zio));
4740
4741 metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio,
4742 zio->io_allocator);
4743 VERIFY(zfs_refcount_not_held(&zio->io_metaslab_class->
4744 mc_allocator[zio->io_allocator].mca_alloc_slots, zio));
4745 }
4746
4747
4748 for (int c = 0; c < ZIO_CHILD_TYPES; c++)
4749 for (int w = 0; w < ZIO_WAIT_TYPES; w++)
4750 ASSERT(zio->io_children[c][w] == 0);
4751
4752 if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
4753 ASSERT(zio->io_bp->blk_pad[0] == 0);
4754 ASSERT(zio->io_bp->blk_pad[1] == 0);
4755 ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy,
4756 sizeof (blkptr_t)) == 0 ||
4757 (zio->io_bp == zio_unique_parent(zio)->io_bp));
4758 if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
4759 zio->io_bp_override == NULL &&
4760 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
4761 ASSERT3U(zio->io_prop.zp_copies, <=,
4762 BP_GET_NDVAS(zio->io_bp));
4763 ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 ||
4764 (BP_COUNT_GANG(zio->io_bp) ==
4765 BP_GET_NDVAS(zio->io_bp)));
4766 }
4767 if (zio->io_flags & ZIO_FLAG_NOPWRITE)
4768 VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
4769 }
4770
4771 /*
4772 * If there were child vdev/gang/ddt errors, they apply to us now.
4773 */
4774 zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
4775 zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
4776 zio_inherit_child_errors(zio, ZIO_CHILD_DDT);
4777
4778 /*
4779 * If the I/O on the transformed data was successful, generate any
4780 * checksum reports now while we still have the transformed data.
4781 */
4782 if (zio->io_error == 0) {
4783 while (zio->io_cksum_report != NULL) {
4784 zio_cksum_report_t *zcr = zio->io_cksum_report;
4785 uint64_t align = zcr->zcr_align;
4786 uint64_t asize = P2ROUNDUP(psize, align);
4787 abd_t *adata = zio->io_abd;
4788
4789 if (adata != NULL && asize != psize) {
4790 adata = abd_alloc(asize, B_TRUE);
4791 abd_copy(adata, zio->io_abd, psize);
4792 abd_zero_off(adata, psize, asize - psize);
4793 }
4794
4795 zio->io_cksum_report = zcr->zcr_next;
4796 zcr->zcr_next = NULL;
4797 zcr->zcr_finish(zcr, adata);
4798 zfs_ereport_free_checksum(zcr);
4799
4800 if (adata != NULL && asize != psize)
4801 abd_free(adata);
4802 }
4803 }
4804
4805 zio_pop_transforms(zio); /* note: may set zio->io_error */
4806
4807 vdev_stat_update(zio, psize);
4808
4809 /*
4810 	 * If this I/O is attached to a particular vdev and is slow, exceeding
4811 	 * 30 seconds to complete, post an error describing the I/O delay.
4812 * We ignore these errors if the device is currently unavailable.
4813 */
4814 if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) {
4815 if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) {
4816 /*
4817 * We want to only increment our slow IO counters if
4818 * the IO is valid (i.e. not if the drive is removed).
4819 *
4820 * zfs_ereport_post() will also do these checks, but
4821 * it can also ratelimit and have other failures, so we
4822 * need to increment the slow_io counters independent
4823 * of it.
4824 */
4825 if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY,
4826 zio->io_spa, zio->io_vd, zio)) {
4827 mutex_enter(&zio->io_vd->vdev_stat_lock);
4828 zio->io_vd->vdev_stat.vs_slow_ios++;
4829 mutex_exit(&zio->io_vd->vdev_stat_lock);
4830
4831 (void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
4832 zio->io_spa, zio->io_vd, &zio->io_bookmark,
4833 zio, 0);
4834 }
4835 }
4836 }
4837
4838 if (zio->io_error) {
4839 /*
4840 * If this I/O is attached to a particular vdev,
4841 * generate an error message describing the I/O failure
4842 * at the block level. We ignore these errors if the
4843 * device is currently unavailable.
4844 */
4845 if (zio->io_error != ECKSUM && zio->io_vd != NULL &&
4846 !vdev_is_dead(zio->io_vd)) {
4847 int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO,
4848 zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
4849 if (ret != EALREADY) {
4850 mutex_enter(&zio->io_vd->vdev_stat_lock);
4851 if (zio->io_type == ZIO_TYPE_READ)
4852 zio->io_vd->vdev_stat.vs_read_errors++;
4853 else if (zio->io_type == ZIO_TYPE_WRITE)
4854 zio->io_vd->vdev_stat.vs_write_errors++;
4855 mutex_exit(&zio->io_vd->vdev_stat_lock);
4856 }
4857 }
4858
4859 if ((zio->io_error == EIO || !(zio->io_flags &
4860 (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
4861 zio == zio->io_logical) {
4862 /*
4863 * For logical I/O requests, tell the SPA to log the
4864 * error and generate a logical data ereport.
4865 */
4866 spa_log_error(zio->io_spa, &zio->io_bookmark,
4867 BP_GET_LOGICAL_BIRTH(zio->io_bp));
4868 (void) zfs_ereport_post(FM_EREPORT_ZFS_DATA,
4869 zio->io_spa, NULL, &zio->io_bookmark, zio, 0);
4870 }
4871 }
4872
4873 if (zio->io_error && zio == zio->io_logical) {
4874 /*
4875 * Determine whether zio should be reexecuted. This will
4876 * propagate all the way to the root via zio_notify_parent().
4877 */
4878 ASSERT(zio->io_vd == NULL && zio->io_bp != NULL);
4879 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
4880
4881 if (IO_IS_ALLOCATING(zio) &&
4882 !(zio->io_flags & ZIO_FLAG_CANFAIL)) {
4883 if (zio->io_error != ENOSPC)
4884 zio->io_reexecute |= ZIO_REEXECUTE_NOW;
4885 else
4886 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
4887 }
4888
4889 if ((zio->io_type == ZIO_TYPE_READ ||
4890 zio->io_type == ZIO_TYPE_FREE) &&
4891 !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
4892 zio->io_error == ENXIO &&
4893 spa_load_state(zio->io_spa) == SPA_LOAD_NONE &&
4894 spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE)
4895 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
4896
4897 if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
4898 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
4899
4900 /*
4901 * Here is a possibly good place to attempt to do
4902 * either combinatorial reconstruction or error correction
4903 * based on checksums. It also might be a good place
4904 * to send out preliminary ereports before we suspend
4905 * processing.
4906 */
4907 }
4908
4909 /*
4910 * If there were logical child errors, they apply to us now.
4911 * We defer this until now to avoid conflating logical child
4912 * errors with errors that happened to the zio itself when
4913 * updating vdev stats and reporting FMA events above.
4914 */
4915 zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);
4916
4917 if ((zio->io_error || zio->io_reexecute) &&
4918 IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
4919 !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
4920 zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp);
4921
4922 zio_gang_tree_free(&zio->io_gang_tree);
4923
4924 /*
4925 * Godfather I/Os should never suspend.
4926 */
4927 if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
4928 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
4929 zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND;
4930
4931 if (zio->io_reexecute) {
4932 /*
4933 * This is a logical I/O that wants to reexecute.
4934 *
4935 * Reexecute is top-down. When an i/o fails, if it's not
4936 * the root, it simply notifies its parent and sticks around.
4937 * The parent, seeing that it still has children in zio_done(),
4938 * does the same. This percolates all the way up to the root.
4939 * The root i/o will reexecute or suspend the entire tree.
4940 *
4941 * This approach ensures that zio_reexecute() honors
4942 * all the original i/o dependency relationships, e.g.
4943 * parents not executing until children are ready.
4944 */
4945 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
4946
4947 zio->io_gang_leader = NULL;
4948
4949 mutex_enter(&zio->io_lock);
4950 zio->io_state[ZIO_WAIT_DONE] = 1;
4951 mutex_exit(&zio->io_lock);
4952
4953 /*
4954 * "The Godfather" I/O monitors its children but is
4955 * not a true parent to them. It will track them through
4956 * the pipeline but severs its ties whenever they get into
4957 * trouble (e.g. suspended). This allows "The Godfather"
4958 * I/O to return status without blocking.
4959 */
4960 zl = NULL;
4961 for (pio = zio_walk_parents(zio, &zl); pio != NULL;
4962 pio = pio_next) {
4963 zio_link_t *remove_zl = zl;
4964 pio_next = zio_walk_parents(zio, &zl);
4965
4966 if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
4967 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
4968 zio_remove_child(pio, zio, remove_zl);
4969 /*
4970 * This is a rare code path, so we don't
4971 * bother with "next_to_execute".
4972 */
4973 zio_notify_parent(pio, zio, ZIO_WAIT_DONE,
4974 NULL);
4975 }
4976 }
4977
4978 if ((pio = zio_unique_parent(zio)) != NULL) {
4979 /*
4980 * We're not a root i/o, so there's nothing to do
4981 * but notify our parent. Don't propagate errors
4982 * upward since we haven't permanently failed yet.
4983 */
4984 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
4985 zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
4986 /*
4987 * This is a rare code path, so we don't bother with
4988 * "next_to_execute".
4989 */
4990 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL);
4991 } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
4992 /*
4993 * We'd fail again if we reexecuted now, so suspend
4994 * until conditions improve (e.g. device comes online).
4995 */
4996 zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
4997 } else {
4998 /*
4999 * Reexecution is potentially a huge amount of work.
5000 * Hand it off to the otherwise-unused claim taskq.
5001 */
5002 spa_taskq_dispatch(zio->io_spa,
5003 ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
5004 zio_reexecute, zio, B_FALSE);
5005 }
5006 return (NULL);
5007 }

	ASSERT(list_is_empty(&zio->io_child_list));
	ASSERT(zio->io_reexecute == 0);
	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));

	/*
	 * Report any checksum errors, since the I/O is complete.
	 */
	while (zio->io_cksum_report != NULL) {
		zio_cksum_report_t *zcr = zio->io_cksum_report;
		zio->io_cksum_report = zcr->zcr_next;
		zcr->zcr_next = NULL;
		zcr->zcr_finish(zcr, NULL);
		zfs_ereport_free_checksum(zcr);
	}

	/*
	 * It is the responsibility of the done callback to ensure that this
	 * particular zio is no longer discoverable for adoption, and as
	 * such, cannot acquire any new parents.
	 */
	if (zio->io_done)
		zio->io_done(zio);

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_DONE] = 1;
	mutex_exit(&zio->io_lock);

	/*
	 * We are done executing this zio. We may want to execute a parent
	 * next. See the comment in zio_notify_parent().
	 */
	zio_t *next_to_execute = NULL;
	zl = NULL;
	for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
		zio_link_t *remove_zl = zl;
		pio_next = zio_walk_parents(zio, &zl);
		zio_remove_child(pio, zio, remove_zl);
		zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute);
	}

	if (zio->io_waiter != NULL) {
		mutex_enter(&zio->io_lock);
		zio->io_executor = NULL;
		cv_broadcast(&zio->io_cv);
		mutex_exit(&zio->io_lock);
	} else {
		zio_destroy(zio);
	}

	return (next_to_execute);
}

/*
 * ==========================================================================
 * I/O pipeline definition
 * ==========================================================================
 */
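/*
 * Note that the order of entries here must match the order of the
 * ZIO_STAGE_* bits defined in zio_impl.h: zio_execute() picks the next bit
 * set in zio->io_pipeline and indexes this table by that bit's position.
 * The leading NULL corresponds to ZIO_STAGE_OPEN, which needs no processing
 * function.  Adding, removing, or reordering a stage therefore requires
 * updating both the bit definitions and this table together.
 */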
static zio_pipe_stage_t *zio_pipeline[] = {
	NULL,
	zio_read_bp_init,
	zio_write_bp_init,
	zio_free_bp_init,
	zio_issue_async,
	zio_write_compress,
	zio_encrypt,
	zio_checksum_generate,
	zio_nop_write,
	zio_brt_free,
	zio_ddt_read_start,
	zio_ddt_read_done,
	zio_ddt_write,
	zio_ddt_free,
	zio_gang_assemble,
	zio_gang_issue,
	zio_dva_throttle,
	zio_dva_allocate,
	zio_dva_free,
	zio_dva_claim,
	zio_ready,
	zio_vdev_io_start,
	zio_vdev_io_done,
	zio_vdev_io_assess,
	zio_checksum_verify,
	zio_done
};

/*
 * Compare two zbookmark_phys_t's to see which we would reach first in a
 * pre-order traversal of the object tree.
 *
 * This is simple in every case aside from the meta-dnode object. For all other
 * objects, we traverse them in order (object 1 before object 2, and so on).
 * However, all of these objects are traversed while traversing object 0, since
 * the data it points to is the list of objects. Thus, we need to convert to a
 * canonical representation so we can compare meta-dnode bookmarks to
 * non-meta-dnode bookmarks.
 *
 * We do this by calculating "equivalents" for each field of the zbookmark.
 * zbookmarks outside of the meta-dnode use their own object and level, and
 * calculate the level 0 equivalent (the first L0 blkid that is contained in the
 * blocks this bookmark refers to) by multiplying their blkid by their span
 * (the number of L0 blocks contained within one block at their level).
 * zbookmarks inside the meta-dnode calculate their object equivalent
 * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
 * level + 1<<31 (any value larger than a level could ever be) for their level.
 * This causes them to always compare before a bookmark in their object
 * equivalent, compare appropriately to bookmarks in other objects, and to
 * compare appropriately to other bookmarks in the meta-dnode.
 */
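/*
 * A worked example (the numbers are illustrative only): take a meta-dnode
 * bookmark at level 1, blkid 2, with 16K dnode blocks (dbss = 32 sectors)
 * and 128K indirect blocks (ibs = 17).  Each level-1 block spans
 * 2^(17 - SPA_BLKPTRSHIFT) = 1024 L0 blocks, so the L0 equivalent is
 * 2 * 1024 = 2048.  With 512-byte dnodes there are 32 dnodes per dnode
 * block, so the object equivalent is 2048 * 32 = 65536.  The bookmark
 * therefore canonicalizes to (object 65536, L0equiv 0, level 1 + 2^31):
 * it sorts after every bookmark in objects below 65536 and before every
 * bookmark in object 65536 itself, matching the traversal order described
 * above.
 */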
int
zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
    const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
{
	/*
	 * These variables represent the "equivalent" values for the zbookmark,
	 * after converting zbookmarks inside the meta dnode to their
	 * normal-object equivalents.
	 */
	uint64_t zb1obj, zb2obj;
	uint64_t zb1L0, zb2L0;
	uint64_t zb1level, zb2level;

	if (zb1->zb_object == zb2->zb_object &&
	    zb1->zb_level == zb2->zb_level &&
	    zb1->zb_blkid == zb2->zb_blkid)
		return (0);

	IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT);
	IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT);

	/*
	 * BP_SPANB calculates the span in blocks.
	 */
	zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
	zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);

	if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
		zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
		zb1L0 = 0;
		zb1level = zb1->zb_level + COMPARE_META_LEVEL;
	} else {
		zb1obj = zb1->zb_object;
		zb1level = zb1->zb_level;
	}

	if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
		zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
		zb2L0 = 0;
		zb2level = zb2->zb_level + COMPARE_META_LEVEL;
	} else {
		zb2obj = zb2->zb_object;
		zb2level = zb2->zb_level;
	}

	/* Now that we have a canonical representation, do the comparison. */
	if (zb1obj != zb2obj)
		return (zb1obj < zb2obj ? -1 : 1);
	else if (zb1L0 != zb2L0)
		return (zb1L0 < zb2L0 ? -1 : 1);
	else if (zb1level != zb2level)
		return (zb1level > zb2level ? -1 : 1);
	/*
	 * This can (theoretically) happen if the bookmarks have the same object
	 * and level, but different blkids, if the block sizes are not the same.
	 * There is presently no way to change the indirect block sizes of an
	 * existing object, so in practice this should not happen.
	 */
	return (0);
}

/*
 * This function answers the question: given that last_block is the place
 * that our traversal stopped last time, does that guarantee that we've
 * visited every node under subtree_root?  Comparing subtree_root itself
 * against last_block only tells us whether subtree_root's first block has
 * been visited, so we can't just use the raw output of zbookmark_compare.
 * Instead we pass in a modified version of subtree_root: by incrementing
 * the block id, and then checking whether last_block is before or equal to
 * that, we can tell whether or not having visited last_block implies that
 * all of subtree_root's children have been visited.
 */
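/*
 * For example (hypothetical numbers, same non-meta-dnode object): if
 * subtree_root is at level 1, blkid 3, and a level-1 block spans 1024 L0
 * blocks, then mod_zb below has blkid 4, whose L0 equivalent is 4096 --
 * the first L0 block past the subtree rooted at subtree_root.  If
 * last_block (which is always level 0) is at or beyond L0 block 4096,
 * every block under subtree_root has already been visited; otherwise some
 * of them may still be pending.
 */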
boolean_t
zbookmark_subtree_completed(const dnode_phys_t *dnp,
    const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
	zbookmark_phys_t mod_zb = *subtree_root;
	mod_zb.zb_blkid++;
	ASSERT0(last_block->zb_level);

	/* The objset_phys_t isn't before anything. */
	if (dnp == NULL)
		return (B_FALSE);

	/*
	 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
	 * data block size in sectors, because that variable is only used if
	 * the bookmark refers to a block in the meta-dnode. Since we don't
	 * know without examining it what object it refers to, and there's no
	 * harm in passing in this value in other cases, we always pass it in.
	 *
	 * We pass in 0 for the indirect block size shift because zb2 must be
	 * level 0. The indirect block size is only used to calculate the span
	 * of the bookmark, but since the bookmark must be level 0, the span is
	 * always 1, so the math works out.
	 *
	 * If you make changes to how the zbookmark_compare code works, be
	 * sure that this code still works afterwards.
	 */
	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
	    last_block) <= 0);
}

/*
 * This function is similar to zbookmark_subtree_completed(), but returns true
 * if subtree_root is equal to or ahead of last_block, i.e. still to be done.
 */
boolean_t
zbookmark_subtree_tbd(const dnode_phys_t *dnp,
    const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
	ASSERT0(last_block->zb_level);
	if (dnp == NULL)
		return (B_FALSE);
	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, subtree_root,
	    last_block) >= 0);
}
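
/*
 * A minimal usage sketch (hypothetical caller and variable names): a
 * resumable traversal that records where it stopped in a saved level-0
 * bookmark could use these two predicates to decide what to do with an
 * indirect block's subtree when it resumes:
 *
 *	if (zbookmark_subtree_completed(dnp, zb, &saved_bookmark)) {
 *		// Everything under zb was visited last time; skip it.
 *		return (0);
 *	}
 *	if (zbookmark_subtree_tbd(dnp, zb, &saved_bookmark)) {
 *		// Some or all of zb's subtree is at or past the saved
 *		// position and still needs to be visited.
 *	}
 */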

EXPORT_SYMBOL(zio_type_name);
EXPORT_SYMBOL(zio_buf_alloc);
EXPORT_SYMBOL(zio_data_buf_alloc);
EXPORT_SYMBOL(zio_buf_free);
EXPORT_SYMBOL(zio_data_buf_free);

ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW,
	"Max I/O completion time (milliseconds) before marking it as slow");

ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
	"Prioritize requeued I/O");

ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, UINT, ZMOD_RW,
	"Defer frees starting in this pass");

ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, UINT, ZMOD_RW,
	"Don't compress starting in this pass");

ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, UINT, ZMOD_RW,
	"Rewrite new bps starting in this pass");

ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
	"Throttle block allocations in the ZIO pipeline");

ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW,
	"Log all slow ZIOs, not just those with vdevs");