1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2011, 2021 by Delphix. All rights reserved.
25 * Copyright 2017 Nexenta Systems, Inc.
26 * Copyright (c) 2014 Integros [integros.com]
27 * Copyright 2016 Toomas Soome <tsoome@me.com>
28 * Copyright 2017 Joyent, Inc.
29 * Copyright (c) 2017, Intel Corporation.
30 * Copyright (c) 2019, Datto Inc. All rights reserved.
31 * Copyright (c) 2021, Klara Inc.
32 * Copyright (c) 2021, 2023 Hewlett Packard Enterprise Development LP.
33 */
34
35 #include <sys/zfs_context.h>
36 #include <sys/fm/fs/zfs.h>
37 #include <sys/spa.h>
38 #include <sys/spa_impl.h>
39 #include <sys/bpobj.h>
40 #include <sys/dmu.h>
41 #include <sys/dmu_tx.h>
42 #include <sys/dsl_dir.h>
43 #include <sys/vdev_impl.h>
44 #include <sys/vdev_rebuild.h>
45 #include <sys/vdev_draid.h>
46 #include <sys/uberblock_impl.h>
47 #include <sys/metaslab.h>
48 #include <sys/metaslab_impl.h>
49 #include <sys/space_map.h>
50 #include <sys/space_reftree.h>
51 #include <sys/zio.h>
52 #include <sys/zap.h>
53 #include <sys/fs/zfs.h>
54 #include <sys/arc.h>
55 #include <sys/zil.h>
56 #include <sys/dsl_scan.h>
57 #include <sys/vdev_raidz.h>
58 #include <sys/abd.h>
59 #include <sys/vdev_initialize.h>
60 #include <sys/vdev_trim.h>
61 #include <sys/vdev_raidz.h>
62 #include <sys/zvol.h>
63 #include <sys/zfs_ratelimit.h>
64 #include "zfs_prop.h"
65
66 /*
67 * One metaslab from each (normal-class) vdev is used by the ZIL. These are
68 * called "embedded slog metaslabs", are referenced by vdev_log_mg, and are
69 * part of the spa_embedded_log_class. The metaslab with the most free space
70 * in each vdev is selected for this purpose when the pool is opened (or a
71 * vdev is added). See vdev_metaslab_init().
72 *
73 * Log blocks can be allocated from the following locations. Each one is tried
74 * in order until the allocation succeeds:
75 * 1. dedicated log vdevs, aka "slog" (spa_log_class)
76 * 2. embedded slog metaslabs (spa_embedded_log_class)
77 * 3. other metaslabs in normal vdevs (spa_normal_class)
78 *
79 * zfs_embedded_slog_min_ms disables the embedded slog if there are fewer
80 * than this number of metaslabs in the vdev. This ensures that we don't set
81 * aside an unreasonable amount of space for the ZIL. If set to less than
82 * 1 << (spa_slop_shift + 1), on small pools the usable space may be reduced
83 * (by more than 1<<spa_slop_shift) due to the embedded slog metaslab.
84 */
85 static uint_t zfs_embedded_slog_min_ms = 64;
86
87 /* default target for number of metaslabs per top-level vdev */
88 static uint_t zfs_vdev_default_ms_count = 200;
89
90 /* minimum number of metaslabs per top-level vdev */
91 static uint_t zfs_vdev_min_ms_count = 16;
92
93 /* practical upper limit of total metaslabs per top-level vdev */
94 static uint_t zfs_vdev_ms_count_limit = 1ULL << 17;
95
96 /* lower limit for metaslab size (512M) */
97 static uint_t zfs_vdev_default_ms_shift = 29;
98
99 /* upper limit for metaslab size (16G) */
100 static uint_t zfs_vdev_max_ms_shift = 34;
101
102 int vdev_validate_skip = B_FALSE;
103
104 /*
105 * Since the DTL space map of a vdev is not expected to have a lot of
106 * entries, we default its block size to 4K.
107 */
108 int zfs_vdev_dtl_sm_blksz = (1 << 12);
109
110 /*
111 * Rate limit slow IO (delay) events to this many per second.
112 */
113 static unsigned int zfs_slow_io_events_per_second = 20;
114
115 /*
116 * Rate limit deadman "hung IO" events to this many per second.
117 */
118 static unsigned int zfs_deadman_events_per_second = 1;
119
120 /*
121 * Rate limit checksum events after this many checksum errors per second.
122 */
123 static unsigned int zfs_checksum_events_per_second = 20;
124
125 /*
126  * Ignore errors during scrub/resilver. This allows working around a
127  * resilver triggered on import when there are pool errors.
128 */
129 static int zfs_scan_ignore_errors = 0;
130
131 /*
132 * vdev-wide space maps that have lots of entries written to them at
133 * the end of each transaction can benefit from a higher I/O bandwidth
134 * (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
135 */
136 int zfs_vdev_standard_sm_blksz = (1 << 17);
137
138 /*
139 * Tunable parameter for debugging or performance analysis. Setting this
140 * will cause pool corruption on power loss if a volatile out-of-order
141 * write cache is enabled.
142 */
143 int zfs_nocacheflush = 0;
144
145 /*
146 * Maximum and minimum ashift values that can be automatically set based on
147 * vdev's physical ashift (disk's physical sector size). While ASHIFT_MAX
148 * is higher than the maximum value, it is intentionally limited here to not
149 * excessively impact pool space efficiency. Higher ashift values may still
150 * be forced by vdev logical ashift or by user via ashift property, but won't
151 * be set automatically as a performance optimization.
152 */
153 uint_t zfs_vdev_max_auto_ashift = 14;
154 uint_t zfs_vdev_min_auto_ashift = ASHIFT_MIN;
155
156 void
157 vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
158 {
159 va_list adx;
160 char buf[256];
161
162 va_start(adx, fmt);
163 (void) vsnprintf(buf, sizeof (buf), fmt, adx);
164 va_end(adx);
165
166 if (vd->vdev_path != NULL) {
167 zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
168 vd->vdev_path, buf);
169 } else {
170 zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
171 vd->vdev_ops->vdev_op_type,
172 (u_longlong_t)vd->vdev_id,
173 (u_longlong_t)vd->vdev_guid, buf);
174 }
175 }
176
177 void
178 vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
179 {
180 char state[20];
181
182 if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
183 zfs_dbgmsg("%*svdev %llu: %s", indent, "",
184 (u_longlong_t)vd->vdev_id,
185 vd->vdev_ops->vdev_op_type);
186 return;
187 }
188
189 switch (vd->vdev_state) {
190 case VDEV_STATE_UNKNOWN:
191 (void) snprintf(state, sizeof (state), "unknown");
192 break;
193 case VDEV_STATE_CLOSED:
194 (void) snprintf(state, sizeof (state), "closed");
195 break;
196 case VDEV_STATE_OFFLINE:
197 (void) snprintf(state, sizeof (state), "offline");
198 break;
199 case VDEV_STATE_REMOVED:
200 (void) snprintf(state, sizeof (state), "removed");
201 break;
202 case VDEV_STATE_CANT_OPEN:
203 (void) snprintf(state, sizeof (state), "can't open");
204 break;
205 case VDEV_STATE_FAULTED:
206 (void) snprintf(state, sizeof (state), "faulted");
207 break;
208 case VDEV_STATE_DEGRADED:
209 (void) snprintf(state, sizeof (state), "degraded");
210 break;
211 case VDEV_STATE_HEALTHY:
212 (void) snprintf(state, sizeof (state), "healthy");
213 break;
214 default:
215 (void) snprintf(state, sizeof (state), "<state %u>",
216 (uint_t)vd->vdev_state);
217 }
218
219 zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
220 "", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
221 vd->vdev_islog ? " (log)" : "",
222 (u_longlong_t)vd->vdev_guid,
223 vd->vdev_path ? vd->vdev_path : "N/A", state);
224
225 for (uint64_t i = 0; i < vd->vdev_children; i++)
226 vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
227 }
228
229 /*
230 * Virtual device management.
231 */
232
233 static vdev_ops_t *const vdev_ops_table[] = {
234 &vdev_root_ops,
235 &vdev_raidz_ops,
236 &vdev_draid_ops,
237 &vdev_draid_spare_ops,
238 &vdev_mirror_ops,
239 &vdev_replacing_ops,
240 &vdev_spare_ops,
241 &vdev_disk_ops,
242 &vdev_file_ops,
243 &vdev_missing_ops,
244 &vdev_hole_ops,
245 &vdev_indirect_ops,
246 NULL
247 };
248
249 /*
250 * Given a vdev type, return the appropriate ops vector.
251 */
252 static vdev_ops_t *
253 vdev_getops(const char *type)
254 {
255 vdev_ops_t *ops, *const *opspp;
256
257 for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
258 if (strcmp(ops->vdev_op_type, type) == 0)
259 break;
260
261 return (ops);
262 }
263
264 /*
265 * Given a vdev and a metaslab class, find which metaslab group we're
266 * interested in. All vdevs may belong to two different metaslab classes.
267 * Dedicated slog devices use only the primary metaslab group, rather than a
268 * separate log group. For embedded slogs, the vdev_log_mg will be non-NULL.
269 */
270 metaslab_group_t *
271 vdev_get_mg(vdev_t *vd, metaslab_class_t *mc)
272 {
273 if (mc == spa_embedded_log_class(vd->vdev_spa) &&
274 vd->vdev_log_mg != NULL)
275 return (vd->vdev_log_mg);
276 else
277 return (vd->vdev_mg);
278 }
279
280 void
281 vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
282 range_seg64_t *physical_rs, range_seg64_t *remain_rs)
283 {
284 (void) vd, (void) remain_rs;
285
286 physical_rs->rs_start = logical_rs->rs_start;
287 physical_rs->rs_end = logical_rs->rs_end;
288 }
289
290 /*
291 * Derive the enumerated allocation bias from string input.
292 * String origin is either the per-vdev zap or zpool(8).
293 */
294 static vdev_alloc_bias_t
295 vdev_derive_alloc_bias(const char *bias)
296 {
297 vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
298
299 if (strcmp(bias, VDEV_ALLOC_BIAS_LOG) == 0)
300 alloc_bias = VDEV_BIAS_LOG;
301 else if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
302 alloc_bias = VDEV_BIAS_SPECIAL;
303 else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
304 alloc_bias = VDEV_BIAS_DEDUP;
305
306 return (alloc_bias);
307 }
308
309 /*
310 * Default asize function: return the MAX of psize with the asize of
311 * all children. This is what's used by anything other than RAID-Z.
312 */
313 uint64_t
314 vdev_default_asize(vdev_t *vd, uint64_t psize, uint64_t txg)
315 {
316 uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
317 uint64_t csize;
318
319 for (int c = 0; c < vd->vdev_children; c++) {
320 csize = vdev_psize_to_asize_txg(vd->vdev_child[c], psize, txg);
321 asize = MAX(asize, csize);
322 }
323
324 return (asize);
325 }
326
327 uint64_t
328 vdev_default_min_asize(vdev_t *vd)
329 {
330 return (vd->vdev_min_asize);
331 }
332
333 /*
334 * Get the minimum allocatable size. We define the allocatable size as
335 * the vdev's asize rounded to the nearest metaslab. This allows us to
336 * replace or attach devices which don't have the same physical size but
337 * can still satisfy the same number of allocations.
338 */
339 uint64_t
340 vdev_get_min_asize(vdev_t *vd)
341 {
342 vdev_t *pvd = vd->vdev_parent;
343
344 /*
345 * If our parent is NULL (inactive spare or cache) or is the root,
346 * just return our own asize.
347 */
348 if (pvd == NULL)
349 return (vd->vdev_asize);
350
351 /*
352 * The top-level vdev just returns the allocatable size rounded
353 * to the nearest metaslab.
354 */
355 if (vd == vd->vdev_top)
356 return (P2ALIGN_TYPED(vd->vdev_asize, 1ULL << vd->vdev_ms_shift,
357 uint64_t));
358
359 return (pvd->vdev_ops->vdev_op_min_asize(pvd));
360 }
361
362 void
363 vdev_set_min_asize(vdev_t *vd)
364 {
365 vd->vdev_min_asize = vdev_get_min_asize(vd);
366
367 for (int c = 0; c < vd->vdev_children; c++)
368 vdev_set_min_asize(vd->vdev_child[c]);
369 }
370
371 /*
372 * Get the minimal allocation size for the top-level vdev.
373 */
374 uint64_t
375 vdev_get_min_alloc(vdev_t *vd)
376 {
377 uint64_t min_alloc = 1ULL << vd->vdev_ashift;
378
379 if (vd->vdev_ops->vdev_op_min_alloc != NULL)
380 min_alloc = vd->vdev_ops->vdev_op_min_alloc(vd);
381
382 return (min_alloc);
383 }
384
385 /*
386 * Get the parity level for a top-level vdev.
387 */
388 uint64_t
389 vdev_get_nparity(vdev_t *vd)
390 {
391 uint64_t nparity = 0;
392
393 if (vd->vdev_ops->vdev_op_nparity != NULL)
394 nparity = vd->vdev_ops->vdev_op_nparity(vd);
395
396 return (nparity);
397 }
398
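/*
 * Look up a numeric vdev property in the most specific vdev ZAP available
 * (root, top-level, or leaf). If the property has never been set, *value
 * is filled in with its default and the zap_lookup() error is returned.
 */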
399 static int
400 vdev_prop_get_int(vdev_t *vd, vdev_prop_t prop, uint64_t *value)
401 {
402 spa_t *spa = vd->vdev_spa;
403 objset_t *mos = spa->spa_meta_objset;
404 uint64_t objid;
405 int err;
406
407 if (vd->vdev_root_zap != 0) {
408 objid = vd->vdev_root_zap;
409 } else if (vd->vdev_top_zap != 0) {
410 objid = vd->vdev_top_zap;
411 } else if (vd->vdev_leaf_zap != 0) {
412 objid = vd->vdev_leaf_zap;
413 } else {
414 return (EINVAL);
415 }
416
417 err = zap_lookup(mos, objid, vdev_prop_to_name(prop),
418 sizeof (uint64_t), 1, value);
419
420 if (err == ENOENT)
421 *value = vdev_prop_default_numeric(prop);
422
423 return (err);
424 }
425
426 /*
427 * Get the number of data disks for a top-level vdev.
428 */
429 uint64_t
430 vdev_get_ndisks(vdev_t *vd)
431 {
432 uint64_t ndisks = 1;
433
434 if (vd->vdev_ops->vdev_op_ndisks != NULL)
435 ndisks = vd->vdev_ops->vdev_op_ndisks(vd);
436
437 return (ndisks);
438 }
439
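/*
 * Look up a top-level vdev by its index (vdev id) under the root vdev.
 */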
440 vdev_t *
441 vdev_lookup_top(spa_t *spa, uint64_t vdev)
442 {
443 vdev_t *rvd = spa->spa_root_vdev;
444
445 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
446
447 if (vdev < rvd->vdev_children) {
448 ASSERT(rvd->vdev_child[vdev] != NULL);
449 return (rvd->vdev_child[vdev]);
450 }
451
452 return (NULL);
453 }
454
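/*
 * Recursively search the vdev tree rooted at 'vd' for the given guid.
 */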
455 vdev_t *
456 vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
457 {
458 vdev_t *mvd;
459
460 if (vd->vdev_guid == guid)
461 return (vd);
462
463 for (int c = 0; c < vd->vdev_children; c++)
464 if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
465 NULL)
466 return (mvd);
467
468 return (NULL);
469 }
470
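/*
 * Count the leaf vdevs in the tree rooted at 'vd'.
 */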
471 static int
472 vdev_count_leaves_impl(vdev_t *vd)
473 {
474 int n = 0;
475
476 if (vd->vdev_ops->vdev_op_leaf)
477 return (1);
478
479 for (int c = 0; c < vd->vdev_children; c++)
480 n += vdev_count_leaves_impl(vd->vdev_child[c]);
481
482 return (n);
483 }
484
485 int
486 vdev_count_leaves(spa_t *spa)
487 {
488 int rc;
489
490 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
491 rc = vdev_count_leaves_impl(spa->spa_root_vdev);
492 spa_config_exit(spa, SCL_VDEV, FTAG);
493
494 return (rc);
495 }
496
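/*
 * Link 'cvd' into 'pvd's child array at slot cvd->vdev_id, growing the
 * array if necessary, and update the guid sums of all ancestors and the
 * spa leaf list.
 */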
497 void
498 vdev_add_child(vdev_t *pvd, vdev_t *cvd)
499 {
500 size_t oldsize, newsize;
501 uint64_t id = cvd->vdev_id;
502 vdev_t **newchild;
503
504 ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
505 ASSERT(cvd->vdev_parent == NULL);
506
507 cvd->vdev_parent = pvd;
508
509 if (pvd == NULL)
510 return;
511
512 ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);
513
514 oldsize = pvd->vdev_children * sizeof (vdev_t *);
515 pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
516 newsize = pvd->vdev_children * sizeof (vdev_t *);
517
518 newchild = kmem_alloc(newsize, KM_SLEEP);
519 if (pvd->vdev_child != NULL) {
520 memcpy(newchild, pvd->vdev_child, oldsize);
521 kmem_free(pvd->vdev_child, oldsize);
522 }
523
524 pvd->vdev_child = newchild;
525 pvd->vdev_child[id] = cvd;
526
527 cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
528 ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
529
530 /*
531 * Walk up all ancestors to update guid sum.
532 */
533 for (; pvd != NULL; pvd = pvd->vdev_parent)
534 pvd->vdev_guid_sum += cvd->vdev_guid_sum;
535
536 if (cvd->vdev_ops->vdev_op_leaf) {
537 list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd);
538 cvd->vdev_spa->spa_leaf_list_gen++;
539 }
540 }
541
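/*
 * Unlink 'cvd' from 'pvd', freeing the child array once it is empty, and
 * update the guid sums of all ancestors and the spa leaf list.
 */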
542 void
543 vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
544 {
545 int c;
546 uint_t id = cvd->vdev_id;
547
548 ASSERT(cvd->vdev_parent == pvd);
549
550 if (pvd == NULL)
551 return;
552
553 ASSERT(id < pvd->vdev_children);
554 ASSERT(pvd->vdev_child[id] == cvd);
555
556 pvd->vdev_child[id] = NULL;
557 cvd->vdev_parent = NULL;
558
559 for (c = 0; c < pvd->vdev_children; c++)
560 if (pvd->vdev_child[c])
561 break;
562
563 if (c == pvd->vdev_children) {
564 kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
565 pvd->vdev_child = NULL;
566 pvd->vdev_children = 0;
567 }
568
569 if (cvd->vdev_ops->vdev_op_leaf) {
570 spa_t *spa = cvd->vdev_spa;
571 list_remove(&spa->spa_leaf_list, cvd);
572 spa->spa_leaf_list_gen++;
573 }
574
575 /*
576 * Walk up all ancestors to update guid sum.
577 */
578 for (; pvd != NULL; pvd = pvd->vdev_parent)
579 pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
580 }
581
582 /*
583 * Remove any holes in the child array.
584 */
585 void
586 vdev_compact_children(vdev_t *pvd)
587 {
588 vdev_t **newchild, *cvd;
589 int oldc = pvd->vdev_children;
590 int newc;
591
592 ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
593
594 if (oldc == 0)
595 return;
596
597 for (int c = newc = 0; c < oldc; c++)
598 if (pvd->vdev_child[c])
599 newc++;
600
601 if (newc > 0) {
602 newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);
603
604 for (int c = newc = 0; c < oldc; c++) {
605 if ((cvd = pvd->vdev_child[c]) != NULL) {
606 newchild[newc] = cvd;
607 cvd->vdev_id = newc++;
608 }
609 }
610 } else {
611 newchild = NULL;
612 }
613
614 kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
615 pvd->vdev_child = newchild;
616 pvd->vdev_children = newc;
617 }
618
619 /*
620 * Allocate and minimally initialize a vdev_t.
621 */
622 vdev_t *
623 vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
624 {
625 vdev_t *vd;
626 vdev_indirect_config_t *vic;
627
628 vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
629 vic = &vd->vdev_indirect_config;
630
631 if (spa->spa_root_vdev == NULL) {
632 ASSERT(ops == &vdev_root_ops);
633 spa->spa_root_vdev = vd;
634 spa->spa_load_guid = spa_generate_guid(NULL);
635 }
636
637 if (guid == 0 && ops != &vdev_hole_ops) {
638 if (spa->spa_root_vdev == vd) {
639 /*
640 * The root vdev's guid will also be the pool guid,
641 * which must be unique among all pools.
642 */
643 guid = spa_generate_guid(NULL);
644 } else {
645 /*
646 * Any other vdev's guid must be unique within the pool.
647 */
648 guid = spa_generate_guid(spa);
649 }
650 ASSERT(!spa_guid_exists(spa_guid(spa), guid));
651 }
652
653 vd->vdev_spa = spa;
654 vd->vdev_id = id;
655 vd->vdev_guid = guid;
656 vd->vdev_guid_sum = guid;
657 vd->vdev_ops = ops;
658 vd->vdev_state = VDEV_STATE_CLOSED;
659 vd->vdev_ishole = (ops == &vdev_hole_ops);
660 vic->vic_prev_indirect_vdev = UINT64_MAX;
661
662 rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
663 mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
664 vd->vdev_obsolete_segments = range_tree_create(NULL, RANGE_SEG64, NULL,
665 0, 0);
666
667 /*
668 * Initialize rate limit structs for events. We rate limit ZIO delay
669 * and checksum events so that we don't overwhelm ZED with thousands
670 * of events when a disk is acting up.
671 */
672 zfs_ratelimit_init(&vd->vdev_delay_rl, &zfs_slow_io_events_per_second,
673 1);
674 zfs_ratelimit_init(&vd->vdev_deadman_rl, &zfs_deadman_events_per_second,
675 1);
676 zfs_ratelimit_init(&vd->vdev_checksum_rl,
677 &zfs_checksum_events_per_second, 1);
678
679 /*
680 * Default Thresholds for tuning ZED
681 */
682 vd->vdev_checksum_n = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_N);
683 vd->vdev_checksum_t = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_T);
684 vd->vdev_io_n = vdev_prop_default_numeric(VDEV_PROP_IO_N);
685 vd->vdev_io_t = vdev_prop_default_numeric(VDEV_PROP_IO_T);
686 vd->vdev_slow_io_n = vdev_prop_default_numeric(VDEV_PROP_SLOW_IO_N);
687 vd->vdev_slow_io_t = vdev_prop_default_numeric(VDEV_PROP_SLOW_IO_T);
688
689 list_link_init(&vd->vdev_config_dirty_node);
690 list_link_init(&vd->vdev_state_dirty_node);
691 list_link_init(&vd->vdev_initialize_node);
692 list_link_init(&vd->vdev_leaf_node);
693 list_link_init(&vd->vdev_trim_node);
694
695 mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
696 mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
697 mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
698 mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL);
699
700 mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
701 mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL);
702 cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL);
703 cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL);
704
705 mutex_init(&vd->vdev_trim_lock, NULL, MUTEX_DEFAULT, NULL);
706 mutex_init(&vd->vdev_autotrim_lock, NULL, MUTEX_DEFAULT, NULL);
707 mutex_init(&vd->vdev_trim_io_lock, NULL, MUTEX_DEFAULT, NULL);
708 cv_init(&vd->vdev_trim_cv, NULL, CV_DEFAULT, NULL);
709 cv_init(&vd->vdev_autotrim_cv, NULL, CV_DEFAULT, NULL);
710 cv_init(&vd->vdev_autotrim_kick_cv, NULL, CV_DEFAULT, NULL);
711 cv_init(&vd->vdev_trim_io_cv, NULL, CV_DEFAULT, NULL);
712
713 mutex_init(&vd->vdev_rebuild_lock, NULL, MUTEX_DEFAULT, NULL);
714 cv_init(&vd->vdev_rebuild_cv, NULL, CV_DEFAULT, NULL);
715
716 for (int t = 0; t < DTL_TYPES; t++) {
717 vd->vdev_dtl[t] = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
718 0);
719 }
720
721 txg_list_create(&vd->vdev_ms_list, spa,
722 offsetof(struct metaslab, ms_txg_node));
723 txg_list_create(&vd->vdev_dtl_list, spa,
724 offsetof(struct vdev, vdev_dtl_node));
725 vd->vdev_stat.vs_timestamp = gethrtime();
726 vdev_queue_init(vd);
727
728 return (vd);
729 }
730
731 /*
732 * Allocate a new vdev. The 'alloctype' is used to control whether we are
733 * creating a new vdev or loading an existing one - the behavior is slightly
734 * different for each case.
735 */
736 int
737 vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
738 int alloctype)
739 {
740 vdev_ops_t *ops;
741 const char *type;
742 uint64_t guid = 0, islog;
743 vdev_t *vd;
744 vdev_indirect_config_t *vic;
745 const char *tmp = NULL;
746 int rc;
747 vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
748 boolean_t top_level = (parent && !parent->vdev_parent);
749
750 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
751
752 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
753 return (SET_ERROR(EINVAL));
754
755 if ((ops = vdev_getops(type)) == NULL)
756 return (SET_ERROR(EINVAL));
757
758 /*
759 * If this is a load, get the vdev guid from the nvlist.
760 * Otherwise, vdev_alloc_common() will generate one for us.
761 */
762 if (alloctype == VDEV_ALLOC_LOAD) {
763 uint64_t label_id;
764
765 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
766 label_id != id)
767 return (SET_ERROR(EINVAL));
768
769 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
770 return (SET_ERROR(EINVAL));
771 } else if (alloctype == VDEV_ALLOC_SPARE) {
772 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
773 return (SET_ERROR(EINVAL));
774 } else if (alloctype == VDEV_ALLOC_L2CACHE) {
775 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
776 return (SET_ERROR(EINVAL));
777 } else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
778 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
779 return (SET_ERROR(EINVAL));
780 }
781
782 /*
783 * The first allocated vdev must be of type 'root'.
784 */
785 if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
786 return (SET_ERROR(EINVAL));
787
788 /*
789 * Determine whether we're a log vdev.
790 */
791 islog = 0;
792 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
793 if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
794 return (SET_ERROR(ENOTSUP));
795
796 if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
797 return (SET_ERROR(ENOTSUP));
798
799 if (top_level && alloctype == VDEV_ALLOC_ADD) {
800 const char *bias;
801
802 /*
803 * If creating a top-level vdev, check for allocation
804 * classes input.
805 */
806 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
807 &bias) == 0) {
808 alloc_bias = vdev_derive_alloc_bias(bias);
809
810 /* spa_vdev_add() expects feature to be enabled */
811 if (spa->spa_load_state != SPA_LOAD_CREATE &&
812 !spa_feature_is_enabled(spa,
813 SPA_FEATURE_ALLOCATION_CLASSES)) {
814 return (SET_ERROR(ENOTSUP));
815 }
816 }
817
818 /* spa_vdev_add() expects feature to be enabled */
819 if (ops == &vdev_draid_ops &&
820 spa->spa_load_state != SPA_LOAD_CREATE &&
821 !spa_feature_is_enabled(spa, SPA_FEATURE_DRAID)) {
822 return (SET_ERROR(ENOTSUP));
823 }
824 }
825
826 /*
827 * Initialize the vdev specific data. This is done before calling
828 * vdev_alloc_common() since it may fail and this simplifies the
829 * error reporting and cleanup code paths.
830 */
831 void *tsd = NULL;
832 if (ops->vdev_op_init != NULL) {
833 rc = ops->vdev_op_init(spa, nv, &tsd);
834 if (rc != 0) {
835 return (rc);
836 }
837 }
838
839 vd = vdev_alloc_common(spa, id, guid, ops);
840 vd->vdev_tsd = tsd;
841 vd->vdev_islog = islog;
842
843 if (top_level && alloc_bias != VDEV_BIAS_NONE)
844 vd->vdev_alloc_bias = alloc_bias;
845
846 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tmp) == 0)
847 vd->vdev_path = spa_strdup(tmp);
848
849 /*
850 * ZPOOL_CONFIG_AUX_STATE = "external" means we previously forced a
851 * fault on a vdev and want it to persist across imports (like with
852 * zpool offline -f).
853 */
854 rc = nvlist_lookup_string(nv, ZPOOL_CONFIG_AUX_STATE, &tmp);
855 if (rc == 0 && tmp != NULL && strcmp(tmp, "external") == 0) {
856 vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
857 vd->vdev_faulted = 1;
858 vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
859 }
860
861 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &tmp) == 0)
862 vd->vdev_devid = spa_strdup(tmp);
863 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH, &tmp) == 0)
864 vd->vdev_physpath = spa_strdup(tmp);
865
866 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
867 &tmp) == 0)
868 vd->vdev_enc_sysfs_path = spa_strdup(tmp);
869
870 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &tmp) == 0)
871 vd->vdev_fru = spa_strdup(tmp);
872
873 /*
874 * Set the whole_disk property. If it's not specified, leave the value
875 * as -1.
876 */
877 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
878 &vd->vdev_wholedisk) != 0)
879 vd->vdev_wholedisk = -1ULL;
880
881 vic = &vd->vdev_indirect_config;
882
883 ASSERT0(vic->vic_mapping_object);
884 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
885 &vic->vic_mapping_object);
886 ASSERT0(vic->vic_births_object);
887 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
888 &vic->vic_births_object);
889 ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
890 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
891 &vic->vic_prev_indirect_vdev);
892
893 /*
894 * Look for the 'not present' flag. This will only be set if the device
895 * was not present at the time of import.
896 */
897 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
898 &vd->vdev_not_present);
899
900 /*
901 * Get the alignment requirement. Ignore pool ashift for vdev
902 * attach case.
903 */
904 if (alloctype != VDEV_ALLOC_ATTACH) {
905 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT,
906 &vd->vdev_ashift);
907 } else {
908 vd->vdev_attaching = B_TRUE;
909 }
910
911 /*
912 * Retrieve the vdev creation time.
913 */
914 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
915 &vd->vdev_crtxg);
916
917 if (vd->vdev_ops == &vdev_root_ops &&
918 (alloctype == VDEV_ALLOC_LOAD ||
919 alloctype == VDEV_ALLOC_SPLIT ||
920 alloctype == VDEV_ALLOC_ROOTPOOL)) {
921 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_ROOT_ZAP,
922 &vd->vdev_root_zap);
923 }
924
925 /*
926 * If we're a top-level vdev, try to load the allocation parameters.
927 */
928 if (top_level &&
929 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
930 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
931 &vd->vdev_ms_array);
932 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
933 &vd->vdev_ms_shift);
934 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
935 &vd->vdev_asize);
936 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NONALLOCATING,
937 &vd->vdev_noalloc);
938 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
939 &vd->vdev_removing);
940 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
941 &vd->vdev_top_zap);
942 vd->vdev_rz_expanding = nvlist_exists(nv,
943 ZPOOL_CONFIG_RAIDZ_EXPANDING);
944 } else {
945 ASSERT0(vd->vdev_top_zap);
946 }
947
948 if (top_level && alloctype != VDEV_ALLOC_ATTACH) {
949 ASSERT(alloctype == VDEV_ALLOC_LOAD ||
950 alloctype == VDEV_ALLOC_ADD ||
951 alloctype == VDEV_ALLOC_SPLIT ||
952 alloctype == VDEV_ALLOC_ROOTPOOL);
953 /* Note: metaslab_group_create() is now deferred */
954 }
955
956 if (vd->vdev_ops->vdev_op_leaf &&
957 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
958 (void) nvlist_lookup_uint64(nv,
959 ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
960 } else {
961 ASSERT0(vd->vdev_leaf_zap);
962 }
963
964 /*
965 * If we're a leaf vdev, try to load the DTL object and other state.
966 */
967
968 if (vd->vdev_ops->vdev_op_leaf &&
969 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
970 alloctype == VDEV_ALLOC_ROOTPOOL)) {
971 if (alloctype == VDEV_ALLOC_LOAD) {
972 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
973 &vd->vdev_dtl_object);
974 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
975 &vd->vdev_unspare);
976 }
977
978 if (alloctype == VDEV_ALLOC_ROOTPOOL) {
979 uint64_t spare = 0;
980
981 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
982 &spare) == 0 && spare)
983 spa_spare_add(vd);
984 }
985
986 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
987 &vd->vdev_offline);
988
989 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
990 &vd->vdev_resilver_txg);
991
992 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REBUILD_TXG,
993 &vd->vdev_rebuild_txg);
994
995 if (nvlist_exists(nv, ZPOOL_CONFIG_RESILVER_DEFER))
996 vdev_defer_resilver(vd);
997
998 /*
999 * In general, when importing a pool we want to ignore the
1000 * persistent fault state, as the diagnosis made on another
1001 * system may not be valid in the current context. The only
1002 * exception is if we forced a vdev to a persistently faulted
1003 * state with 'zpool offline -f'. The persistent fault will
1004 * remain across imports until cleared.
1005 *
1006 * Local vdevs will remain in the faulted state.
1007 */
1008 if (spa_load_state(spa) == SPA_LOAD_OPEN ||
1009 spa_load_state(spa) == SPA_LOAD_IMPORT) {
1010 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
1011 &vd->vdev_faulted);
1012 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
1013 &vd->vdev_degraded);
1014 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
1015 &vd->vdev_removed);
1016
1017 if (vd->vdev_faulted || vd->vdev_degraded) {
1018 const char *aux;
1019
1020 vd->vdev_label_aux =
1021 VDEV_AUX_ERR_EXCEEDED;
1022 if (nvlist_lookup_string(nv,
1023 ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
1024 strcmp(aux, "external") == 0)
1025 vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
1026 else
1027 vd->vdev_faulted = 0ULL;
1028 }
1029 }
1030 }
1031
1032 /*
1033 * Add ourselves to the parent's list of children.
1034 */
1035 vdev_add_child(parent, vd);
1036
1037 *vdp = vd;
1038
1039 return (0);
1040 }
1041
1042 void
1043 vdev_free(vdev_t *vd)
1044 {
1045 spa_t *spa = vd->vdev_spa;
1046
1047 ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
1048 ASSERT3P(vd->vdev_trim_thread, ==, NULL);
1049 ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
1050 ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
1051
1052 /*
1053 * Scan queues are normally destroyed at the end of a scan. If the
1054 * queue exists here, that implies the vdev is being removed while
1055 * the scan is still running.
1056 */
1057 if (vd->vdev_scan_io_queue != NULL) {
1058 mutex_enter(&vd->vdev_scan_io_queue_lock);
1059 dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue);
1060 vd->vdev_scan_io_queue = NULL;
1061 mutex_exit(&vd->vdev_scan_io_queue_lock);
1062 }
1063
1064 /*
1065 * vdev_free() implies closing the vdev first. This is simpler than
1066 * trying to ensure complicated semantics for all callers.
1067 */
1068 vdev_close(vd);
1069
1070 ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
1071 ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
1072
1073 /*
1074 * Free all children.
1075 */
1076 for (int c = 0; c < vd->vdev_children; c++)
1077 vdev_free(vd->vdev_child[c]);
1078
1079 ASSERT(vd->vdev_child == NULL);
1080 ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
1081
1082 if (vd->vdev_ops->vdev_op_fini != NULL)
1083 vd->vdev_ops->vdev_op_fini(vd);
1084
1085 /*
1086 * Discard allocation state.
1087 */
1088 if (vd->vdev_mg != NULL) {
1089 vdev_metaslab_fini(vd);
1090 metaslab_group_destroy(vd->vdev_mg);
1091 vd->vdev_mg = NULL;
1092 }
1093 if (vd->vdev_log_mg != NULL) {
1094 ASSERT0(vd->vdev_ms_count);
1095 metaslab_group_destroy(vd->vdev_log_mg);
1096 vd->vdev_log_mg = NULL;
1097 }
1098
1099 ASSERT0(vd->vdev_stat.vs_space);
1100 ASSERT0(vd->vdev_stat.vs_dspace);
1101 ASSERT0(vd->vdev_stat.vs_alloc);
1102
1103 /*
1104 * Remove this vdev from its parent's child list.
1105 */
1106 vdev_remove_child(vd->vdev_parent, vd);
1107
1108 ASSERT(vd->vdev_parent == NULL);
1109 ASSERT(!list_link_active(&vd->vdev_leaf_node));
1110
1111 /*
1112 * Clean up vdev structure.
1113 */
1114 vdev_queue_fini(vd);
1115
1116 if (vd->vdev_path)
1117 spa_strfree(vd->vdev_path);
1118 if (vd->vdev_devid)
1119 spa_strfree(vd->vdev_devid);
1120 if (vd->vdev_physpath)
1121 spa_strfree(vd->vdev_physpath);
1122
1123 if (vd->vdev_enc_sysfs_path)
1124 spa_strfree(vd->vdev_enc_sysfs_path);
1125
1126 if (vd->vdev_fru)
1127 spa_strfree(vd->vdev_fru);
1128
1129 if (vd->vdev_isspare)
1130 spa_spare_remove(vd);
1131 if (vd->vdev_isl2cache)
1132 spa_l2cache_remove(vd);
1133
1134 txg_list_destroy(&vd->vdev_ms_list);
1135 txg_list_destroy(&vd->vdev_dtl_list);
1136
1137 mutex_enter(&vd->vdev_dtl_lock);
1138 space_map_close(vd->vdev_dtl_sm);
1139 for (int t = 0; t < DTL_TYPES; t++) {
1140 range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
1141 range_tree_destroy(vd->vdev_dtl[t]);
1142 }
1143 mutex_exit(&vd->vdev_dtl_lock);
1144
1145 EQUIV(vd->vdev_indirect_births != NULL,
1146 vd->vdev_indirect_mapping != NULL);
1147 if (vd->vdev_indirect_births != NULL) {
1148 vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
1149 vdev_indirect_births_close(vd->vdev_indirect_births);
1150 }
1151
1152 if (vd->vdev_obsolete_sm != NULL) {
1153 ASSERT(vd->vdev_removing ||
1154 vd->vdev_ops == &vdev_indirect_ops);
1155 space_map_close(vd->vdev_obsolete_sm);
1156 vd->vdev_obsolete_sm = NULL;
1157 }
1158 range_tree_destroy(vd->vdev_obsolete_segments);
1159 rw_destroy(&vd->vdev_indirect_rwlock);
1160 mutex_destroy(&vd->vdev_obsolete_lock);
1161
1162 mutex_destroy(&vd->vdev_dtl_lock);
1163 mutex_destroy(&vd->vdev_stat_lock);
1164 mutex_destroy(&vd->vdev_probe_lock);
1165 mutex_destroy(&vd->vdev_scan_io_queue_lock);
1166
1167 mutex_destroy(&vd->vdev_initialize_lock);
1168 mutex_destroy(&vd->vdev_initialize_io_lock);
1169 cv_destroy(&vd->vdev_initialize_io_cv);
1170 cv_destroy(&vd->vdev_initialize_cv);
1171
1172 mutex_destroy(&vd->vdev_trim_lock);
1173 mutex_destroy(&vd->vdev_autotrim_lock);
1174 mutex_destroy(&vd->vdev_trim_io_lock);
1175 cv_destroy(&vd->vdev_trim_cv);
1176 cv_destroy(&vd->vdev_autotrim_cv);
1177 cv_destroy(&vd->vdev_autotrim_kick_cv);
1178 cv_destroy(&vd->vdev_trim_io_cv);
1179
1180 mutex_destroy(&vd->vdev_rebuild_lock);
1181 cv_destroy(&vd->vdev_rebuild_cv);
1182
1183 zfs_ratelimit_fini(&vd->vdev_delay_rl);
1184 zfs_ratelimit_fini(&vd->vdev_deadman_rl);
1185 zfs_ratelimit_fini(&vd->vdev_checksum_rl);
1186
1187 if (vd == spa->spa_root_vdev)
1188 spa->spa_root_vdev = NULL;
1189
1190 kmem_free(vd, sizeof (vdev_t));
1191 }
1192
1193 /*
1194 * Transfer top-level vdev state from svd to tvd.
1195 */
1196 static void
1197 vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
1198 {
1199 spa_t *spa = svd->vdev_spa;
1200 metaslab_t *msp;
1201 vdev_t *vd;
1202 int t;
1203
1204 ASSERT(tvd == tvd->vdev_top);
1205
1206 tvd->vdev_ms_array = svd->vdev_ms_array;
1207 tvd->vdev_ms_shift = svd->vdev_ms_shift;
1208 tvd->vdev_ms_count = svd->vdev_ms_count;
1209 tvd->vdev_top_zap = svd->vdev_top_zap;
1210
1211 svd->vdev_ms_array = 0;
1212 svd->vdev_ms_shift = 0;
1213 svd->vdev_ms_count = 0;
1214 svd->vdev_top_zap = 0;
1215
1216 if (tvd->vdev_mg)
1217 ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
1218 if (tvd->vdev_log_mg)
1219 ASSERT3P(tvd->vdev_log_mg, ==, svd->vdev_log_mg);
1220 tvd->vdev_mg = svd->vdev_mg;
1221 tvd->vdev_log_mg = svd->vdev_log_mg;
1222 tvd->vdev_ms = svd->vdev_ms;
1223
1224 svd->vdev_mg = NULL;
1225 svd->vdev_log_mg = NULL;
1226 svd->vdev_ms = NULL;
1227
1228 if (tvd->vdev_mg != NULL)
1229 tvd->vdev_mg->mg_vd = tvd;
1230 if (tvd->vdev_log_mg != NULL)
1231 tvd->vdev_log_mg->mg_vd = tvd;
1232
1233 tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
1234 svd->vdev_checkpoint_sm = NULL;
1235
1236 tvd->vdev_alloc_bias = svd->vdev_alloc_bias;
1237 svd->vdev_alloc_bias = VDEV_BIAS_NONE;
1238
1239 tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
1240 tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
1241 tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;
1242
1243 svd->vdev_stat.vs_alloc = 0;
1244 svd->vdev_stat.vs_space = 0;
1245 svd->vdev_stat.vs_dspace = 0;
1246
1247 /*
1248 * State which may be set on a top-level vdev that's in the
1249 * process of being removed.
1250 */
1251 ASSERT0(tvd->vdev_indirect_config.vic_births_object);
1252 ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
1253 ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
1254 ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
1255 ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
1256 ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
1257 ASSERT0(tvd->vdev_noalloc);
1258 ASSERT0(tvd->vdev_removing);
1259 ASSERT0(tvd->vdev_rebuilding);
1260 tvd->vdev_noalloc = svd->vdev_noalloc;
1261 tvd->vdev_removing = svd->vdev_removing;
1262 tvd->vdev_rebuilding = svd->vdev_rebuilding;
1263 tvd->vdev_rebuild_config = svd->vdev_rebuild_config;
1264 tvd->vdev_indirect_config = svd->vdev_indirect_config;
1265 tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping;
1266 tvd->vdev_indirect_births = svd->vdev_indirect_births;
1267 range_tree_swap(&svd->vdev_obsolete_segments,
1268 &tvd->vdev_obsolete_segments);
1269 tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm;
1270 svd->vdev_indirect_config.vic_mapping_object = 0;
1271 svd->vdev_indirect_config.vic_births_object = 0;
1272 svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL;
1273 svd->vdev_indirect_mapping = NULL;
1274 svd->vdev_indirect_births = NULL;
1275 svd->vdev_obsolete_sm = NULL;
1276 svd->vdev_noalloc = 0;
1277 svd->vdev_removing = 0;
1278 svd->vdev_rebuilding = 0;
1279
1280 for (t = 0; t < TXG_SIZE; t++) {
1281 while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
1282 (void) txg_list_add(&tvd->vdev_ms_list, msp, t);
1283 while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
1284 (void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
1285 if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
1286 (void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
1287 }
1288
1289 if (list_link_active(&svd->vdev_config_dirty_node)) {
1290 vdev_config_clean(svd);
1291 vdev_config_dirty(tvd);
1292 }
1293
1294 if (list_link_active(&svd->vdev_state_dirty_node)) {
1295 vdev_state_clean(svd);
1296 vdev_state_dirty(tvd);
1297 }
1298
1299 tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
1300 svd->vdev_deflate_ratio = 0;
1301
1302 tvd->vdev_islog = svd->vdev_islog;
1303 svd->vdev_islog = 0;
1304
1305 dsl_scan_io_queue_vdev_xfer(svd, tvd);
1306 }
1307
1308 static void
1309 vdev_top_update(vdev_t *tvd, vdev_t *vd)
1310 {
1311 if (vd == NULL)
1312 return;
1313
1314 vd->vdev_top = tvd;
1315
1316 for (int c = 0; c < vd->vdev_children; c++)
1317 vdev_top_update(tvd, vd->vdev_child[c]);
1318 }
1319
1320 /*
1321 * Add a mirror/replacing vdev above an existing vdev. There is no need to
1322 * call .vdev_op_init() since mirror/replacing vdevs do not have private state.
1323 */
1324 vdev_t *
1325 vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
1326 {
1327 spa_t *spa = cvd->vdev_spa;
1328 vdev_t *pvd = cvd->vdev_parent;
1329 vdev_t *mvd;
1330
1331 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1332
1333 mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
1334
1335 mvd->vdev_asize = cvd->vdev_asize;
1336 mvd->vdev_min_asize = cvd->vdev_min_asize;
1337 mvd->vdev_max_asize = cvd->vdev_max_asize;
1338 mvd->vdev_psize = cvd->vdev_psize;
1339 mvd->vdev_ashift = cvd->vdev_ashift;
1340 mvd->vdev_logical_ashift = cvd->vdev_logical_ashift;
1341 mvd->vdev_physical_ashift = cvd->vdev_physical_ashift;
1342 mvd->vdev_state = cvd->vdev_state;
1343 mvd->vdev_crtxg = cvd->vdev_crtxg;
1344
1345 vdev_remove_child(pvd, cvd);
1346 vdev_add_child(pvd, mvd);
1347 cvd->vdev_id = mvd->vdev_children;
1348 vdev_add_child(mvd, cvd);
1349 vdev_top_update(cvd->vdev_top, cvd->vdev_top);
1350
1351 if (mvd == mvd->vdev_top)
1352 vdev_top_transfer(cvd, mvd);
1353
1354 return (mvd);
1355 }
1356
1357 /*
1358 * Remove a 1-way mirror/replacing vdev from the tree.
1359 */
1360 void
1361 vdev_remove_parent(vdev_t *cvd)
1362 {
1363 vdev_t *mvd = cvd->vdev_parent;
1364 vdev_t *pvd = mvd->vdev_parent;
1365
1366 ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1367
1368 ASSERT(mvd->vdev_children == 1);
1369 ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
1370 mvd->vdev_ops == &vdev_replacing_ops ||
1371 mvd->vdev_ops == &vdev_spare_ops);
1372 cvd->vdev_ashift = mvd->vdev_ashift;
1373 cvd->vdev_logical_ashift = mvd->vdev_logical_ashift;
1374 cvd->vdev_physical_ashift = mvd->vdev_physical_ashift;
1375 vdev_remove_child(mvd, cvd);
1376 vdev_remove_child(pvd, mvd);
1377
1378 /*
1379 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
1380 * Otherwise, we could have detached an offline device, and when we
1381 * go to import the pool we'll think we have two top-level vdevs,
1382 * instead of a different version of the same top-level vdev.
1383 */
1384 if (mvd->vdev_top == mvd) {
1385 uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
1386 cvd->vdev_orig_guid = cvd->vdev_guid;
1387 cvd->vdev_guid += guid_delta;
1388 cvd->vdev_guid_sum += guid_delta;
1389
1390 /*
1391  * If the pool is not set to autoexpand, we also need to preserve
1392 * mvd's asize to prevent automatic expansion of cvd.
1393 * Otherwise if we are adjusting the mirror by attaching and
1394 * detaching children of non-uniform sizes, the mirror could
1395 * autoexpand, unexpectedly requiring larger devices to
1396 * re-establish the mirror.
1397 */
1398 if (!cvd->vdev_spa->spa_autoexpand)
1399 cvd->vdev_asize = mvd->vdev_asize;
1400 }
1401 cvd->vdev_id = mvd->vdev_id;
1402 vdev_add_child(pvd, cvd);
1403 vdev_top_update(cvd->vdev_top, cvd->vdev_top);
1404
1405 if (cvd == cvd->vdev_top)
1406 vdev_top_transfer(mvd, cvd);
1407
1408 ASSERT(mvd->vdev_children == 0);
1409 vdev_free(mvd);
1410 }
1411
1412 /*
1413 * Choose GCD for spa_gcd_alloc.
1414 */
1415 static uint64_t
1416 vdev_gcd(uint64_t a, uint64_t b)
1417 {
1418 while (b != 0) {
1419 uint64_t t = b;
1420 b = a % b;
1421 a = t;
1422 }
1423 return (a);
1424 }
1425
1426 /*
1427 * Set spa_min_alloc and spa_gcd_alloc.
1428 */
1429 static void
1430 vdev_spa_set_alloc(spa_t *spa, uint64_t min_alloc)
1431 {
1432 if (min_alloc < spa->spa_min_alloc)
1433 spa->spa_min_alloc = min_alloc;
1434 if (spa->spa_gcd_alloc == INT_MAX) {
1435 spa->spa_gcd_alloc = min_alloc;
1436 } else {
1437 spa->spa_gcd_alloc = vdev_gcd(min_alloc,
1438 spa->spa_gcd_alloc);
1439 }
1440 }
1441
1442 void
1443 vdev_metaslab_group_create(vdev_t *vd)
1444 {
1445 spa_t *spa = vd->vdev_spa;
1446
1447 /*
1448  * metaslab_group_create() was delayed until the allocation bias was known.
1449 */
1450 if (vd->vdev_mg == NULL) {
1451 metaslab_class_t *mc;
1452
1453 if (vd->vdev_islog && vd->vdev_alloc_bias == VDEV_BIAS_NONE)
1454 vd->vdev_alloc_bias = VDEV_BIAS_LOG;
1455
1456 ASSERT3U(vd->vdev_islog, ==,
1457 (vd->vdev_alloc_bias == VDEV_BIAS_LOG));
1458
1459 switch (vd->vdev_alloc_bias) {
1460 case VDEV_BIAS_LOG:
1461 mc = spa_log_class(spa);
1462 break;
1463 case VDEV_BIAS_SPECIAL:
1464 mc = spa_special_class(spa);
1465 break;
1466 case VDEV_BIAS_DEDUP:
1467 mc = spa_dedup_class(spa);
1468 break;
1469 default:
1470 mc = spa_normal_class(spa);
1471 }
1472
1473 vd->vdev_mg = metaslab_group_create(mc, vd,
1474 spa->spa_alloc_count);
1475
1476 if (!vd->vdev_islog) {
1477 vd->vdev_log_mg = metaslab_group_create(
1478 spa_embedded_log_class(spa), vd, 1);
1479 }
1480
1481 /*
1482 * The spa ashift min/max only apply for the normal metaslab
1483 * class. Class destination is late binding so ashift boundary
1484 * setting had to wait until now.
1485 */
1486 if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
1487 mc == spa_normal_class(spa) && vd->vdev_aux == NULL) {
1488 if (vd->vdev_ashift > spa->spa_max_ashift)
1489 spa->spa_max_ashift = vd->vdev_ashift;
1490 if (vd->vdev_ashift < spa->spa_min_ashift)
1491 spa->spa_min_ashift = vd->vdev_ashift;
1492
1493 uint64_t min_alloc = vdev_get_min_alloc(vd);
1494 vdev_spa_set_alloc(spa, min_alloc);
1495 }
1496 }
1497 }
1498
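/*
 * Create (or grow) the metaslab array for a top-level vdev and initialize
 * any new metaslabs. This also selects the embedded slog metaslab when
 * appropriate and, unless the vdev is non-allocating or merely expanding,
 * activates the metaslab groups.
 */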
1499 int
1500 vdev_metaslab_init(vdev_t *vd, uint64_t txg)
1501 {
1502 spa_t *spa = vd->vdev_spa;
1503 uint64_t oldc = vd->vdev_ms_count;
1504 uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
1505 metaslab_t **mspp;
1506 int error;
1507 boolean_t expanding = (oldc != 0);
1508
1509 ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1510
1511 /*
1512 * This vdev is not being allocated from yet or is a hole.
1513 */
1514 if (vd->vdev_ms_shift == 0)
1515 return (0);
1516
1517 ASSERT(!vd->vdev_ishole);
1518
1519 ASSERT(oldc <= newc);
1520
1521 mspp = vmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
1522
1523 if (expanding) {
1524 memcpy(mspp, vd->vdev_ms, oldc * sizeof (*mspp));
1525 vmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
1526 }
1527
1528 vd->vdev_ms = mspp;
1529 vd->vdev_ms_count = newc;
1530
1531 for (uint64_t m = oldc; m < newc; m++) {
1532 uint64_t object = 0;
1533 /*
1534 * vdev_ms_array may be 0 if we are creating the "fake"
1535 * metaslabs for an indirect vdev for zdb's leak detection.
1536 * See zdb_leak_init().
1537 */
1538 if (txg == 0 && vd->vdev_ms_array != 0) {
1539 error = dmu_read(spa->spa_meta_objset,
1540 vd->vdev_ms_array,
1541 m * sizeof (uint64_t), sizeof (uint64_t), &object,
1542 DMU_READ_PREFETCH);
1543 if (error != 0) {
1544 vdev_dbgmsg(vd, "unable to read the metaslab "
1545 "array [error=%d]", error);
1546 return (error);
1547 }
1548 }
1549
1550 error = metaslab_init(vd->vdev_mg, m, object, txg,
1551 &(vd->vdev_ms[m]));
1552 if (error != 0) {
1553 vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
1554 error);
1555 return (error);
1556 }
1557 }
1558
1559 /*
1560 * Find the emptiest metaslab on the vdev and mark it for use for
1561 * embedded slog by moving it from the regular to the log metaslab
1562 * group.
1563 */
1564 if (vd->vdev_mg->mg_class == spa_normal_class(spa) &&
1565 vd->vdev_ms_count > zfs_embedded_slog_min_ms &&
1566 avl_is_empty(&vd->vdev_log_mg->mg_metaslab_tree)) {
1567 uint64_t slog_msid = 0;
1568 uint64_t smallest = UINT64_MAX;
1569
1570 /*
1571 * Note, we only search the new metaslabs, because the old
1572 * (pre-existing) ones may be active (e.g. have non-empty
1573 * range_tree's), and we don't move them to the new
1574 * metaslab_t.
1575 */
1576 for (uint64_t m = oldc; m < newc; m++) {
1577 uint64_t alloc =
1578 space_map_allocated(vd->vdev_ms[m]->ms_sm);
1579 if (alloc < smallest) {
1580 slog_msid = m;
1581 smallest = alloc;
1582 }
1583 }
1584 metaslab_t *slog_ms = vd->vdev_ms[slog_msid];
1585 /*
1586 * The metaslab was marked as dirty at the end of
1587 * metaslab_init(). Remove it from the dirty list so that we
1588 * can uninitialize and reinitialize it to the new class.
1589 */
1590 if (txg != 0) {
1591 (void) txg_list_remove_this(&vd->vdev_ms_list,
1592 slog_ms, txg);
1593 }
1594 uint64_t sm_obj = space_map_object(slog_ms->ms_sm);
1595 metaslab_fini(slog_ms);
1596 VERIFY0(metaslab_init(vd->vdev_log_mg, slog_msid, sm_obj, txg,
1597 &vd->vdev_ms[slog_msid]));
1598 }
1599
1600 if (txg == 0)
1601 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);
1602
1603 /*
1604 * If the vdev is marked as non-allocating then don't
1605 * activate the metaslabs since we want to ensure that
1606 * no allocations are performed on this device.
1607 */
1608 if (vd->vdev_noalloc) {
1609 /* track non-allocating vdev space */
1610 spa->spa_nonallocating_dspace += spa_deflate(spa) ?
1611 vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
1612 } else if (!expanding) {
1613 metaslab_group_activate(vd->vdev_mg);
1614 if (vd->vdev_log_mg != NULL)
1615 metaslab_group_activate(vd->vdev_log_mg);
1616 }
1617
1618 if (txg == 0)
1619 spa_config_exit(spa, SCL_ALLOC, FTAG);
1620
1621 return (0);
1622 }
1623
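/*
 * Tear down all metaslab state for a top-level vdev: the checkpoint space
 * map, every metaslab, and the metaslab array itself. May be called more
 * than once, so each step is guarded against re-execution.
 */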
1624 void
1625 vdev_metaslab_fini(vdev_t *vd)
1626 {
1627 if (vd->vdev_checkpoint_sm != NULL) {
1628 ASSERT(spa_feature_is_active(vd->vdev_spa,
1629 SPA_FEATURE_POOL_CHECKPOINT));
1630 space_map_close(vd->vdev_checkpoint_sm);
1631 /*
1632 * Even though we close the space map, we need to set its
1633 * pointer to NULL. The reason is that vdev_metaslab_fini()
1634 * may be called multiple times for certain operations
1635 * (i.e. when destroying a pool) so we need to ensure that
1636 * this clause never executes twice. This logic is similar
1637 * to the one used for the vdev_ms clause below.
1638 */
1639 vd->vdev_checkpoint_sm = NULL;
1640 }
1641
1642 if (vd->vdev_ms != NULL) {
1643 metaslab_group_t *mg = vd->vdev_mg;
1644
1645 metaslab_group_passivate(mg);
1646 if (vd->vdev_log_mg != NULL) {
1647 ASSERT(!vd->vdev_islog);
1648 metaslab_group_passivate(vd->vdev_log_mg);
1649 }
1650
1651 uint64_t count = vd->vdev_ms_count;
1652 for (uint64_t m = 0; m < count; m++) {
1653 metaslab_t *msp = vd->vdev_ms[m];
1654 if (msp != NULL)
1655 metaslab_fini(msp);
1656 }
1657 vmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
1658 vd->vdev_ms = NULL;
1659 vd->vdev_ms_count = 0;
1660
1661 for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
1662 ASSERT0(mg->mg_histogram[i]);
1663 if (vd->vdev_log_mg != NULL)
1664 ASSERT0(vd->vdev_log_mg->mg_histogram[i]);
1665 }
1666 }
1667 ASSERT0(vd->vdev_ms_count);
1668 }
1669
1670 typedef struct vdev_probe_stats {
1671 boolean_t vps_readable;
1672 boolean_t vps_writeable;
1673 boolean_t vps_zio_done_probe;
1674 int vps_flags;
1675 } vdev_probe_stats_t;
1676
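/*
 * Completion callback for the probe I/Os issued by vdev_probe(). Each
 * successful label read is followed by a write to the same pad region.
 * The final NULL zio gathers the results, updates vdev_cant_read and
 * vdev_cant_write, and posts a probe-failure ereport if the device is
 * no longer accessible.
 */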
1677 static void
1678 vdev_probe_done(zio_t *zio)
1679 {
1680 spa_t *spa = zio->io_spa;
1681 vdev_t *vd = zio->io_vd;
1682 vdev_probe_stats_t *vps = zio->io_private;
1683
1684 ASSERT(vd->vdev_probe_zio != NULL);
1685
1686 if (zio->io_type == ZIO_TYPE_READ) {
1687 if (zio->io_error == 0)
1688 vps->vps_readable = 1;
1689 if (zio->io_error == 0 && spa_writeable(spa)) {
1690 zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
1691 zio->io_offset, zio->io_size, zio->io_abd,
1692 ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
1693 ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
1694 } else {
1695 abd_free(zio->io_abd);
1696 }
1697 } else if (zio->io_type == ZIO_TYPE_WRITE) {
1698 if (zio->io_error == 0)
1699 vps->vps_writeable = 1;
1700 abd_free(zio->io_abd);
1701 } else if (zio->io_type == ZIO_TYPE_NULL) {
1702 zio_t *pio;
1703 zio_link_t *zl;
1704
1705 vd->vdev_cant_read |= !vps->vps_readable;
1706 vd->vdev_cant_write |= !vps->vps_writeable;
1707 vdev_dbgmsg(vd, "probe done, cant_read=%u cant_write=%u",
1708 vd->vdev_cant_read, vd->vdev_cant_write);
1709
1710 if (vdev_readable(vd) &&
1711 (vdev_writeable(vd) || !spa_writeable(spa))) {
1712 zio->io_error = 0;
1713 } else {
1714 ASSERT(zio->io_error != 0);
1715 vdev_dbgmsg(vd, "failed probe");
1716 (void) zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
1717 spa, vd, NULL, NULL, 0);
1718 zio->io_error = SET_ERROR(ENXIO);
1719
1720 /*
1721 * If this probe was initiated from zio pipeline, then
1722 * change the state in a spa_async_request. Probes that
1723 * were initiated from a vdev_open can change the state
1724 * as part of the open call.
1725 */
1726 if (vps->vps_zio_done_probe) {
1727 vd->vdev_fault_wanted = B_TRUE;
1728 spa_async_request(spa, SPA_ASYNC_FAULT_VDEV);
1729 }
1730 }
1731
1732 mutex_enter(&vd->vdev_probe_lock);
1733 ASSERT(vd->vdev_probe_zio == zio);
1734 vd->vdev_probe_zio = NULL;
1735 mutex_exit(&vd->vdev_probe_lock);
1736
1737 zl = NULL;
1738 while ((pio = zio_walk_parents(zio, &zl)) != NULL)
1739 if (!vdev_accessible(vd, pio))
1740 pio->io_error = SET_ERROR(ENXIO);
1741
1742 kmem_free(vps, sizeof (*vps));
1743 }
1744 }
1745
1746 /*
1747 * Determine whether this device is accessible.
1748 *
1749 * Read and write to several known locations: the pad regions of each
1750 * vdev label but the first, which we leave alone in case it contains
1751 * a VTOC.
1752 */
1753 zio_t *
1754 vdev_probe(vdev_t *vd, zio_t *zio)
1755 {
1756 spa_t *spa = vd->vdev_spa;
1757 vdev_probe_stats_t *vps = NULL;
1758 zio_t *pio;
1759
1760 ASSERT(vd->vdev_ops->vdev_op_leaf);
1761
1762 /*
1763 * Don't probe the probe.
1764 */
1765 if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
1766 return (NULL);
1767
1768 /*
1769 * To prevent 'probe storms' when a device fails, we create
1770 * just one probe i/o at a time. All zios that want to probe
1771 * this vdev will become parents of the probe io.
1772 */
1773 mutex_enter(&vd->vdev_probe_lock);
1774
1775 if ((pio = vd->vdev_probe_zio) == NULL) {
1776 vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
1777
1778 vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
1779 ZIO_FLAG_DONT_AGGREGATE | ZIO_FLAG_TRYHARD;
1780 vps->vps_zio_done_probe = (zio != NULL);
1781
1782 if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
1783 /*
1784 * vdev_cant_read and vdev_cant_write can only
1785 * transition from TRUE to FALSE when we have the
1786 * SCL_ZIO lock as writer; otherwise they can only
1787 * transition from FALSE to TRUE. This ensures that
1788 * any zio looking at these values can assume that
1789 * failures persist for the life of the I/O. That's
1790 * important because when a device has intermittent
1791 * connectivity problems, we want to ensure that
1792 * they're ascribed to the device (ENXIO) and not
1793 * the zio (EIO).
1794 *
1795 * Since we hold SCL_ZIO as writer here, clear both
1796 * values so the probe can reevaluate from first
1797 * principles.
1798 */
1799 vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
1800 vd->vdev_cant_read = B_FALSE;
1801 vd->vdev_cant_write = B_FALSE;
1802 }
1803
1804 vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
1805 vdev_probe_done, vps,
1806 vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);
1807 }
1808
1809 if (zio != NULL)
1810 zio_add_child(zio, pio);
1811
1812 mutex_exit(&vd->vdev_probe_lock);
1813
1814 if (vps == NULL) {
1815 ASSERT(zio != NULL);
1816 return (NULL);
1817 }
1818
1819 for (int l = 1; l < VDEV_LABELS; l++) {
1820 zio_nowait(zio_read_phys(pio, vd,
1821 vdev_label_offset(vd->vdev_psize, l,
1822 offsetof(vdev_label_t, vl_be)), VDEV_PAD_SIZE,
1823 abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
1824 ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
1825 ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
1826 }
1827
1828 if (zio == NULL)
1829 return (pio);
1830
1831 zio_nowait(pio);
1832 return (NULL);
1833 }
1834
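/*
 * Taskq callback used to load child vdevs in parallel.
 */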
1835 static void
1836 vdev_load_child(void *arg)
1837 {
1838 vdev_t *vd = arg;
1839
1840 vd->vdev_load_error = vdev_load(vd);
1841 }
1842
1843 static void
1844 vdev_open_child(void *arg)
1845 {
1846 vdev_t *vd = arg;
1847
1848 vd->vdev_open_thread = curthread;
1849 vd->vdev_open_error = vdev_open(vd);
1850 vd->vdev_open_thread = NULL;
1851 }
1852
1853 static boolean_t
1854 vdev_uses_zvols(vdev_t *vd)
1855 {
1856 #ifdef _KERNEL
1857 if (zvol_is_zvol(vd->vdev_path))
1858 return (B_TRUE);
1859 #endif
1860
1861 for (int c = 0; c < vd->vdev_children; c++)
1862 if (vdev_uses_zvols(vd->vdev_child[c]))
1863 return (B_TRUE);
1864
1865 return (B_FALSE);
1866 }
1867
1868 /*
1869 * Returns B_TRUE if the passed child should be opened.
1870 */
1871 static boolean_t
1872 vdev_default_open_children_func(vdev_t *vd)
1873 {
1874 (void) vd;
1875 return (B_TRUE);
1876 }
1877
1878 /*
1879 * Open the requested child vdevs. If any of the leaf vdevs are using
1880 * a ZFS volume then do the opens in a single thread. This avoids a
1881 * deadlock when the current thread is holding the spa_namespace_lock.
1882 */
1883 static void
1884 vdev_open_children_impl(vdev_t *vd, vdev_open_children_func_t *open_func)
1885 {
1886 int children = vd->vdev_children;
1887
1888 taskq_t *tq = taskq_create("vdev_open", children, minclsyspri,
1889 children, children, TASKQ_PREPOPULATE);
1890 vd->vdev_nonrot = B_TRUE;
1891
1892 for (int c = 0; c < children; c++) {
1893 vdev_t *cvd = vd->vdev_child[c];
1894
1895 if (open_func(cvd) == B_FALSE)
1896 continue;
1897
1898 if (tq == NULL || vdev_uses_zvols(vd)) {
1899 cvd->vdev_open_error = vdev_open(cvd);
1900 } else {
1901 VERIFY(taskq_dispatch(tq, vdev_open_child,
1902 cvd, TQ_SLEEP) != TASKQID_INVALID);
1903 }
1904
1905 vd->vdev_nonrot &= cvd->vdev_nonrot;
1906 }
1907
1908 if (tq != NULL) {
1909 taskq_wait(tq);
1910 taskq_destroy(tq);
1911 }
1912 }
1913
1914 /*
1915 * Open all child vdevs.
1916 */
1917 void
1918 vdev_open_children(vdev_t *vd)
1919 {
1920 vdev_open_children_impl(vd, vdev_default_open_children_func);
1921 }
1922
1923 /*
1924 * Conditionally open a subset of child vdevs.
1925 */
1926 void
1927 vdev_open_children_subset(vdev_t *vd, vdev_open_children_func_t *open_func)
1928 {
1929 vdev_open_children_impl(vd, open_func);
1930 }
1931
1932 /*
1933 * Compute the raidz-deflation ratio. Note, we hard-code 128k (1 << 17)
1934  * because it is the "typical" blocksize.  Even though SPA_MAXBLOCKSIZE has
1935  * changed, this algorithm cannot change; otherwise it would inconsistently
1936 * account for existing bp's. We also hard-code txg 0 for the same reason
1937 * since expanded RAIDZ vdevs can use a different asize for different birth
1938 * txg's.
1939 */
1940 static void
1941 vdev_set_deflate_ratio(vdev_t *vd)
1942 {
1943 if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
1944 vd->vdev_deflate_ratio = (1 << 17) /
1945 (vdev_psize_to_asize_txg(vd, 1 << 17, 0) >>
1946 SPA_MINBLOCKSHIFT);
1947 }
1948 }
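
/*
 * Worked example (a sketch, assuming a non-raidz top-level vdev where a 128K
 * psize maps to a 128K asize): with SPA_MINBLOCKSHIFT == 9,
 *
 *	vdev_deflate_ratio = (1 << 17) / ((1 << 17) >> 9) = 131072 / 256 = 512
 *
 * For raidz/draid top-level vdevs, vdev_psize_to_asize_txg() inflates the
 * 128K psize by parity and padding, so the divisor grows and the resulting
 * ratio is correspondingly smaller.
 */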
1949
1950 /*
1951 * Choose the best of two ashifts, preferring one between logical ashift
1952 * (absolute minimum) and administrator defined maximum, otherwise take
1953 * the biggest of the two.
1954 */
1955 uint64_t
1956 vdev_best_ashift(uint64_t logical, uint64_t a, uint64_t b)
1957 {
1958 if (a > logical && a <= zfs_vdev_max_auto_ashift) {
1959 if (b <= logical || b > zfs_vdev_max_auto_ashift)
1960 return (a);
1961 else
1962 return (MAX(a, b));
1963 } else if (b <= logical || b > zfs_vdev_max_auto_ashift)
1964 return (MAX(a, b));
1965 return (b);
1966 }
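
/*
 * Worked examples (a sketch, assuming logical == 9 and
 * zfs_vdev_max_auto_ashift == 14):
 *
 *	vdev_best_ashift(9, 12,  9) == 12	(only 'a' is in range)
 *	vdev_best_ashift(9, 12, 13) == 13	(both in range, take the max)
 *	vdev_best_ashift(9, 16,  9) == 16	(neither in range, take the max)
 */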
1967
1968 /*
1969 * Maximize performance by inflating the configured ashift for top level
1970 * vdevs to be as close to the physical ashift as possible while maintaining
1971 * administrator defined limits and ensuring it doesn't go below the
1972 * logical ashift.
1973 */
1974 static void
1975 vdev_ashift_optimize(vdev_t *vd)
1976 {
1977 ASSERT(vd == vd->vdev_top);
1978
1979 if (vd->vdev_ashift < vd->vdev_physical_ashift &&
1980 vd->vdev_physical_ashift <= zfs_vdev_max_auto_ashift) {
1981 vd->vdev_ashift = MIN(
1982 MAX(zfs_vdev_max_auto_ashift, vd->vdev_ashift),
1983 MAX(zfs_vdev_min_auto_ashift,
1984 vd->vdev_physical_ashift));
1985 } else {
1986 /*
1987 * If the logical and physical ashifts are the same, then
1988 * we ensure that the top-level vdev's ashift is not smaller
1989 * than our minimum ashift value. For the unusual case
1990 * where logical ashift > physical ashift, we can't cap
1991 * the calculated ashift based on max ashift as that
1992 * would cause failures.
1993 * We still check if we need to increase it to match
1994 * the min ashift.
1995 */
1996 vd->vdev_ashift = MAX(zfs_vdev_min_auto_ashift,
1997 vd->vdev_ashift);
1998 }
1999 }
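
/*
 * Worked example (a sketch, assuming zfs_vdev_min_auto_ashift == 9 and
 * zfs_vdev_max_auto_ashift == 14): a drive that reports a logical ashift of
 * 9 and a physical ashift of 12 starts with vdev_ashift == 9; the first
 * branch above raises it to MIN(MAX(14, 9), MAX(9, 12)) == 12, i.e. the
 * physical sector size, clamped to the administrator-defined limits.
 */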
2000
2001 /*
2002 * Prepare a virtual device for access.
2003 */
2004 int
2005 vdev_open(vdev_t *vd)
2006 {
2007 spa_t *spa = vd->vdev_spa;
2008 int error;
2009 uint64_t osize = 0;
2010 uint64_t max_osize = 0;
2011 uint64_t asize, max_asize, psize;
2012 uint64_t logical_ashift = 0;
2013 uint64_t physical_ashift = 0;
2014
2015 ASSERT(vd->vdev_open_thread == curthread ||
2016 spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2017 ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
2018 vd->vdev_state == VDEV_STATE_CANT_OPEN ||
2019 vd->vdev_state == VDEV_STATE_OFFLINE);
2020
2021 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
2022 vd->vdev_cant_read = B_FALSE;
2023 vd->vdev_cant_write = B_FALSE;
2024 vd->vdev_min_asize = vdev_get_min_asize(vd);
2025
2026 /*
2027 * If this vdev is not removed, check its fault status. If it's
2028 * faulted, bail out of the open.
2029 */
2030 if (!vd->vdev_removed && vd->vdev_faulted) {
2031 ASSERT(vd->vdev_children == 0);
2032 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
2033 vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
2034 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
2035 vd->vdev_label_aux);
2036 return (SET_ERROR(ENXIO));
2037 } else if (vd->vdev_offline) {
2038 ASSERT(vd->vdev_children == 0);
2039 vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
2040 return (SET_ERROR(ENXIO));
2041 }
2042
2043 error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize,
2044 &logical_ashift, &physical_ashift);
2045
2046 /* Keep the device in removed state if unplugged */
2047 if (error == ENOENT && vd->vdev_removed) {
2048 vdev_set_state(vd, B_TRUE, VDEV_STATE_REMOVED,
2049 VDEV_AUX_NONE);
2050 return (error);
2051 }
2052
2053 /*
2054 * Physical volume size should never be larger than its max size, unless
2055 * the disk has shrunk while we were reading it or the device is buggy
2056 	 * or damaged. Either way it is not safe for use, so bail out of the open.
2057 */
2058 if (osize > max_osize) {
2059 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2060 VDEV_AUX_OPEN_FAILED);
2061 return (SET_ERROR(ENXIO));
2062 }
2063
2064 /*
2065 * Reset the vdev_reopening flag so that we actually close
2066 * the vdev on error.
2067 */
2068 vd->vdev_reopening = B_FALSE;
2069 if (zio_injection_enabled && error == 0)
2070 error = zio_handle_device_injection(vd, NULL, SET_ERROR(ENXIO));
2071
2072 if (error) {
2073 if (vd->vdev_removed &&
2074 vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
2075 vd->vdev_removed = B_FALSE;
2076
2077 if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) {
2078 vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE,
2079 vd->vdev_stat.vs_aux);
2080 } else {
2081 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2082 vd->vdev_stat.vs_aux);
2083 }
2084 return (error);
2085 }
2086
2087 vd->vdev_removed = B_FALSE;
2088
2089 /*
2090 * Recheck the faulted flag now that we have confirmed that
2091 * the vdev is accessible. If we're faulted, bail.
2092 */
2093 if (vd->vdev_faulted) {
2094 ASSERT(vd->vdev_children == 0);
2095 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
2096 vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
2097 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
2098 vd->vdev_label_aux);
2099 return (SET_ERROR(ENXIO));
2100 }
2101
2102 if (vd->vdev_degraded) {
2103 ASSERT(vd->vdev_children == 0);
2104 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
2105 VDEV_AUX_ERR_EXCEEDED);
2106 } else {
2107 vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
2108 }
2109
2110 /*
2111 * For hole or missing vdevs we just return success.
2112 */
2113 if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
2114 return (0);
2115
2116 for (int c = 0; c < vd->vdev_children; c++) {
2117 if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
2118 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
2119 VDEV_AUX_NONE);
2120 break;
2121 }
2122 }
2123
2124 osize = P2ALIGN_TYPED(osize, sizeof (vdev_label_t), uint64_t);
2125 max_osize = P2ALIGN_TYPED(max_osize, sizeof (vdev_label_t), uint64_t);
2126
2127 if (vd->vdev_children == 0) {
2128 if (osize < SPA_MINDEVSIZE) {
2129 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2130 VDEV_AUX_TOO_SMALL);
2131 return (SET_ERROR(EOVERFLOW));
2132 }
2133 psize = osize;
2134 asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
2135 max_asize = max_osize - (VDEV_LABEL_START_SIZE +
2136 VDEV_LABEL_END_SIZE);
2137 } else {
2138 if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
2139 (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
2140 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2141 VDEV_AUX_TOO_SMALL);
2142 return (SET_ERROR(EOVERFLOW));
2143 }
2144 psize = 0;
2145 asize = osize;
2146 max_asize = max_osize;
2147 }
2148
2149 /*
2150 * If the vdev was expanded, record this so that we can re-create the
2151 	 * uberblock rings in labels {2,3} during the next sync.
2152 */
2153 if ((psize > vd->vdev_psize) && (vd->vdev_psize != 0))
2154 vd->vdev_copy_uberblocks = B_TRUE;
2155
2156 vd->vdev_psize = psize;
2157
2158 /*
2159 * Make sure the allocatable size hasn't shrunk too much.
2160 */
2161 if (asize < vd->vdev_min_asize) {
2162 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2163 VDEV_AUX_BAD_LABEL);
2164 return (SET_ERROR(EINVAL));
2165 }
2166
2167 /*
2168 * We can always set the logical/physical ashift members since
2169 * their values are only used to calculate the vdev_ashift when
2170 * the device is first added to the config. These values should
2171 * not be used for anything else since they may change whenever
2172 * the device is reopened and we don't store them in the label.
2173 */
2174 vd->vdev_physical_ashift =
2175 MAX(physical_ashift, vd->vdev_physical_ashift);
2176 vd->vdev_logical_ashift = MAX(logical_ashift,
2177 vd->vdev_logical_ashift);
2178
2179 if (vd->vdev_asize == 0) {
2180 /*
2181 * This is the first-ever open, so use the computed values.
2182 * For compatibility, a different ashift can be requested.
2183 */
2184 vd->vdev_asize = asize;
2185 vd->vdev_max_asize = max_asize;
2186
2187 /*
2188 * If the vdev_ashift was not overridden at creation time,
2189 		 * then set it to the logical ashift and optimize the ashift.
2190 */
2191 if (vd->vdev_ashift == 0) {
2192 vd->vdev_ashift = vd->vdev_logical_ashift;
2193
2194 if (vd->vdev_logical_ashift > ASHIFT_MAX) {
2195 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2196 VDEV_AUX_ASHIFT_TOO_BIG);
2197 return (SET_ERROR(EDOM));
2198 }
2199
2200 if (vd->vdev_top == vd && vd->vdev_attaching == B_FALSE)
2201 vdev_ashift_optimize(vd);
2202 vd->vdev_attaching = B_FALSE;
2203 }
2204 if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN ||
2205 vd->vdev_ashift > ASHIFT_MAX)) {
2206 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2207 VDEV_AUX_BAD_ASHIFT);
2208 return (SET_ERROR(EDOM));
2209 }
2210 } else {
2211 /*
2212 * Make sure the alignment required hasn't increased.
2213 */
2214 if (vd->vdev_ashift > vd->vdev_top->vdev_ashift &&
2215 vd->vdev_ops->vdev_op_leaf) {
2216 (void) zfs_ereport_post(
2217 FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
2218 spa, vd, NULL, NULL, 0);
2219 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2220 VDEV_AUX_BAD_LABEL);
2221 return (SET_ERROR(EDOM));
2222 }
2223 vd->vdev_max_asize = max_asize;
2224 }
2225
2226 /*
2227 * If all children are healthy we update asize if either:
2228 * The asize has increased, due to a device expansion caused by dynamic
2229 	 * LUN growth or vdev replacement, and automatic expansion is enabled,
2230 	 * making the additional space available.
2231 *
2232 * The asize has decreased, due to a device shrink usually caused by a
2233 * vdev replace with a smaller device. This ensures that calculations
2234 	 * based on max_asize and asize (e.g. esize) are always valid. It's safe
2235 * to do this as we've already validated that asize is greater than
2236 * vdev_min_asize.
2237 */
2238 if (vd->vdev_state == VDEV_STATE_HEALTHY &&
2239 ((asize > vd->vdev_asize &&
2240 (vd->vdev_expanding || spa->spa_autoexpand)) ||
2241 (asize < vd->vdev_asize)))
2242 vd->vdev_asize = asize;
2243
2244 vdev_set_min_asize(vd);
2245
2246 /*
2247 * Ensure we can issue some IO before declaring the
2248 * vdev open for business.
2249 */
2250 if (vd->vdev_ops->vdev_op_leaf &&
2251 (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
2252 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
2253 VDEV_AUX_ERR_EXCEEDED);
2254 return (error);
2255 }
2256
2257 /*
2258 * Track the minimum allocation size.
2259 */
2260 if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
2261 vd->vdev_islog == 0 && vd->vdev_aux == NULL) {
2262 uint64_t min_alloc = vdev_get_min_alloc(vd);
2263 vdev_spa_set_alloc(spa, min_alloc);
2264 }
2265
2266 /*
2267 * If this is a leaf vdev, assess whether a resilver is needed.
2268 * But don't do this if we are doing a reopen for a scrub, since
2269 * this would just restart the scrub we are already doing.
2270 */
2271 if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen)
2272 dsl_scan_assess_vdev(spa->spa_dsl_pool, vd);
2273
2274 return (0);
2275 }
2276
2277 static void
2278 vdev_validate_child(void *arg)
2279 {
2280 vdev_t *vd = arg;
2281
2282 vd->vdev_validate_thread = curthread;
2283 vd->vdev_validate_error = vdev_validate(vd);
2284 vd->vdev_validate_thread = NULL;
2285 }
2286
2287 /*
2288 * Called once the vdevs are all opened, this routine validates the label
2289 * contents. This needs to be done before vdev_load() so that we don't
2290 * inadvertently do repair I/Os to the wrong device.
2291 *
2292 * This function will only return failure if one of the vdevs indicates that it
2293 * has since been destroyed or exported. This is only possible if
2294 * /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
2295 * will be updated but the function will return 0.
2296 */
2297 int
2298 vdev_validate(vdev_t *vd)
2299 {
2300 spa_t *spa = vd->vdev_spa;
2301 taskq_t *tq = NULL;
2302 nvlist_t *label;
2303 uint64_t guid = 0, aux_guid = 0, top_guid;
2304 uint64_t state;
2305 nvlist_t *nvl;
2306 uint64_t txg;
2307 int children = vd->vdev_children;
2308
2309 if (vdev_validate_skip)
2310 return (0);
2311
2312 if (children > 0) {
2313 tq = taskq_create("vdev_validate", children, minclsyspri,
2314 children, children, TASKQ_PREPOPULATE);
2315 }
2316
2317 for (uint64_t c = 0; c < children; c++) {
2318 vdev_t *cvd = vd->vdev_child[c];
2319
2320 if (tq == NULL || vdev_uses_zvols(cvd)) {
2321 vdev_validate_child(cvd);
2322 } else {
2323 VERIFY(taskq_dispatch(tq, vdev_validate_child, cvd,
2324 TQ_SLEEP) != TASKQID_INVALID);
2325 }
2326 }
2327 if (tq != NULL) {
2328 taskq_wait(tq);
2329 taskq_destroy(tq);
2330 }
2331 for (int c = 0; c < children; c++) {
2332 int error = vd->vdev_child[c]->vdev_validate_error;
2333
2334 if (error != 0)
2335 return (SET_ERROR(EBADF));
2336 }
2337
2338
2339 /*
2340 * If the device has already failed, or was marked offline, don't do
2341 * any further validation. Otherwise, label I/O will fail and we will
2342 * overwrite the previous state.
2343 */
2344 if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd))
2345 return (0);
2346
2347 /*
2348 * If we are performing an extreme rewind, we allow for a label that
2349 * was modified at a point after the current txg.
2350 * If config lock is not held do not check for the txg. spa_sync could
2351 * be updating the vdev's label before updating spa_last_synced_txg.
2352 */
2353 if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
2354 spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
2355 txg = UINT64_MAX;
2356 else
2357 txg = spa_last_synced_txg(spa);
2358
2359 if ((label = vdev_label_read_config(vd, txg)) == NULL) {
2360 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2361 VDEV_AUX_BAD_LABEL);
2362 vdev_dbgmsg(vd, "vdev_validate: failed reading config for "
2363 "txg %llu", (u_longlong_t)txg);
2364 return (0);
2365 }
2366
2367 /*
2368 * Determine if this vdev has been split off into another
2369 * pool. If so, then refuse to open it.
2370 */
2371 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
2372 &aux_guid) == 0 && aux_guid == spa_guid(spa)) {
2373 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2374 VDEV_AUX_SPLIT_POOL);
2375 nvlist_free(label);
2376 vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool");
2377 return (0);
2378 }
2379
2380 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) {
2381 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2382 VDEV_AUX_CORRUPT_DATA);
2383 nvlist_free(label);
2384 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
2385 ZPOOL_CONFIG_POOL_GUID);
2386 return (0);
2387 }
2388
2389 /*
2390 * If config is not trusted then ignore the spa guid check. This is
2391 * necessary because if the machine crashed during a re-guid the new
2392 * guid might have been written to all of the vdev labels, but not the
2393 * cached config. The check will be performed again once we have the
2394 * trusted config from the MOS.
2395 */
2396 if (spa->spa_trust_config && guid != spa_guid(spa)) {
2397 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2398 VDEV_AUX_CORRUPT_DATA);
2399 nvlist_free(label);
2400 vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't "
2401 "match config (%llu != %llu)", (u_longlong_t)guid,
2402 (u_longlong_t)spa_guid(spa));
2403 return (0);
2404 }
2405
2406 if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
2407 != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
2408 &aux_guid) != 0)
2409 aux_guid = 0;
2410
2411 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) {
2412 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2413 VDEV_AUX_CORRUPT_DATA);
2414 nvlist_free(label);
2415 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
2416 ZPOOL_CONFIG_GUID);
2417 return (0);
2418 }
2419
2420 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid)
2421 != 0) {
2422 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2423 VDEV_AUX_CORRUPT_DATA);
2424 nvlist_free(label);
2425 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
2426 ZPOOL_CONFIG_TOP_GUID);
2427 return (0);
2428 }
2429
2430 /*
2431 * If this vdev just became a top-level vdev because its sibling was
2432 * detached, it will have adopted the parent's vdev guid -- but the
2433 * label may or may not be on disk yet. Fortunately, either version
2434 * of the label will have the same top guid, so if we're a top-level
2435 * vdev, we can safely compare to that instead.
2436 * However, if the config comes from a cachefile that failed to update
2437 * after the detach, a top-level vdev will appear as a non top-level
2438 * vdev in the config. Also relax the constraints if we perform an
2439 * extreme rewind.
2440 *
2441 * If we split this vdev off instead, then we also check the
2442 * original pool's guid. We don't want to consider the vdev
2443 * corrupt if it is partway through a split operation.
2444 */
2445 if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) {
2446 boolean_t mismatch = B_FALSE;
2447 if (spa->spa_trust_config && !spa->spa_extreme_rewind) {
2448 if (vd != vd->vdev_top || vd->vdev_guid != top_guid)
2449 mismatch = B_TRUE;
2450 } else {
2451 if (vd->vdev_guid != top_guid &&
2452 vd->vdev_top->vdev_guid != guid)
2453 mismatch = B_TRUE;
2454 }
2455
2456 if (mismatch) {
2457 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2458 VDEV_AUX_CORRUPT_DATA);
2459 nvlist_free(label);
2460 vdev_dbgmsg(vd, "vdev_validate: config guid "
2461 "doesn't match label guid");
2462 vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu",
2463 (u_longlong_t)vd->vdev_guid,
2464 (u_longlong_t)vd->vdev_top->vdev_guid);
2465 vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, "
2466 "aux_guid %llu", (u_longlong_t)guid,
2467 (u_longlong_t)top_guid, (u_longlong_t)aux_guid);
2468 return (0);
2469 }
2470 }
2471
2472 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
2473 &state) != 0) {
2474 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2475 VDEV_AUX_CORRUPT_DATA);
2476 nvlist_free(label);
2477 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
2478 ZPOOL_CONFIG_POOL_STATE);
2479 return (0);
2480 }
2481
2482 nvlist_free(label);
2483
2484 /*
2485 * If this is a verbatim import, no need to check the
2486 * state of the pool.
2487 */
2488 if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
2489 spa_load_state(spa) == SPA_LOAD_OPEN &&
2490 state != POOL_STATE_ACTIVE) {
2491 vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) "
2492 "for spa %s", (u_longlong_t)state, spa->spa_name);
2493 return (SET_ERROR(EBADF));
2494 }
2495
2496 /*
2497 * If we were able to open and validate a vdev that was
2498 * previously marked permanently unavailable, clear that state
2499 * now.
2500 */
2501 if (vd->vdev_not_present)
2502 vd->vdev_not_present = 0;
2503
2504 return (0);
2505 }
2506
2507 static void
2508 vdev_update_path(const char *prefix, char *svd, char **dvd, uint64_t guid)
2509 {
2510 if (svd != NULL && *dvd != NULL) {
2511 if (strcmp(svd, *dvd) != 0) {
2512 zfs_dbgmsg("vdev_copy_path: vdev %llu: %s changed "
2513 "from '%s' to '%s'", (u_longlong_t)guid, prefix,
2514 *dvd, svd);
2515 spa_strfree(*dvd);
2516 *dvd = spa_strdup(svd);
2517 }
2518 } else if (svd != NULL) {
2519 *dvd = spa_strdup(svd);
2520 zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'",
2521 (u_longlong_t)guid, *dvd);
2522 }
2523 }
2524
2525 static void
2526 vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd)
2527 {
2528 char *old, *new;
2529
2530 vdev_update_path("vdev_path", svd->vdev_path, &dvd->vdev_path,
2531 dvd->vdev_guid);
2532
2533 vdev_update_path("vdev_devid", svd->vdev_devid, &dvd->vdev_devid,
2534 dvd->vdev_guid);
2535
2536 vdev_update_path("vdev_physpath", svd->vdev_physpath,
2537 &dvd->vdev_physpath, dvd->vdev_guid);
2538
2539 /*
2540 * Our enclosure sysfs path may have changed between imports
2541 */
2542 old = dvd->vdev_enc_sysfs_path;
2543 new = svd->vdev_enc_sysfs_path;
2544 if ((old != NULL && new == NULL) ||
2545 (old == NULL && new != NULL) ||
2546 ((old != NULL && new != NULL) && strcmp(new, old) != 0)) {
2547 zfs_dbgmsg("vdev_copy_path: vdev %llu: vdev_enc_sysfs_path "
2548 "changed from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
2549 old, new);
2550
2551 if (dvd->vdev_enc_sysfs_path)
2552 spa_strfree(dvd->vdev_enc_sysfs_path);
2553
2554 if (svd->vdev_enc_sysfs_path) {
2555 dvd->vdev_enc_sysfs_path = spa_strdup(
2556 svd->vdev_enc_sysfs_path);
2557 } else {
2558 dvd->vdev_enc_sysfs_path = NULL;
2559 }
2560 }
2561 }
2562
2563 /*
2564  * Recursively copy vdev paths from one vdev to another. The source and
2565  * destination vdev trees must have the same geometry, otherwise an error is
2566  * returned. Intended to copy paths from userland config into MOS config.
2567 */
2568 int
2569 vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd)
2570 {
2571 if ((svd->vdev_ops == &vdev_missing_ops) ||
2572 (svd->vdev_ishole && dvd->vdev_ishole) ||
2573 (dvd->vdev_ops == &vdev_indirect_ops))
2574 return (0);
2575
2576 if (svd->vdev_ops != dvd->vdev_ops) {
2577 vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s",
2578 svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type);
2579 return (SET_ERROR(EINVAL));
2580 }
2581
2582 if (svd->vdev_guid != dvd->vdev_guid) {
2583 vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != "
2584 "%llu)", (u_longlong_t)svd->vdev_guid,
2585 (u_longlong_t)dvd->vdev_guid);
2586 return (SET_ERROR(EINVAL));
2587 }
2588
2589 if (svd->vdev_children != dvd->vdev_children) {
2590 vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: "
2591 "%llu != %llu", (u_longlong_t)svd->vdev_children,
2592 (u_longlong_t)dvd->vdev_children);
2593 return (SET_ERROR(EINVAL));
2594 }
2595
2596 for (uint64_t i = 0; i < svd->vdev_children; i++) {
2597 int error = vdev_copy_path_strict(svd->vdev_child[i],
2598 dvd->vdev_child[i]);
2599 if (error != 0)
2600 return (error);
2601 }
2602
2603 if (svd->vdev_ops->vdev_op_leaf)
2604 vdev_copy_path_impl(svd, dvd);
2605
2606 return (0);
2607 }
2608
2609 static void
2610 vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd)
2611 {
2612 ASSERT(stvd->vdev_top == stvd);
2613 ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id);
2614
2615 for (uint64_t i = 0; i < dvd->vdev_children; i++) {
2616 vdev_copy_path_search(stvd, dvd->vdev_child[i]);
2617 }
2618
2619 if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd))
2620 return;
2621
2622 /*
2623 * The idea here is that while a vdev can shift positions within
2624 * a top vdev (when replacing, attaching mirror, etc.) it cannot
2625 * step outside of it.
2626 */
2627 vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid);
2628
2629 if (vd == NULL || vd->vdev_ops != dvd->vdev_ops)
2630 return;
2631
2632 ASSERT(vd->vdev_ops->vdev_op_leaf);
2633
2634 vdev_copy_path_impl(vd, dvd);
2635 }
2636
2637 /*
2638 * Recursively copy vdev paths from one root vdev to another. Source and
2639 * destination vdev trees may differ in geometry. For each destination leaf
2640  * vdev, search for a vdev with the same guid and top vdev id in the source.
2641 * Intended to copy paths from userland config into MOS config.
2642 */
2643 void
2644 vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd)
2645 {
2646 uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children);
2647 ASSERT(srvd->vdev_ops == &vdev_root_ops);
2648 ASSERT(drvd->vdev_ops == &vdev_root_ops);
2649
2650 for (uint64_t i = 0; i < children; i++) {
2651 vdev_copy_path_search(srvd->vdev_child[i],
2652 drvd->vdev_child[i]);
2653 }
2654 }
2655
2656 /*
2657 * Close a virtual device.
2658 */
2659 void
2660 vdev_close(vdev_t *vd)
2661 {
2662 vdev_t *pvd = vd->vdev_parent;
2663 spa_t *spa __maybe_unused = vd->vdev_spa;
2664
2665 ASSERT(vd != NULL);
2666 ASSERT(vd->vdev_open_thread == curthread ||
2667 spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2668
2669 /*
2670 * If our parent is reopening, then we are as well, unless we are
2671 * going offline.
2672 */
2673 if (pvd != NULL && pvd->vdev_reopening)
2674 vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);
2675
2676 vd->vdev_ops->vdev_op_close(vd);
2677
2678 /*
2679 * We record the previous state before we close it, so that if we are
2680 * doing a reopen(), we don't generate FMA ereports if we notice that
2681 * it's still faulted.
2682 */
2683 vd->vdev_prevstate = vd->vdev_state;
2684
2685 if (vd->vdev_offline)
2686 vd->vdev_state = VDEV_STATE_OFFLINE;
2687 else
2688 vd->vdev_state = VDEV_STATE_CLOSED;
2689 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
2690 }
2691
2692 void
2693 vdev_hold(vdev_t *vd)
2694 {
2695 spa_t *spa = vd->vdev_spa;
2696
2697 ASSERT(spa_is_root(spa));
2698 if (spa->spa_state == POOL_STATE_UNINITIALIZED)
2699 return;
2700
2701 for (int c = 0; c < vd->vdev_children; c++)
2702 vdev_hold(vd->vdev_child[c]);
2703
2704 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_hold != NULL)
2705 vd->vdev_ops->vdev_op_hold(vd);
2706 }
2707
2708 void
2709 vdev_rele(vdev_t *vd)
2710 {
2711 ASSERT(spa_is_root(vd->vdev_spa));
2712 for (int c = 0; c < vd->vdev_children; c++)
2713 vdev_rele(vd->vdev_child[c]);
2714
2715 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_rele != NULL)
2716 vd->vdev_ops->vdev_op_rele(vd);
2717 }
2718
2719 /*
2720 * Reopen all interior vdevs and any unopened leaves. We don't actually
2721 * reopen leaf vdevs which had previously been opened as they might deadlock
2722 * on the spa_config_lock. Instead we only obtain the leaf's physical size.
2723 * If the leaf has never been opened then open it, as usual.
2724 */
2725 void
2726 vdev_reopen(vdev_t *vd)
2727 {
2728 spa_t *spa = vd->vdev_spa;
2729
2730 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2731
2732 /* set the reopening flag unless we're taking the vdev offline */
2733 vd->vdev_reopening = !vd->vdev_offline;
2734 vdev_close(vd);
2735 (void) vdev_open(vd);
2736
2737 /*
2738 * Call vdev_validate() here to make sure we have the same device.
2739 * Otherwise, a device with an invalid label could be successfully
2740 * opened in response to vdev_reopen().
2741 */
2742 if (vd->vdev_aux) {
2743 (void) vdev_validate_aux(vd);
2744 if (vdev_readable(vd) && vdev_writeable(vd) &&
2745 vd->vdev_aux == &spa->spa_l2cache) {
2746 /*
2747 			 * If the vdev is present, we should evict all ARC
2748 * buffers and pointers to log blocks and reclaim their
2749 * space before restoring its contents to L2ARC.
2750 */
2751 if (l2arc_vdev_present(vd)) {
2752 l2arc_rebuild_vdev(vd, B_TRUE);
2753 } else {
2754 l2arc_add_vdev(spa, vd);
2755 }
2756 spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
2757 spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
2758 }
2759 } else {
2760 (void) vdev_validate(vd);
2761 }
2762
2763 /*
2764 * Recheck if resilver is still needed and cancel any
2765 * scheduled resilver if resilver is unneeded.
2766 */
2767 if (!vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL) &&
2768 spa->spa_async_tasks & SPA_ASYNC_RESILVER) {
2769 mutex_enter(&spa->spa_async_lock);
2770 spa->spa_async_tasks &= ~SPA_ASYNC_RESILVER;
2771 mutex_exit(&spa->spa_async_lock);
2772 }
2773
2774 /*
2775 * Reassess parent vdev's health.
2776 */
2777 vdev_propagate_state(vd);
2778 }
2779
2780 int
2781 vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
2782 {
2783 int error;
2784
2785 /*
2786 * Normally, partial opens (e.g. of a mirror) are allowed.
2787 * For a create, however, we want to fail the request if
2788 * there are any components we can't open.
2789 */
2790 error = vdev_open(vd);
2791
2792 if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
2793 vdev_close(vd);
2794 return (error ? error : SET_ERROR(ENXIO));
2795 }
2796
2797 /*
2798 * Recursively load DTLs and initialize all labels.
2799 */
2800 if ((error = vdev_dtl_load(vd)) != 0 ||
2801 (error = vdev_label_init(vd, txg, isreplacing ?
2802 VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
2803 vdev_close(vd);
2804 return (error);
2805 }
2806
2807 return (0);
2808 }
2809
2810 void
2811 vdev_metaslab_set_size(vdev_t *vd)
2812 {
2813 uint64_t asize = vd->vdev_asize;
2814 uint64_t ms_count = asize >> zfs_vdev_default_ms_shift;
2815 uint64_t ms_shift;
2816
2817 /*
2818 * There are two dimensions to the metaslab sizing calculation:
2819 * the size of the metaslab and the count of metaslabs per vdev.
2820 *
2821 * The default values used below are a good balance between memory
2822 * usage (larger metaslab size means more memory needed for loaded
2823 * metaslabs; more metaslabs means more memory needed for the
2824 * metaslab_t structs), metaslab load time (larger metaslabs take
2825 * longer to load), and metaslab sync time (more metaslabs means
2826 * more time spent syncing all of them).
2827 *
2828 * In general, we aim for zfs_vdev_default_ms_count (200) metaslabs.
2829 * The range of the dimensions are as follows:
2830 *
2831 * 2^29 <= ms_size <= 2^34
2832 * 16 <= ms_count <= 131,072
2833 *
2834 * On the lower end of vdev sizes, we aim for metaslabs sizes of
2835 * at least 512MB (2^29) to minimize fragmentation effects when
2836 * testing with smaller devices. However, the count constraint
2837 * of at least 16 metaslabs will override this minimum size goal.
2838 *
2839 * On the upper end of vdev sizes, we aim for a maximum metaslab
2840 * size of 16GB. However, we will cap the total count to 2^17
2841 * metaslabs to keep our memory footprint in check and let the
2842 * metaslab size grow from there if that limit is hit.
2843 *
2844 	 * The net effect of applying the above constraints is summarized below.
2845 *
2846 * vdev size metaslab count
2847 * --------------|-----------------
2848 * < 8GB ~16
2849 * 8GB - 100GB one per 512MB
2850 * 100GB - 3TB ~200
2851 * 3TB - 2PB one per 16GB
2852 * > 2PB ~131,072
2853 * --------------------------------
2854 *
2855 * Finally, note that all of the above calculate the initial
2856 * number of metaslabs. Expanding a top-level vdev will result
2857 	 * in additional metaslabs being allocated, making it possible
2858 * to exceed the zfs_vdev_ms_count_limit.
2859 */
2860
2861 if (ms_count < zfs_vdev_min_ms_count)
2862 ms_shift = highbit64(asize / zfs_vdev_min_ms_count);
2863 else if (ms_count > zfs_vdev_default_ms_count)
2864 ms_shift = highbit64(asize / zfs_vdev_default_ms_count);
2865 else
2866 ms_shift = zfs_vdev_default_ms_shift;
2867
2868 if (ms_shift < SPA_MAXBLOCKSHIFT) {
2869 ms_shift = SPA_MAXBLOCKSHIFT;
2870 } else if (ms_shift > zfs_vdev_max_ms_shift) {
2871 ms_shift = zfs_vdev_max_ms_shift;
2872 /* cap the total count to constrain memory footprint */
2873 if ((asize >> ms_shift) > zfs_vdev_ms_count_limit)
2874 ms_shift = highbit64(asize / zfs_vdev_ms_count_limit);
2875 }
2876
2877 vd->vdev_ms_shift = ms_shift;
2878 ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT);
2879 }
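
/*
 * Worked example (a sketch, assuming the default tunables described above:
 * zfs_vdev_default_ms_shift == 29 (512MB) and zfs_vdev_default_ms_count ==
 * 200): a 100GiB top-level vdev yields ms_count = (100GiB >> 29) == 200,
 * which falls inside the [16, 200] window, so ms_shift stays at 29 and the
 * vdev is carved into 200 metaslabs of 512MB each. Larger vdevs take the
 * ms_count > 200 branch and grow the metaslab size instead, up to the 16GB
 * metaslab size and 131,072-metaslab caps.
 */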
2880
2881 void
2882 vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
2883 {
2884 ASSERT(vd == vd->vdev_top);
2885 /* indirect vdevs don't have metaslabs or dtls */
2886 ASSERT(vdev_is_concrete(vd) || flags == 0);
2887 ASSERT(ISP2(flags));
2888 ASSERT(spa_writeable(vd->vdev_spa));
2889
2890 if (flags & VDD_METASLAB)
2891 (void) txg_list_add(&vd->vdev_ms_list, arg, txg);
2892
2893 if (flags & VDD_DTL)
2894 (void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
2895
2896 (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
2897 }
2898
2899 void
2900 vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
2901 {
2902 for (int c = 0; c < vd->vdev_children; c++)
2903 vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
2904
2905 if (vd->vdev_ops->vdev_op_leaf)
2906 vdev_dirty(vd->vdev_top, flags, vd, txg);
2907 }
2908
2909 /*
2910 * DTLs.
2911 *
2912 * A vdev's DTL (dirty time log) is the set of transaction groups for which
2913 * the vdev has less than perfect replication. There are four kinds of DTL:
2914 *
2915 * DTL_MISSING: txgs for which the vdev has no valid copies of the data
2916 *
2917 * DTL_PARTIAL: txgs for which data is available, but not fully replicated
2918 *
2919 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
2920 * scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
2921 * txgs that was scrubbed.
2922 *
2923 * DTL_OUTAGE: txgs which cannot currently be read, whether due to
2924 * persistent errors or just some device being offline.
2925 * Unlike the other three, the DTL_OUTAGE map is not generally
2926 * maintained; it's only computed when needed, typically to
2927 * determine whether a device can be detached.
2928 *
2929 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
2930 * either has the data or it doesn't.
2931 *
2932 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
2933 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
2934 * if any child is less than fully replicated, then so is its parent.
2935 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
2936  * comprising only those txgs which appear in more than 'maxfaults' children;
2937 * those are the txgs we don't have enough replication to read. For example,
2938 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
2939 * thus, its DTL_MISSING consists of the set of txgs that appear in more than
2940 * two child DTL_MISSING maps.
2941 *
2942 * It should be clear from the above that to compute the DTLs and outage maps
2943 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
2944 * Therefore, that is all we keep on disk. When loading the pool, or after
2945 * a configuration change, we generate all other DTLs from first principles.
2946 */
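
/*
 * Worked example (a sketch): consider a raidz2 top-level vdev whose children
 * report the following DTL_MISSING ranges (all other children empty):
 *
 *	child 0: [100, 150)   child 1: [120, 180)   child 2: [140, 160)
 *
 * raidz2 tolerates two faults, so a txg is unreadable only when more than
 * two children are missing it; the parent's DTL_MISSING is therefore
 * [140, 150), while its DTL_PARTIAL is the plain union [100, 180).
 */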
2947 void
2948 vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2949 {
2950 range_tree_t *rt = vd->vdev_dtl[t];
2951
2952 ASSERT(t < DTL_TYPES);
2953 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2954 ASSERT(spa_writeable(vd->vdev_spa));
2955
2956 mutex_enter(&vd->vdev_dtl_lock);
2957 if (!range_tree_contains(rt, txg, size))
2958 range_tree_add(rt, txg, size);
2959 mutex_exit(&vd->vdev_dtl_lock);
2960 }
2961
2962 boolean_t
2963 vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2964 {
2965 range_tree_t *rt = vd->vdev_dtl[t];
2966 boolean_t dirty = B_FALSE;
2967
2968 ASSERT(t < DTL_TYPES);
2969 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2970
2971 /*
2972 * While we are loading the pool, the DTLs have not been loaded yet.
2973 	 * This isn't a problem, but it can result in devices being tried
2974 	 * which are known to not have the data, in which case the import
2975 	 * relies on the checksum to ensure that we get the right data.
2976 * Note that while importing we are only reading the MOS, which is
2977 * always checksummed.
2978 */
2979 mutex_enter(&vd->vdev_dtl_lock);
2980 if (!range_tree_is_empty(rt))
2981 dirty = range_tree_contains(rt, txg, size);
2982 mutex_exit(&vd->vdev_dtl_lock);
2983
2984 return (dirty);
2985 }
2986
2987 boolean_t
2988 vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
2989 {
2990 range_tree_t *rt = vd->vdev_dtl[t];
2991 boolean_t empty;
2992
2993 mutex_enter(&vd->vdev_dtl_lock);
2994 empty = range_tree_is_empty(rt);
2995 mutex_exit(&vd->vdev_dtl_lock);
2996
2997 return (empty);
2998 }
2999
3000 /*
3001 * Check if the txg falls within the range which must be
3002 * resilvered. DVAs outside this range can always be skipped.
3003 */
3004 boolean_t
3005 vdev_default_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
3006 uint64_t phys_birth)
3007 {
3008 (void) dva, (void) psize;
3009
3010 /* Set by sequential resilver. */
3011 if (phys_birth == TXG_UNKNOWN)
3012 return (B_TRUE);
3013
3014 return (vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1));
3015 }
3016
3017 /*
3018 * Returns B_TRUE if the vdev determines the DVA needs to be resilvered.
3019 */
3020 boolean_t
3021 vdev_dtl_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
3022 uint64_t phys_birth)
3023 {
3024 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
3025
3026 if (vd->vdev_ops->vdev_op_need_resilver == NULL ||
3027 vd->vdev_ops->vdev_op_leaf)
3028 return (B_TRUE);
3029
3030 return (vd->vdev_ops->vdev_op_need_resilver(vd, dva, psize,
3031 phys_birth));
3032 }
3033
3034 /*
3035 * Returns the lowest txg in the DTL range.
3036 */
3037 static uint64_t
3038 vdev_dtl_min(vdev_t *vd)
3039 {
3040 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
3041 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
3042 ASSERT0(vd->vdev_children);
3043
3044 return (range_tree_min(vd->vdev_dtl[DTL_MISSING]) - 1);
3045 }
3046
3047 /*
3048 * Returns the highest txg in the DTL.
3049 */
3050 static uint64_t
3051 vdev_dtl_max(vdev_t *vd)
3052 {
3053 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
3054 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
3055 ASSERT0(vd->vdev_children);
3056
3057 return (range_tree_max(vd->vdev_dtl[DTL_MISSING]));
3058 }
3059
3060 /*
3061 * Determine if a resilvering vdev should remove any DTL entries from
3062 * its range. If the vdev was resilvering for the entire duration of the
3063 * scan then it should excise that range from its DTLs. Otherwise, this
3064 * vdev is considered partially resilvered and should leave its DTL
3065 * entries intact. The comment in vdev_dtl_reassess() describes how we
3066 * excise the DTLs.
3067 */
3068 static boolean_t
3069 vdev_dtl_should_excise(vdev_t *vd, boolean_t rebuild_done)
3070 {
3071 ASSERT0(vd->vdev_children);
3072
3073 if (vd->vdev_state < VDEV_STATE_DEGRADED)
3074 return (B_FALSE);
3075
3076 if (vd->vdev_resilver_deferred)
3077 return (B_FALSE);
3078
3079 if (range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
3080 return (B_TRUE);
3081
3082 if (rebuild_done) {
3083 vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
3084 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
3085
3086 /* Rebuild not initiated by attach */
3087 if (vd->vdev_rebuild_txg == 0)
3088 return (B_TRUE);
3089
3090 /*
3091 * When a rebuild completes without error then all missing data
3092 * up to the rebuild max txg has been reconstructed and the DTL
3093 * is eligible for excision.
3094 */
3095 if (vrp->vrp_rebuild_state == VDEV_REBUILD_COMPLETE &&
3096 vdev_dtl_max(vd) <= vrp->vrp_max_txg) {
3097 ASSERT3U(vrp->vrp_min_txg, <=, vdev_dtl_min(vd));
3098 ASSERT3U(vrp->vrp_min_txg, <, vd->vdev_rebuild_txg);
3099 ASSERT3U(vd->vdev_rebuild_txg, <=, vrp->vrp_max_txg);
3100 return (B_TRUE);
3101 }
3102 } else {
3103 dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
3104 dsl_scan_phys_t *scnp __maybe_unused = &scn->scn_phys;
3105
3106 /* Resilver not initiated by attach */
3107 if (vd->vdev_resilver_txg == 0)
3108 return (B_TRUE);
3109
3110 /*
3111 * When a resilver is initiated the scan will assign the
3112 * scn_max_txg value to the highest txg value that exists
3113 * in all DTLs. If this device's max DTL is not part of this
3114 		 * scan (i.e. it is not in the range (scn_min_txg, scn_max_txg])
3115 * then it is not eligible for excision.
3116 */
3117 if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
3118 ASSERT3U(scnp->scn_min_txg, <=, vdev_dtl_min(vd));
3119 ASSERT3U(scnp->scn_min_txg, <, vd->vdev_resilver_txg);
3120 ASSERT3U(vd->vdev_resilver_txg, <=, scnp->scn_max_txg);
3121 return (B_TRUE);
3122 }
3123 }
3124
3125 return (B_FALSE);
3126 }
3127
3128 /*
3129 * Reassess DTLs after a config change or scrub completion. If txg == 0 no
3130 * write operations will be issued to the pool.
3131 */
3132 void
3133 vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
3134 boolean_t scrub_done, boolean_t rebuild_done)
3135 {
3136 spa_t *spa = vd->vdev_spa;
3137 avl_tree_t reftree;
3138 int minref;
3139
3140 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
3141
3142 for (int c = 0; c < vd->vdev_children; c++)
3143 vdev_dtl_reassess(vd->vdev_child[c], txg,
3144 scrub_txg, scrub_done, rebuild_done);
3145
3146 if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
3147 return;
3148
3149 if (vd->vdev_ops->vdev_op_leaf) {
3150 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
3151 vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
3152 boolean_t check_excise = B_FALSE;
3153 boolean_t wasempty = B_TRUE;
3154
3155 mutex_enter(&vd->vdev_dtl_lock);
3156
3157 /*
3158 * If requested, pretend the scan or rebuild completed cleanly.
3159 */
3160 if (zfs_scan_ignore_errors) {
3161 if (scn != NULL)
3162 scn->scn_phys.scn_errors = 0;
3163 if (vr != NULL)
3164 vr->vr_rebuild_phys.vrp_errors = 0;
3165 }
3166
3167 if (scrub_txg != 0 &&
3168 !range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
3169 wasempty = B_FALSE;
3170 zfs_dbgmsg("guid:%llu txg:%llu scrub:%llu started:%d "
3171 "dtl:%llu/%llu errors:%llu",
3172 (u_longlong_t)vd->vdev_guid, (u_longlong_t)txg,
3173 (u_longlong_t)scrub_txg, spa->spa_scrub_started,
3174 (u_longlong_t)vdev_dtl_min(vd),
3175 (u_longlong_t)vdev_dtl_max(vd),
3176 (u_longlong_t)(scn ? scn->scn_phys.scn_errors : 0));
3177 }
3178
3179 /*
3180 * If we've completed a scrub/resilver or a rebuild cleanly
3181 * then determine if this vdev should remove any DTLs. We
3182 * only want to excise regions on vdevs that were available
3183 * during the entire duration of this scan.
3184 */
3185 if (rebuild_done &&
3186 vr != NULL && vr->vr_rebuild_phys.vrp_errors == 0) {
3187 check_excise = B_TRUE;
3188 } else {
3189 if (spa->spa_scrub_started ||
3190 (scn != NULL && scn->scn_phys.scn_errors == 0)) {
3191 check_excise = B_TRUE;
3192 }
3193 }
3194
3195 if (scrub_txg && check_excise &&
3196 vdev_dtl_should_excise(vd, rebuild_done)) {
3197 /*
3198 * We completed a scrub, resilver or rebuild up to
3199 * scrub_txg. If we did it without rebooting, then
3200 * the scrub dtl will be valid, so excise the old
3201 * region and fold in the scrub dtl. Otherwise,
3202 * leave the dtl as-is if there was an error.
3203 *
3204 			 * There's a little trick here: to excise the beginning
3205 * of the DTL_MISSING map, we put it into a reference
3206 * tree and then add a segment with refcnt -1 that
3207 * covers the range [0, scrub_txg). This means
3208 * that each txg in that range has refcnt -1 or 0.
3209 * We then add DTL_SCRUB with a refcnt of 2, so that
3210 * entries in the range [0, scrub_txg) will have a
3211 * positive refcnt -- either 1 or 2. We then convert
3212 * the reference tree into the new DTL_MISSING map.
3213 */
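			/*
			 * Worked example (a sketch): if the old DTL_MISSING
			 * was [90, 200), scrub_txg == 150 and DTL_SCRUB is
			 * [120, 130) (txgs the scrub could not repair), the
			 * reference counts come out as:
			 *
			 *	[90, 120):   1 - 1     == 0  -> excised
			 *	[120, 130):  1 - 1 + 2 == 2  -> kept
			 *	[130, 150):  1 - 1     == 0  -> excised
			 *	[150, 200):  1         == 1  -> kept
			 *
			 * so the new DTL_MISSING is [120, 130) plus [150, 200).
			 */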
3214 space_reftree_create(&reftree);
3215 space_reftree_add_map(&reftree,
3216 vd->vdev_dtl[DTL_MISSING], 1);
3217 space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
3218 space_reftree_add_map(&reftree,
3219 vd->vdev_dtl[DTL_SCRUB], 2);
3220 space_reftree_generate_map(&reftree,
3221 vd->vdev_dtl[DTL_MISSING], 1);
3222 space_reftree_destroy(&reftree);
3223
3224 if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
3225 zfs_dbgmsg("update DTL_MISSING:%llu/%llu",
3226 (u_longlong_t)vdev_dtl_min(vd),
3227 (u_longlong_t)vdev_dtl_max(vd));
3228 } else if (!wasempty) {
3229 zfs_dbgmsg("DTL_MISSING is now empty");
3230 }
3231 }
3232 range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
3233 range_tree_walk(vd->vdev_dtl[DTL_MISSING],
3234 range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
3235 if (scrub_done)
3236 range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
3237 range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
3238 if (!vdev_readable(vd))
3239 range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
3240 else
3241 range_tree_walk(vd->vdev_dtl[DTL_MISSING],
3242 range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
3243
3244 /*
3245 * If the vdev was resilvering or rebuilding and no longer
3246 * has any DTLs then reset the appropriate flag and dirty
3247 * the top level so that we persist the change.
3248 */
3249 if (txg != 0 &&
3250 range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
3251 range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) {
3252 if (vd->vdev_rebuild_txg != 0) {
3253 vd->vdev_rebuild_txg = 0;
3254 vdev_config_dirty(vd->vdev_top);
3255 } else if (vd->vdev_resilver_txg != 0) {
3256 vd->vdev_resilver_txg = 0;
3257 vdev_config_dirty(vd->vdev_top);
3258 }
3259 }
3260
3261 mutex_exit(&vd->vdev_dtl_lock);
3262
3263 if (txg != 0)
3264 vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
3265 } else {
3266 mutex_enter(&vd->vdev_dtl_lock);
3267 for (int t = 0; t < DTL_TYPES; t++) {
3268 /* account for child's outage in parent's missing map */
3269 			int s = (t == DTL_MISSING) ? DTL_OUTAGE : t;
3270 if (t == DTL_SCRUB) {
3271 /* leaf vdevs only */
3272 continue;
3273 }
3274 if (t == DTL_PARTIAL) {
3275 /* i.e. non-zero */
3276 minref = 1;
3277 } else if (vdev_get_nparity(vd) != 0) {
3278 /* RAIDZ, DRAID */
3279 minref = vdev_get_nparity(vd) + 1;
3280 } else {
3281 /* any kind of mirror */
3282 minref = vd->vdev_children;
3283 }
3284 space_reftree_create(&reftree);
3285 for (int c = 0; c < vd->vdev_children; c++) {
3286 vdev_t *cvd = vd->vdev_child[c];
3287 mutex_enter(&cvd->vdev_dtl_lock);
3288 space_reftree_add_map(&reftree,
3289 cvd->vdev_dtl[s], 1);
3290 mutex_exit(&cvd->vdev_dtl_lock);
3291 }
3292 space_reftree_generate_map(&reftree,
3293 vd->vdev_dtl[t], minref);
3294 space_reftree_destroy(&reftree);
3295 }
3296 mutex_exit(&vd->vdev_dtl_lock);
3297 }
3298
3299 if (vd->vdev_top->vdev_ops == &vdev_raidz_ops) {
3300 raidz_dtl_reassessed(vd);
3301 }
3302 }
3303
3304 /*
3305 * Iterate over all the vdevs except spare, and post kobj events
3306 */
3307 void
3308 vdev_post_kobj_evt(vdev_t *vd)
3309 {
3310 if (vd->vdev_ops->vdev_op_kobj_evt_post &&
3311 vd->vdev_kobj_flag == B_FALSE) {
3312 vd->vdev_kobj_flag = B_TRUE;
3313 vd->vdev_ops->vdev_op_kobj_evt_post(vd);
3314 }
3315
3316 for (int c = 0; c < vd->vdev_children; c++)
3317 vdev_post_kobj_evt(vd->vdev_child[c]);
3318 }
3319
3320 /*
3321 * Iterate over all the vdevs except spare, and clear kobj events
3322 */
3323 void
3324 vdev_clear_kobj_evt(vdev_t *vd)
3325 {
3326 vd->vdev_kobj_flag = B_FALSE;
3327
3328 for (int c = 0; c < vd->vdev_children; c++)
3329 vdev_clear_kobj_evt(vd->vdev_child[c]);
3330 }
3331
3332 int
3333 vdev_dtl_load(vdev_t *vd)
3334 {
3335 spa_t *spa = vd->vdev_spa;
3336 objset_t *mos = spa->spa_meta_objset;
3337 range_tree_t *rt;
3338 int error = 0;
3339
3340 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
3341 ASSERT(vdev_is_concrete(vd));
3342
3343 /*
3344 * If the dtl cannot be sync'd there is no need to open it.
3345 */
3346 if (spa->spa_mode == SPA_MODE_READ && !spa->spa_read_spacemaps)
3347 return (0);
3348
3349 error = space_map_open(&vd->vdev_dtl_sm, mos,
3350 vd->vdev_dtl_object, 0, -1ULL, 0);
3351 if (error)
3352 return (error);
3353 ASSERT(vd->vdev_dtl_sm != NULL);
3354
3355 rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
3356 error = space_map_load(vd->vdev_dtl_sm, rt, SM_ALLOC);
3357 if (error == 0) {
3358 mutex_enter(&vd->vdev_dtl_lock);
3359 range_tree_walk(rt, range_tree_add,
3360 vd->vdev_dtl[DTL_MISSING]);
3361 mutex_exit(&vd->vdev_dtl_lock);
3362 }
3363
3364 range_tree_vacate(rt, NULL, NULL);
3365 range_tree_destroy(rt);
3366
3367 return (error);
3368 }
3369
3370 for (int c = 0; c < vd->vdev_children; c++) {
3371 error = vdev_dtl_load(vd->vdev_child[c]);
3372 if (error != 0)
3373 break;
3374 }
3375
3376 return (error);
3377 }
3378
3379 static void
3380 vdev_zap_allocation_data(vdev_t *vd, dmu_tx_t *tx)
3381 {
3382 spa_t *spa = vd->vdev_spa;
3383 objset_t *mos = spa->spa_meta_objset;
3384 vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
3385 const char *string;
3386
3387 ASSERT(alloc_bias != VDEV_BIAS_NONE);
3388
3389 string =
3390 (alloc_bias == VDEV_BIAS_LOG) ? VDEV_ALLOC_BIAS_LOG :
3391 (alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL :
3392 (alloc_bias == VDEV_BIAS_DEDUP) ? VDEV_ALLOC_BIAS_DEDUP : NULL;
3393
3394 ASSERT(string != NULL);
3395 VERIFY0(zap_add(mos, vd->vdev_top_zap, VDEV_TOP_ZAP_ALLOCATION_BIAS,
3396 1, strlen(string) + 1, string, tx));
3397
3398 if (alloc_bias == VDEV_BIAS_SPECIAL || alloc_bias == VDEV_BIAS_DEDUP) {
3399 spa_activate_allocation_classes(spa, tx);
3400 }
3401 }
3402
3403 void
3404 vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
3405 {
3406 spa_t *spa = vd->vdev_spa;
3407
3408 VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
3409 VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
3410 zapobj, tx));
3411 }
3412
3413 uint64_t
3414 vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
3415 {
3416 spa_t *spa = vd->vdev_spa;
3417 uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
3418 DMU_OT_NONE, 0, tx);
3419
3420 ASSERT(zap != 0);
3421 VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
3422 zap, tx));
3423
3424 return (zap);
3425 }
3426
3427 void
3428 vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
3429 {
3430 if (vd->vdev_ops != &vdev_hole_ops &&
3431 vd->vdev_ops != &vdev_missing_ops &&
3432 vd->vdev_ops != &vdev_root_ops &&
3433 !vd->vdev_top->vdev_removing) {
3434 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) {
3435 vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx);
3436 }
3437 if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
3438 vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
3439 if (vd->vdev_alloc_bias != VDEV_BIAS_NONE)
3440 vdev_zap_allocation_data(vd, tx);
3441 }
3442 }
3443 if (vd->vdev_ops == &vdev_root_ops && vd->vdev_root_zap == 0 &&
3444 spa_feature_is_enabled(vd->vdev_spa, SPA_FEATURE_AVZ_V2)) {
3445 if (!spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2))
3446 spa_feature_incr(vd->vdev_spa, SPA_FEATURE_AVZ_V2, tx);
3447 vd->vdev_root_zap = vdev_create_link_zap(vd, tx);
3448 }
3449
3450 for (uint64_t i = 0; i < vd->vdev_children; i++) {
3451 vdev_construct_zaps(vd->vdev_child[i], tx);
3452 }
3453 }
3454
3455 static void
3456 vdev_dtl_sync(vdev_t *vd, uint64_t txg)
3457 {
3458 spa_t *spa = vd->vdev_spa;
3459 range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
3460 objset_t *mos = spa->spa_meta_objset;
3461 range_tree_t *rtsync;
3462 dmu_tx_t *tx;
3463 uint64_t object = space_map_object(vd->vdev_dtl_sm);
3464
3465 ASSERT(vdev_is_concrete(vd));
3466 ASSERT(vd->vdev_ops->vdev_op_leaf);
3467
3468 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3469
3470 if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
3471 mutex_enter(&vd->vdev_dtl_lock);
3472 space_map_free(vd->vdev_dtl_sm, tx);
3473 space_map_close(vd->vdev_dtl_sm);
3474 vd->vdev_dtl_sm = NULL;
3475 mutex_exit(&vd->vdev_dtl_lock);
3476
3477 /*
3478 * We only destroy the leaf ZAP for detached leaves or for
3479 * removed log devices. Removed data devices handle leaf ZAP
3480 * cleanup later, once cancellation is no longer possible.
3481 */
3482 if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached ||
3483 vd->vdev_top->vdev_islog)) {
3484 vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx);
3485 vd->vdev_leaf_zap = 0;
3486 }
3487
3488 dmu_tx_commit(tx);
3489 return;
3490 }
3491
3492 if (vd->vdev_dtl_sm == NULL) {
3493 uint64_t new_object;
3494
3495 new_object = space_map_alloc(mos, zfs_vdev_dtl_sm_blksz, tx);
3496 VERIFY3U(new_object, !=, 0);
3497
3498 VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
3499 0, -1ULL, 0));
3500 ASSERT(vd->vdev_dtl_sm != NULL);
3501 }
3502
3503 rtsync = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
3504
3505 mutex_enter(&vd->vdev_dtl_lock);
3506 range_tree_walk(rt, range_tree_add, rtsync);
3507 mutex_exit(&vd->vdev_dtl_lock);
3508
3509 space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx);
3510 space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx);
3511 range_tree_vacate(rtsync, NULL, NULL);
3512
3513 range_tree_destroy(rtsync);
3514
3515 /*
3516 * If the object for the space map has changed then dirty
3517 * the top level so that we update the config.
3518 */
3519 if (object != space_map_object(vd->vdev_dtl_sm)) {
3520 vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, "
3521 "new object %llu", (u_longlong_t)txg, spa_name(spa),
3522 (u_longlong_t)object,
3523 (u_longlong_t)space_map_object(vd->vdev_dtl_sm));
3524 vdev_config_dirty(vd->vdev_top);
3525 }
3526
3527 dmu_tx_commit(tx);
3528 }
3529
3530 /*
3531 * Determine whether the specified vdev can be offlined/detached/removed
3532 * without losing data.
3533 */
3534 boolean_t
3535 vdev_dtl_required(vdev_t *vd)
3536 {
3537 spa_t *spa = vd->vdev_spa;
3538 vdev_t *tvd = vd->vdev_top;
3539 uint8_t cant_read = vd->vdev_cant_read;
3540 boolean_t required;
3541
3542 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
3543
3544 if (vd == spa->spa_root_vdev || vd == tvd)
3545 return (B_TRUE);
3546
3547 /*
3548 * Temporarily mark the device as unreadable, and then determine
3549 * whether this results in any DTL outages in the top-level vdev.
3550 * If not, we can safely offline/detach/remove the device.
3551 */
3552 vd->vdev_cant_read = B_TRUE;
3553 vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
3554 required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
3555 vd->vdev_cant_read = cant_read;
3556 vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
3557
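	/*
	 * Fault injection may force the device to be reported as
	 * required, for testing purposes.
	 */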
3558 if (!required && zio_injection_enabled) {
3559 required = !!zio_handle_device_injection(vd, NULL,
3560 SET_ERROR(ECHILD));
3561 }
3562
3563 return (required);
3564 }
3565
3566 /*
3567 * Determine if resilver is needed, and if so the txg range.
3568 */
3569 boolean_t
3570 vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
3571 {
3572 boolean_t needed = B_FALSE;
3573 uint64_t thismin = UINT64_MAX;
3574 uint64_t thismax = 0;
3575
3576 if (vd->vdev_children == 0) {
3577 mutex_enter(&vd->vdev_dtl_lock);
3578 if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
3579 vdev_writeable(vd)) {
3580
3581 thismin = vdev_dtl_min(vd);
3582 thismax = vdev_dtl_max(vd);
3583 needed = B_TRUE;
3584 }
3585 mutex_exit(&vd->vdev_dtl_lock);
3586 } else {
3587 for (int c = 0; c < vd->vdev_children; c++) {
3588 vdev_t *cvd = vd->vdev_child[c];
3589 uint64_t cmin, cmax;
3590
3591 if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
3592 thismin = MIN(thismin, cmin);
3593 thismax = MAX(thismax, cmax);
3594 needed = B_TRUE;
3595 }
3596 }
3597 }
3598
3599 if (needed && minp) {
3600 *minp = thismin;
3601 *maxp = thismax;
3602 }
3603 return (needed);
3604 }
3605
3606 /*
3607 * Gets the checkpoint space map object from the vdev's ZAP. On success sm_obj
3608 * will contain either the checkpoint spacemap object or zero if none exists.
3609 * All other errors are returned to the caller.
3610 */
3611 int
3612 vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj)
3613 {
3614 ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
3615
3616 if (vd->vdev_top_zap == 0) {
3617 *sm_obj = 0;
3618 return (0);
3619 }
3620
3621 int error = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
3622 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, sm_obj);
3623 if (error == ENOENT) {
3624 *sm_obj = 0;
3625 error = 0;
3626 }
3627
3628 return (error);
3629 }
3630
3631 int
3632 vdev_load(vdev_t *vd)
3633 {
3634 int children = vd->vdev_children;
3635 int error = 0;
3636 taskq_t *tq = NULL;
3637
3638 /*
3639 * It's only worthwhile to use the taskq for the root vdev, because the
3640 * slow part is metaslab_init, and that only happens for top-level
3641 * vdevs.
3642 */
3643 if (vd->vdev_ops == &vdev_root_ops && vd->vdev_children > 0) {
3644 tq = taskq_create("vdev_load", children, minclsyspri,
3645 children, children, TASKQ_PREPOPULATE);
3646 }
3647
3648 /*
3649 * Recursively load all children.
3650 */
3651 for (int c = 0; c < vd->vdev_children; c++) {
3652 vdev_t *cvd = vd->vdev_child[c];
3653
3654 if (tq == NULL || vdev_uses_zvols(cvd)) {
3655 cvd->vdev_load_error = vdev_load(cvd);
3656 } else {
3657 VERIFY(taskq_dispatch(tq, vdev_load_child,
3658 cvd, TQ_SLEEP) != TASKQID_INVALID);
3659 }
3660 }
3661
3662 if (tq != NULL) {
3663 taskq_wait(tq);
3664 taskq_destroy(tq);
3665 }
3666
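	/*
	 * Collect the load status of each child, whether it was loaded
	 * inline above or dispatched to the taskq.
	 */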
3667 for (int c = 0; c < vd->vdev_children; c++) {
3668 int error = vd->vdev_child[c]->vdev_load_error;
3669
3670 if (error != 0)
3671 return (error);
3672 }
3673
3674 vdev_set_deflate_ratio(vd);
3675
3676 if (vd->vdev_ops == &vdev_raidz_ops) {
3677 error = vdev_raidz_load(vd);
3678 if (error != 0)
3679 return (error);
3680 }
3681
3682 /*
3683 * On spa_load path, grab the allocation bias from our zap
3684 */
3685 if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
3686 spa_t *spa = vd->vdev_spa;
3687 char bias_str[64];
3688
3689 error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
3690 VDEV_TOP_ZAP_ALLOCATION_BIAS, 1, sizeof (bias_str),
3691 bias_str);
3692 if (error == 0) {
3693 ASSERT(vd->vdev_alloc_bias == VDEV_BIAS_NONE);
3694 vd->vdev_alloc_bias = vdev_derive_alloc_bias(bias_str);
3695 } else if (error != ENOENT) {
3696 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3697 VDEV_AUX_CORRUPT_DATA);
3698 vdev_dbgmsg(vd, "vdev_load: zap_lookup(top_zap=%llu) "
3699 "failed [error=%d]",
3700 (u_longlong_t)vd->vdev_top_zap, error);
3701 return (error);
3702 }
3703 }
3704
3705 if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
3706 spa_t *spa = vd->vdev_spa;
3707 uint64_t failfast;
3708
3709 error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
3710 vdev_prop_to_name(VDEV_PROP_FAILFAST), sizeof (failfast),
3711 1, &failfast);
3712 if (error == 0) {
3713 vd->vdev_failfast = failfast & 1;
3714 } else if (error == ENOENT) {
3715 vd->vdev_failfast = vdev_prop_default_numeric(
3716 VDEV_PROP_FAILFAST);
3717 } else {
3718 vdev_dbgmsg(vd,
3719 "vdev_load: zap_lookup(top_zap=%llu) "
3720 "failed [error=%d]",
3721 (u_longlong_t)vd->vdev_top_zap, error);
3722 }
3723 }
3724
3725 /*
3726 * Load any rebuild state from the top-level vdev zap.
3727 */
3728 if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
3729 error = vdev_rebuild_load(vd);
3730 if (error && error != ENOTSUP) {
3731 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3732 VDEV_AUX_CORRUPT_DATA);
3733 vdev_dbgmsg(vd, "vdev_load: vdev_rebuild_load "
3734 "failed [error=%d]", error);
3735 return (error);
3736 }
3737 }
3738
3739 if (vd->vdev_top_zap != 0 || vd->vdev_leaf_zap != 0) {
3740 uint64_t zapobj;
3741
3742 if (vd->vdev_top_zap != 0)
3743 zapobj = vd->vdev_top_zap;
3744 else
3745 zapobj = vd->vdev_leaf_zap;
3746
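		/*
		 * Load the per-vdev error threshold properties (checksum,
		 * io, and slow-io N/T values); only unexpected lookup
		 * errors are logged, ENOENT is ignored.
		 */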
3747 error = vdev_prop_get_int(vd, VDEV_PROP_CHECKSUM_N,
3748 &vd->vdev_checksum_n);
3749 if (error && error != ENOENT)
3750 vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3751 "failed [error=%d]", (u_longlong_t)zapobj, error);
3752
3753 error = vdev_prop_get_int(vd, VDEV_PROP_CHECKSUM_T,
3754 &vd->vdev_checksum_t);
3755 if (error && error != ENOENT)
3756 vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3757 "failed [error=%d]", (u_longlong_t)zapobj, error);
3758
3759 error = vdev_prop_get_int(vd, VDEV_PROP_IO_N,
3760 &vd->vdev_io_n);
3761 if (error && error != ENOENT)
3762 vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3763 "failed [error=%d]", (u_longlong_t)zapobj, error);
3764
3765 error = vdev_prop_get_int(vd, VDEV_PROP_IO_T,
3766 &vd->vdev_io_t);
3767 if (error && error != ENOENT)
3768 vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3769 "failed [error=%d]", (u_longlong_t)zapobj, error);
3770
3771 error = vdev_prop_get_int(vd, VDEV_PROP_SLOW_IO_N,
3772 &vd->vdev_slow_io_n);
3773 if (error && error != ENOENT)
3774 vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3775 "failed [error=%d]", (u_longlong_t)zapobj, error);
3776
3777 error = vdev_prop_get_int(vd, VDEV_PROP_SLOW_IO_T,
3778 &vd->vdev_slow_io_t);
3779 if (error && error != ENOENT)
3780 vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3781 "failed [error=%d]", (u_longlong_t)zapobj, error);
3782 }
3783
3784 /*
3785 * If this is a top-level vdev, initialize its metaslabs.
3786 */
3787 if (vd == vd->vdev_top && vdev_is_concrete(vd)) {
3788 vdev_metaslab_group_create(vd);
3789
3790 if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
3791 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3792 VDEV_AUX_CORRUPT_DATA);
3793 vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, "
3794 "asize=%llu", (u_longlong_t)vd->vdev_ashift,
3795 (u_longlong_t)vd->vdev_asize);
3796 return (SET_ERROR(ENXIO));
3797 }
3798
3799 error = vdev_metaslab_init(vd, 0);
3800 if (error != 0) {
3801 vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
3802 "[error=%d]", error);
3803 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3804 VDEV_AUX_CORRUPT_DATA);
3805 return (error);
3806 }
3807
3808 uint64_t checkpoint_sm_obj;
3809 error = vdev_checkpoint_sm_object(vd, &checkpoint_sm_obj);
3810 if (error == 0 && checkpoint_sm_obj != 0) {
3811 objset_t *mos = spa_meta_objset(vd->vdev_spa);
3812 ASSERT(vd->vdev_asize != 0);
3813 ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
3814
3815 error = space_map_open(&vd->vdev_checkpoint_sm,
3816 mos, checkpoint_sm_obj, 0, vd->vdev_asize,
3817 vd->vdev_ashift);
3818 if (error != 0) {
3819 vdev_dbgmsg(vd, "vdev_load: space_map_open "
3820 "failed for checkpoint spacemap (obj %llu) "
3821 "[error=%d]",
3822 (u_longlong_t)checkpoint_sm_obj, error);
3823 return (error);
3824 }
3825 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
3826
3827 /*
3828 * Since the checkpoint_sm contains free entries
3829 * exclusively we can use space_map_allocated() to
3830 * indicate the cumulative checkpointed space that
3831 * has been freed.
3832 */
3833 vd->vdev_stat.vs_checkpoint_space =
3834 -space_map_allocated(vd->vdev_checkpoint_sm);
3835 vd->vdev_spa->spa_checkpoint_info.sci_dspace +=
3836 vd->vdev_stat.vs_checkpoint_space;
3837 } else if (error != 0) {
3838 vdev_dbgmsg(vd, "vdev_load: failed to retrieve "
3839 "checkpoint space map object from vdev ZAP "
3840 "[error=%d]", error);
3841 return (error);
3842 }
3843 }
3844
3845 /*
3846 * If this is a leaf vdev, load its DTL.
3847 */
3848 if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
3849 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3850 VDEV_AUX_CORRUPT_DATA);
3851 vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
3852 "[error=%d]", error);
3853 return (error);
3854 }
3855
3856 uint64_t obsolete_sm_object;
3857 error = vdev_obsolete_sm_object(vd, &obsolete_sm_object);
3858 if (error == 0 && obsolete_sm_object != 0) {
3859 objset_t *mos = vd->vdev_spa->spa_meta_objset;
3860 ASSERT(vd->vdev_asize != 0);
3861 ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
3862
3863 if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
3864 obsolete_sm_object, 0, vd->vdev_asize, 0))) {
3865 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3866 VDEV_AUX_CORRUPT_DATA);
3867 vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
3868 "obsolete spacemap (obj %llu) [error=%d]",
3869 (u_longlong_t)obsolete_sm_object, error);
3870 return (error);
3871 }
3872 } else if (error != 0) {
3873 vdev_dbgmsg(vd, "vdev_load: failed to retrieve obsolete "
3874 "space map object from vdev ZAP [error=%d]", error);
3875 return (error);
3876 }
3877
3878 return (0);
3879 }
3880
3881 /*
3882 * The special vdev case is used for hot spares and l2cache devices. Its
3883 * sole purpose is to set the vdev state for the associated vdev. To do this,
3884 * we make sure that we can open the underlying device, then try to read the
3885 * label, and make sure that the label is sane and that it hasn't been
3886 * repurposed to another pool.
3887 */
3888 int
3889 vdev_validate_aux(vdev_t *vd)
3890 {
3891 nvlist_t *label;
3892 uint64_t guid, version;
3893 uint64_t state;
3894
3895 if (!vdev_readable(vd))
3896 return (0);
3897
3898 if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
3899 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
3900 VDEV_AUX_CORRUPT_DATA);
3901 return (-1);
3902 }
3903
3904 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
3905 !SPA_VERSION_IS_SUPPORTED(version) ||
3906 nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
3907 guid != vd->vdev_guid ||
3908 nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
3909 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
3910 VDEV_AUX_CORRUPT_DATA);
3911 nvlist_free(label);
3912 return (-1);
3913 }
3914
3915 /*
3916 * We don't actually check the pool state here. If it's in fact in
3917 * use by another pool, we update this fact on the fly when requested.
3918 */
3919 nvlist_free(label);
3920 return (0);
3921 }
3922
3923 static void
3924 vdev_destroy_ms_flush_data(vdev_t *vd, dmu_tx_t *tx)
3925 {
3926 objset_t *mos = spa_meta_objset(vd->vdev_spa);
3927
3928 if (vd->vdev_top_zap == 0)
3929 return;
3930
3931 uint64_t object = 0;
3932 int err = zap_lookup(mos, vd->vdev_top_zap,
3933 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, &object);
3934 if (err == ENOENT)
3935 return;
3936 VERIFY0(err);
3937
3938 VERIFY0(dmu_object_free(mos, object, tx));
3939 VERIFY0(zap_remove(mos, vd->vdev_top_zap,
3940 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, tx));
3941 }
3942
3943 /*
3944 * Free the objects used to store this vdev's spacemaps, and the array
3945 * that points to them.
3946 */
3947 void
3948 vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx)
3949 {
3950 if (vd->vdev_ms_array == 0)
3951 return;
3952
3953 objset_t *mos = vd->vdev_spa->spa_meta_objset;
3954 uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift;
3955 size_t array_bytes = array_count * sizeof (uint64_t);
3956 uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP);
3957 VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0,
3958 array_bytes, smobj_array, 0));
3959
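	/* Free each metaslab's space map object, then the array that held them. */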
3960 for (uint64_t i = 0; i < array_count; i++) {
3961 uint64_t smobj = smobj_array[i];
3962 if (smobj == 0)
3963 continue;
3964
3965 space_map_free_obj(mos, smobj, tx);
3966 }
3967
3968 kmem_free(smobj_array, array_bytes);
3969 VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx));
3970 vdev_destroy_ms_flush_data(vd, tx);
3971 vd->vdev_ms_array = 0;
3972 }
3973
3974 static void
3975 vdev_remove_empty_log(vdev_t *vd, uint64_t txg)
3976 {
3977 spa_t *spa = vd->vdev_spa;
3978
3979 ASSERT(vd->vdev_islog);
3980 ASSERT(vd == vd->vdev_top);
3981 ASSERT3U(txg, ==, spa_syncing_txg(spa));
3982
3983 dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
3984
3985 vdev_destroy_spacemaps(vd, tx);
3986 if (vd->vdev_top_zap != 0) {
3987 vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
3988 vd->vdev_top_zap = 0;
3989 }
3990
3991 dmu_tx_commit(tx);
3992 }
3993
3994 void
3995 vdev_sync_done(vdev_t *vd, uint64_t txg)
3996 {
3997 metaslab_t *msp;
3998 boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
3999
4000 ASSERT(vdev_is_concrete(vd));
4001
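	/*
	 * Finish syncing any metaslabs dirtied in this txg and, if there
	 * were any, reassess the vdev's metaslab group(s).
	 */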
4002 while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
4003 != NULL)
4004 metaslab_sync_done(msp, txg);
4005
4006 if (reassess) {
4007 metaslab_sync_reassess(vd->vdev_mg);
4008 if (vd->vdev_log_mg != NULL)
4009 metaslab_sync_reassess(vd->vdev_log_mg);
4010 }
4011 }
4012
4013 void
4014 vdev_sync(vdev_t *vd, uint64_t txg)
4015 {
4016 spa_t *spa = vd->vdev_spa;
4017 vdev_t *lvd;
4018 metaslab_t *msp;
4019
4020 ASSERT3U(txg, ==, spa->spa_syncing_txg);
4021 dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
4022 if (range_tree_space(vd->vdev_obsolete_segments) > 0) {
4023 ASSERT(vd->vdev_removing ||
4024 vd->vdev_ops == &vdev_indirect_ops);
4025
4026 vdev_indirect_sync_obsolete(vd, tx);
4027
4028 /*
4029 * If the vdev is indirect, it can't have dirty
4030 * metaslabs or DTLs.
4031 */
4032 if (vd->vdev_ops == &vdev_indirect_ops) {
4033 ASSERT(txg_list_empty(&vd->vdev_ms_list, txg));
4034 ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg));
4035 dmu_tx_commit(tx);
4036 return;
4037 }
4038 }
4039
4040 ASSERT(vdev_is_concrete(vd));
4041
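	/*
	 * On the first sync of a new top-level vdev, allocate the MOS
	 * object array that will hold its metaslab space map objects.
	 */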
4042 if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 &&
4043 !vd->vdev_removing) {
4044 ASSERT(vd == vd->vdev_top);
4045 ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
4046 vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
4047 DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
4048 ASSERT(vd->vdev_ms_array != 0);
4049 vdev_config_dirty(vd);
4050 }
4051
4052 while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
4053 metaslab_sync(msp, txg);
4054 (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
4055 }
4056
4057 while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
4058 vdev_dtl_sync(lvd, txg);
4059
4060 /*
4061 * If this is an empty log device being removed, destroy the
4062 * metadata associated with it.
4063 */
4064 if (vd->vdev_islog && vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
4065 vdev_remove_empty_log(vd, txg);
4066
4067 (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
4068 dmu_tx_commit(tx);
4069 }
4070
4071 /*
4072 * Return the amount of space that should be (or was) allocated for the given
4073 * psize (compressed block size) in the given TXG. Note that for expanded
4074 * RAIDZ vdevs, the size allocated for older BP's may be larger. See
4075 * vdev_raidz_asize().
4076 */
4077 uint64_t
4078 vdev_psize_to_asize_txg(vdev_t *vd, uint64_t psize, uint64_t txg)
4079 {
4080 return (vd->vdev_ops->vdev_op_asize(vd, psize, txg));
4081 }
4082
4083 uint64_t
4084 vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
4085 {
4086 return (vdev_psize_to_asize_txg(vd, psize, 0));
4087 }
4088
4089 /*
4090 * Mark the given vdev faulted. A faulted vdev behaves as if the device could
4091 * not be opened, and no I/O is attempted.
4092 */
4093 int
4094 vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
4095 {
4096 vdev_t *vd, *tvd;
4097
4098 spa_vdev_state_enter(spa, SCL_NONE);
4099
4100 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4101 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4102
4103 if (!vd->vdev_ops->vdev_op_leaf)
4104 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
4105
4106 tvd = vd->vdev_top;
4107
4108 /*
4109 * If the user did a 'zpool offline -f', then make the fault persist across
4110 * reboots.
4111 */
4112 if (aux == VDEV_AUX_EXTERNAL_PERSIST) {
4113 /*
4114 * There are two kinds of forced faults: temporary and
4115 * persistent. Temporary faults go away at pool import, while
4116 * persistent faults stay set. Both types of faults can be
4117 * cleared with a zpool clear.
4118 *
4119 * We tell if a vdev is persistently faulted by looking at the
4120 * ZPOOL_CONFIG_AUX_STATE nvpair. If it's set to "external" at
4121 * import then it's a persistent fault. Otherwise, it's
4122 * temporary. We get ZPOOL_CONFIG_AUX_STATE set to "external"
4123 * by setting vd.vdev_stat.vs_aux to VDEV_AUX_EXTERNAL. This
4124 * tells vdev_config_generate() (which gets run later) to set
4125 * ZPOOL_CONFIG_AUX_STATE to "external" in the nvlist.
4126 */
4127 vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
4128 vd->vdev_tmpoffline = B_FALSE;
4129 aux = VDEV_AUX_EXTERNAL;
4130 } else {
4131 vd->vdev_tmpoffline = B_TRUE;
4132 }
4133
4134 /*
4135 * We don't directly use the aux state here, but if we do a
4136 * vdev_reopen(), we need this value to be present to remember why we
4137 * were faulted.
4138 */
4139 vd->vdev_label_aux = aux;
4140
4141 /*
4142 * Faulted state takes precedence over degraded.
4143 */
4144 vd->vdev_delayed_close = B_FALSE;
4145 vd->vdev_faulted = 1ULL;
4146 vd->vdev_degraded = 0ULL;
4147 vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
4148
4149 /*
4150 * If this device has the only valid copy of the data, then
4151 * back off and simply mark the vdev as degraded instead.
4152 */
4153 if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
4154 vd->vdev_degraded = 1ULL;
4155 vd->vdev_faulted = 0ULL;
4156
4157 /*
4158 * If we reopen the device and it's not dead, only then do we
4159 * mark it degraded.
4160 */
4161 vdev_reopen(tvd);
4162
4163 if (vdev_readable(vd))
4164 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
4165 }
4166
4167 return (spa_vdev_state_exit(spa, vd, 0));
4168 }
4169
4170 /*
4171 * Mark the given vdev degraded. A degraded vdev is purely an indication to the
4172 * user that something is wrong. The vdev continues to operate as normal as far
4173 * as I/O is concerned.
4174 */
4175 int
4176 vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
4177 {
4178 vdev_t *vd;
4179
4180 spa_vdev_state_enter(spa, SCL_NONE);
4181
4182 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4183 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4184
4185 if (!vd->vdev_ops->vdev_op_leaf)
4186 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
4187
4188 /*
4189 * If the vdev is already faulted, then don't do anything.
4190 */
4191 if (vd->vdev_faulted || vd->vdev_degraded)
4192 return (spa_vdev_state_exit(spa, NULL, 0));
4193
4194 vd->vdev_degraded = 1ULL;
4195 if (!vdev_is_dead(vd))
4196 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
4197 aux);
4198
4199 return (spa_vdev_state_exit(spa, vd, 0));
4200 }
4201
4202 int
4203 vdev_remove_wanted(spa_t *spa, uint64_t guid)
4204 {
4205 vdev_t *vd;
4206
4207 spa_vdev_state_enter(spa, SCL_NONE);
4208
4209 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4210 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4211
4212 /*
4213 * If the vdev is already removed, or is expanding (which can trigger
4214 * repartition add/remove events), then don't do anything.
4215 */
4216 if (vd->vdev_removed || vd->vdev_expanding)
4217 return (spa_vdev_state_exit(spa, NULL, 0));
4218
4219 /*
4220 * Confirm the vdev has been removed, otherwise don't do anything.
4221 */
4222 if (vd->vdev_ops->vdev_op_leaf && !zio_wait(vdev_probe(vd, NULL)))
4223 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(EEXIST)));
4224
4225 vd->vdev_remove_wanted = B_TRUE;
4226 spa_async_request(spa, SPA_ASYNC_REMOVE);
4227
4228 return (spa_vdev_state_exit(spa, vd, 0));
4229 }
4230
4231
4232 /*
4233 * Online the given vdev.
4234 *
4235 * If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached
4236 * spare device should be detached when the device finishes resilvering.
4237 * Second, the online should be treated like a 'test' online case, so no FMA
4238 * events are generated if the device fails to open.
4239 */
4240 int
4241 vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
4242 {
4243 vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
4244 boolean_t wasoffline;
4245 vdev_state_t oldstate;
4246
4247 spa_vdev_state_enter(spa, SCL_NONE);
4248
4249 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4250 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4251
4252 wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
4253 oldstate = vd->vdev_state;
4254
4255 tvd = vd->vdev_top;
4256 vd->vdev_offline = B_FALSE;
4257 vd->vdev_tmpoffline = B_FALSE;
4258 vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
4259 vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
4260
4261 /* XXX - L2ARC 1.0 does not support expansion */
4262 if (!vd->vdev_aux) {
4263 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
4264 pvd->vdev_expanding = !!((flags & ZFS_ONLINE_EXPAND) ||
4265 spa->spa_autoexpand);
4266 vd->vdev_expansion_time = gethrestime_sec();
4267 }
4268
4269 vdev_reopen(tvd);
4270 vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
4271
4272 if (!vd->vdev_aux) {
4273 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
4274 pvd->vdev_expanding = B_FALSE;
4275 }
4276
4277 if (newstate)
4278 *newstate = vd->vdev_state;
4279 if ((flags & ZFS_ONLINE_UNSPARE) &&
4280 !vdev_is_dead(vd) && vd->vdev_parent &&
4281 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
4282 vd->vdev_parent->vdev_child[0] == vd)
4283 vd->vdev_unspare = B_TRUE;
4284
4285 if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
4286
4287 /* XXX - L2ARC 1.0 does not support expansion */
4288 if (vd->vdev_aux)
4289 return (spa_vdev_state_exit(spa, vd, ENOTSUP));
4290 spa->spa_ccw_fail_time = 0;
4291 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
4292 }
4293
4294 /* Restart initializing if necessary */
4295 mutex_enter(&vd->vdev_initialize_lock);
4296 if (vdev_writeable(vd) &&
4297 vd->vdev_initialize_thread == NULL &&
4298 vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) {
4299 (void) vdev_initialize(vd);
4300 }
4301 mutex_exit(&vd->vdev_initialize_lock);
4302
4303 /*
4304 * Restart trimming if necessary. We do not restart trimming for cache
4305 * devices here. This is triggered by l2arc_rebuild_vdev()
4306 * asynchronously for the whole device or in l2arc_evict() as it evicts
4307 * space for upcoming writes.
4308 */
4309 mutex_enter(&vd->vdev_trim_lock);
4310 if (vdev_writeable(vd) && !vd->vdev_isl2cache &&
4311 vd->vdev_trim_thread == NULL &&
4312 vd->vdev_trim_state == VDEV_TRIM_ACTIVE) {
4313 (void) vdev_trim(vd, vd->vdev_trim_rate, vd->vdev_trim_partial,
4314 vd->vdev_trim_secure);
4315 }
4316 mutex_exit(&vd->vdev_trim_lock);
4317
4318 if (wasoffline ||
4319 (oldstate < VDEV_STATE_DEGRADED &&
4320 vd->vdev_state >= VDEV_STATE_DEGRADED)) {
4321 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE);
4322
4323 /*
4324 * Asynchronously detach spare vdev if resilver or
4325 * rebuild is not required
4326 */
4327 if (vd->vdev_unspare &&
4328 !dsl_scan_resilvering(spa->spa_dsl_pool) &&
4329 !dsl_scan_resilver_scheduled(spa->spa_dsl_pool) &&
4330 !vdev_rebuild_active(tvd))
4331 spa_async_request(spa, SPA_ASYNC_DETACH_SPARE);
4332 }
4333 return (spa_vdev_state_exit(spa, vd, 0));
4334 }
4335
4336 static int
4337 vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
4338 {
4339 vdev_t *vd, *tvd;
4340 int error = 0;
4341 uint64_t generation;
4342 metaslab_group_t *mg;
4343
4344 top:
4345 spa_vdev_state_enter(spa, SCL_ALLOC);
4346
4347 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4348 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4349
4350 if (!vd->vdev_ops->vdev_op_leaf)
4351 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
4352
4353 if (vd->vdev_ops == &vdev_draid_spare_ops)
4354 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
4355
4356 tvd = vd->vdev_top;
4357 mg = tvd->vdev_mg;
4358 generation = spa->spa_config_generation + 1;
4359
4360 /*
4361 * If the device isn't already offline, try to offline it.
4362 */
4363 if (!vd->vdev_offline) {
4364 /*
4365 * If this device has the only valid copy of some data,
4366 * don't allow it to be offlined. Log devices are always
4367 * expendable.
4368 */
4369 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
4370 vdev_dtl_required(vd))
4371 return (spa_vdev_state_exit(spa, NULL,
4372 SET_ERROR(EBUSY)));
4373
4374 /*
4375 * If the top-level is a slog and it has had allocations
4376 * then proceed. We check that the vdev's metaslab group
4377 * is not NULL since it's possible that we may have just
4378 * added this vdev but not yet initialized its metaslabs.
4379 */
4380 if (tvd->vdev_islog && mg != NULL) {
4381 /*
4382 * Prevent any future allocations.
4383 */
4384 ASSERT3P(tvd->vdev_log_mg, ==, NULL);
4385 metaslab_group_passivate(mg);
4386 (void) spa_vdev_state_exit(spa, vd, 0);
4387
4388 error = spa_reset_logs(spa);
4389
4390 /*
4391 * If the log device was successfully reset but has
4392 * checkpointed data, do not offline it.
4393 */
4394 if (error == 0 &&
4395 tvd->vdev_checkpoint_sm != NULL) {
4396 ASSERT3U(space_map_allocated(
4397 tvd->vdev_checkpoint_sm), !=, 0);
4398 error = ZFS_ERR_CHECKPOINT_EXISTS;
4399 }
4400
4401 spa_vdev_state_enter(spa, SCL_ALLOC);
4402
4403 /*
4404 * Check to see if the config has changed.
4405 */
4406 if (error || generation != spa->spa_config_generation) {
4407 metaslab_group_activate(mg);
4408 if (error)
4409 return (spa_vdev_state_exit(spa,
4410 vd, error));
4411 (void) spa_vdev_state_exit(spa, vd, 0);
4412 goto top;
4413 }
4414 ASSERT0(tvd->vdev_stat.vs_alloc);
4415 }
4416
4417 /*
4418 * Offline this device and reopen its top-level vdev.
4419 * If the top-level vdev is a log device then just offline
4420 * it. Otherwise, if this action results in the top-level
4421 * vdev becoming unusable, undo it and fail the request.
4422 */
4423 vd->vdev_offline = B_TRUE;
4424 vdev_reopen(tvd);
4425
4426 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
4427 vdev_is_dead(tvd)) {
4428 vd->vdev_offline = B_FALSE;
4429 vdev_reopen(tvd);
4430 return (spa_vdev_state_exit(spa, NULL,
4431 SET_ERROR(EBUSY)));
4432 }
4433
4434 /*
4435 * Add the device back into the metaslab rotor so that
4436 * once we online the device it's open for business.
4437 */
4438 if (tvd->vdev_islog && mg != NULL)
4439 metaslab_group_activate(mg);
4440 }
4441
4442 vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
4443
4444 return (spa_vdev_state_exit(spa, vd, 0));
4445 }
4446
4447 int
4448 vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
4449 {
4450 int error;
4451
4452 mutex_enter(&spa->spa_vdev_top_lock);
4453 error = vdev_offline_locked(spa, guid, flags);
4454 mutex_exit(&spa->spa_vdev_top_lock);
4455
4456 return (error);
4457 }
4458
4459 /*
4460 * Clear the error counts associated with this vdev. Unlike vdev_online() and
4461 * vdev_offline(), we assume the spa config is locked. We also clear all
4462 * children. If 'vd' is NULL, then the user wants to clear all vdevs.
4463 */
4464 void
4465 vdev_clear(spa_t *spa, vdev_t *vd)
4466 {
4467 vdev_t *rvd = spa->spa_root_vdev;
4468
4469 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
4470
4471 if (vd == NULL)
4472 vd = rvd;
4473
4474 vd->vdev_stat.vs_read_errors = 0;
4475 vd->vdev_stat.vs_write_errors = 0;
4476 vd->vdev_stat.vs_checksum_errors = 0;
4477 vd->vdev_stat.vs_slow_ios = 0;
4478
4479 for (int c = 0; c < vd->vdev_children; c++)
4480 vdev_clear(spa, vd->vdev_child[c]);
4481
4482 /*
4483 * It makes no sense to "clear" an indirect or removed vdev.
4484 */
4485 if (!vdev_is_concrete(vd) || vd->vdev_removed)
4486 return;
4487
4488 /*
4489 * If we're in the FAULTED state or have experienced failed I/O, then
4490 * clear the persistent state and attempt to reopen the device. We
4491 * also mark the vdev config dirty, so that the new faulted state is
4492 * written out to disk.
4493 */
4494 if (vd->vdev_faulted || vd->vdev_degraded ||
4495 !vdev_readable(vd) || !vdev_writeable(vd)) {
4496 /*
4497 * When reopening in response to a clear event, it may be due to
4498 * a fmadm repair request. In this case, if the device is
4499 * still broken, we still want to post the ereport again.
4500 */
4501 vd->vdev_forcefault = B_TRUE;
4502
4503 vd->vdev_faulted = vd->vdev_degraded = 0ULL;
4504 vd->vdev_cant_read = B_FALSE;
4505 vd->vdev_cant_write = B_FALSE;
4506 vd->vdev_stat.vs_aux = 0;
4507
4508 vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
4509
4510 vd->vdev_forcefault = B_FALSE;
4511
4512 if (vd != rvd && vdev_writeable(vd->vdev_top))
4513 vdev_state_dirty(vd->vdev_top);
4514
4515 /* If a resilver isn't required, check if vdevs can be culled */
4516 if (vd->vdev_aux == NULL && !vdev_is_dead(vd) &&
4517 !dsl_scan_resilvering(spa->spa_dsl_pool) &&
4518 !dsl_scan_resilver_scheduled(spa->spa_dsl_pool))
4519 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
4520
4521 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
4522 }
4523
4524 /*
4525 * When clearing a FMA-diagnosed fault, we always want to
4526 * unspare the device, as we assume that the original spare was
4527 * done in response to the FMA fault.
4528 */
4529 if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
4530 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
4531 vd->vdev_parent->vdev_child[0] == vd)
4532 vd->vdev_unspare = B_TRUE;
4533
4534 /* Clear recent error events cache (i.e. duplicate events tracking) */
4535 zfs_ereport_clear(spa, vd);
4536 }
4537
4538 boolean_t
4539 vdev_is_dead(vdev_t *vd)
4540 {
4541 /*
4542 * Holes and missing devices are always considered "dead".
4543 * This simplifies the code since we don't have to check for
4544 * these types of devices in the various code paths.
4545 * Instead we rely on the fact that we skip over dead devices
4546 * before issuing I/O to them.
4547 */
4548 return (vd->vdev_state < VDEV_STATE_DEGRADED ||
4549 vd->vdev_ops == &vdev_hole_ops ||
4550 vd->vdev_ops == &vdev_missing_ops);
4551 }
4552
4553 boolean_t
4554 vdev_readable(vdev_t *vd)
4555 {
4556 return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
4557 }
4558
4559 boolean_t
4560 vdev_writeable(vdev_t *vd)
4561 {
4562 return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
4563 vdev_is_concrete(vd));
4564 }
4565
4566 boolean_t
4567 vdev_allocatable(vdev_t *vd)
4568 {
4569 uint64_t state = vd->vdev_state;
4570
4571 /*
4572 * We currently allow allocations from vdevs which may be in the
4573 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device
4574 * fails to reopen then we'll catch it later when we're holding
4575 * the proper locks. Note that we have to get the vdev state
4576 * in a local variable because although it changes atomically,
4577 * we're asking two separate questions about it.
4578 */
4579 return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
4580 !vd->vdev_cant_write && vdev_is_concrete(vd) &&
4581 vd->vdev_mg->mg_initialized);
4582 }
4583
4584 boolean_t
4585 vdev_accessible(vdev_t *vd, zio_t *zio)
4586 {
4587 ASSERT(zio->io_vd == vd);
4588
4589 if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
4590 return (B_FALSE);
4591
4592 if (zio->io_type == ZIO_TYPE_READ)
4593 return (!vd->vdev_cant_read);
4594
4595 if (zio->io_type == ZIO_TYPE_WRITE)
4596 return (!vd->vdev_cant_write);
4597
4598 return (B_TRUE);
4599 }
4600
4601 static void
4602 vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs)
4603 {
4604 /*
4605 * Exclude the dRAID spare when aggregating to avoid double counting
4606 * the ops and bytes. These IOs are counted by the physical leaves.
4607 */
4608 if (cvd->vdev_ops == &vdev_draid_spare_ops)
4609 return;
4610
4611 for (int t = 0; t < VS_ZIO_TYPES; t++) {
4612 vs->vs_ops[t] += cvs->vs_ops[t];
4613 vs->vs_bytes[t] += cvs->vs_bytes[t];
4614 }
4615
4616 cvs->vs_scan_removing = cvd->vdev_removing;
4617 }
4618
4619 /*
4620 * Get extended stats
4621 */
4622 static void
4623 vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
4624 {
4625 (void) cvd;
4626
4627 int t, b;
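	/* Sum the child's latency and queue histograms into the parent's. */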
4628 for (t = 0; t < ZIO_TYPES; t++) {
4629 for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++)
4630 vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b];
4631
4632 for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) {
4633 vsx->vsx_total_histo[t][b] +=
4634 cvsx->vsx_total_histo[t][b];
4635 }
4636 }
4637
4638 for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
4639 for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) {
4640 vsx->vsx_queue_histo[t][b] +=
4641 cvsx->vsx_queue_histo[t][b];
4642 }
4643 vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t];
4644 vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t];
4645
4646 for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++)
4647 vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b];
4648
4649 for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++)
4650 vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b];
4651 }
4652
4653 }
4654
4655 boolean_t
4656 vdev_is_spacemap_addressable(vdev_t *vd)
4657 {
4658 if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_SPACEMAP_V2))
4659 return (B_TRUE);
4660
4661 /*
4662 * If double-word space map entries are not enabled we assume
4663 * 47 bits of the space map entry are dedicated to the entry's
4664 * offset (see SM_OFFSET_BITS in space_map.h). We then use that
4665 * to calculate the maximum address that can be described by a
4666 * space map entry for the given device.
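	 * For example, with ashift=9 this allows offsets up to
	 * 2^(9+47) bytes = 64 PiB.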
4667 */
4668 uint64_t shift = vd->vdev_ashift + SM_OFFSET_BITS;
4669
4670 if (shift >= 63) /* detect potential overflow */
4671 return (B_TRUE);
4672
4673 return (vd->vdev_asize < (1ULL << shift));
4674 }
4675
4676 /*
4677 * Get statistics for the given vdev.
4678 */
4679 static void
4680 vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
4681 {
4682 int t;
4683 /*
4684 * If we're getting stats on the root vdev, aggregate the I/O counts
4685 * over all top-level vdevs (i.e. the direct children of the root).
4686 */
4687 if (!vd->vdev_ops->vdev_op_leaf) {
4688 if (vs) {
4689 memset(vs->vs_ops, 0, sizeof (vs->vs_ops));
4690 memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes));
4691 }
4692 if (vsx)
4693 memset(vsx, 0, sizeof (*vsx));
4694
4695 for (int c = 0; c < vd->vdev_children; c++) {
4696 vdev_t *cvd = vd->vdev_child[c];
4697 vdev_stat_t *cvs = &cvd->vdev_stat;
4698 vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex;
4699
4700 vdev_get_stats_ex_impl(cvd, cvs, cvsx);
4701 if (vs)
4702 vdev_get_child_stat(cvd, vs, cvs);
4703 if (vsx)
4704 vdev_get_child_stat_ex(cvd, vsx, cvsx);
4705 }
4706 } else {
4707 /*
4708 * We're a leaf. Just copy our ZIO active queue stats in. The
4709 * other leaf stats are updated in vdev_stat_update().
4710 */
4711 if (!vsx)
4712 return;
4713
4714 memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex));
4715
4716 for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
4717 vsx->vsx_active_queue[t] = vd->vdev_queue.vq_cactive[t];
4718 vsx->vsx_pend_queue[t] = vdev_queue_class_length(vd, t);
4719 }
4720 }
4721 }
4722
4723 void
4724 vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
4725 {
4726 vdev_t *tvd = vd->vdev_top;
4727 mutex_enter(&vd->vdev_stat_lock);
4728 if (vs) {
4729 memcpy(vs, &vd->vdev_stat, sizeof (*vs));
4730 vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
4731 vs->vs_state = vd->vdev_state;
4732 vs->vs_rsize = vdev_get_min_asize(vd);
4733
4734 if (vd->vdev_ops->vdev_op_leaf) {
4735 vs->vs_pspace = vd->vdev_psize;
4736 vs->vs_rsize += VDEV_LABEL_START_SIZE +
4737 VDEV_LABEL_END_SIZE;
4738 /*
4739 * Report initializing progress. Since we don't
4740 * have the initializing locks held, this is only
4741 * an estimate (although a fairly accurate one).
4742 */
4743 vs->vs_initialize_bytes_done =
4744 vd->vdev_initialize_bytes_done;
4745 vs->vs_initialize_bytes_est =
4746 vd->vdev_initialize_bytes_est;
4747 vs->vs_initialize_state = vd->vdev_initialize_state;
4748 vs->vs_initialize_action_time =
4749 vd->vdev_initialize_action_time;
4750
4751 /*
4752 * Report manual TRIM progress. Since we don't have
4753 * the manual TRIM locks held, this is only an
4754 * estimate (although a fairly accurate one).
4755 */
4756 vs->vs_trim_notsup = !vd->vdev_has_trim;
4757 vs->vs_trim_bytes_done = vd->vdev_trim_bytes_done;
4758 vs->vs_trim_bytes_est = vd->vdev_trim_bytes_est;
4759 vs->vs_trim_state = vd->vdev_trim_state;
4760 vs->vs_trim_action_time = vd->vdev_trim_action_time;
4761
4762 /* Set when there is a deferred resilver. */
4763 vs->vs_resilver_deferred = vd->vdev_resilver_deferred;
4764 }
4765
4766 /*
4767 * Report expandable space on top-level, non-auxiliary devices
4768 * only. The expandable space is reported in terms of metaslab
4769 * sized units since that determines how much space the pool
4770 * can expand.
4771 */
4772 if (vd->vdev_aux == NULL && tvd != NULL) {
4773 vs->vs_esize = P2ALIGN_TYPED(
4774 vd->vdev_max_asize - vd->vdev_asize,
4775 1ULL << tvd->vdev_ms_shift, uint64_t);
4776 }
4777
4778 vs->vs_configured_ashift = vd->vdev_top != NULL
4779 ? vd->vdev_top->vdev_ashift : vd->vdev_ashift;
4780 vs->vs_logical_ashift = vd->vdev_logical_ashift;
4781 if (vd->vdev_physical_ashift <= ASHIFT_MAX)
4782 vs->vs_physical_ashift = vd->vdev_physical_ashift;
4783 else
4784 vs->vs_physical_ashift = 0;
4785
4786 /*
4787 * Report fragmentation and rebuild progress for top-level,
4788 * non-auxiliary, concrete devices.
4789 */
4790 if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
4791 vdev_is_concrete(vd)) {
4792 /*
4793 * The vdev fragmentation rating doesn't take into
4794 * account the embedded slog metaslab (vdev_log_mg).
4795 * Since it's only one metaslab, it would have a tiny
4796 * impact on the overall fragmentation.
4797 */
4798 vs->vs_fragmentation = (vd->vdev_mg != NULL) ?
4799 vd->vdev_mg->mg_fragmentation : 0;
4800 }
4801 vs->vs_noalloc = MAX(vd->vdev_noalloc,
4802 tvd ? tvd->vdev_noalloc : 0);
4803 }
4804
4805 vdev_get_stats_ex_impl(vd, vs, vsx);
4806 mutex_exit(&vd->vdev_stat_lock);
4807 }
4808
4809 void
4810 vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
4811 {
4812 return (vdev_get_stats_ex(vd, vs, NULL));
4813 }
4814
4815 void
4816 vdev_clear_stats(vdev_t *vd)
4817 {
4818 mutex_enter(&vd->vdev_stat_lock);
4819 vd->vdev_stat.vs_space = 0;
4820 vd->vdev_stat.vs_dspace = 0;
4821 vd->vdev_stat.vs_alloc = 0;
4822 mutex_exit(&vd->vdev_stat_lock);
4823 }
4824
4825 void
4826 vdev_scan_stat_init(vdev_t *vd)
4827 {
4828 vdev_stat_t *vs = &vd->vdev_stat;
4829
4830 for (int c = 0; c < vd->vdev_children; c++)
4831 vdev_scan_stat_init(vd->vdev_child[c]);
4832
4833 mutex_enter(&vd->vdev_stat_lock);
4834 vs->vs_scan_processed = 0;
4835 mutex_exit(&vd->vdev_stat_lock);
4836 }
4837
4838 void
4839 vdev_stat_update(zio_t *zio, uint64_t psize)
4840 {
4841 spa_t *spa = zio->io_spa;
4842 vdev_t *rvd = spa->spa_root_vdev;
4843 vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
4844 vdev_t *pvd;
4845 uint64_t txg = zio->io_txg;
4846 /* Suppress ASAN false positive */
4847 #ifdef __SANITIZE_ADDRESS__
4848 vdev_stat_t *vs = vd ? &vd->vdev_stat : NULL;
4849 vdev_stat_ex_t *vsx = vd ? &vd->vdev_stat_ex : NULL;
4850 #else
4851 vdev_stat_t *vs = &vd->vdev_stat;
4852 vdev_stat_ex_t *vsx = &vd->vdev_stat_ex;
4853 #endif
4854 zio_type_t type = zio->io_type;
4855 int flags = zio->io_flags;
4856
4857 /*
4858 * If this i/o is a gang leader, it didn't do any actual work.
4859 */
4860 if (zio->io_gang_tree)
4861 return;
4862
4863 if (zio->io_error == 0) {
4864 /*
4865 * If this is a root i/o, don't count it -- we've already
4866 * counted the top-level vdevs, and vdev_get_stats() will
4867 * aggregate them when asked. This reduces contention on
4868 * the root vdev_stat_lock and implicitly handles blocks
4869 * that compress away to holes, for which there is no i/o.
4870 * (Holes never create vdev children, so all the counters
4871 * remain zero, which is what we want.)
4872 *
4873 * Note: this only applies to successful i/o (io_error == 0)
4874 * because unlike i/o counts, errors are not additive.
4875 * When reading a ditto block, for example, failure of
4876 * one top-level vdev does not imply a root-level error.
4877 */
4878 if (vd == rvd)
4879 return;
4880
4881 ASSERT(vd == zio->io_vd);
4882
4883 if (flags & ZIO_FLAG_IO_BYPASS)
4884 return;
4885
4886 mutex_enter(&vd->vdev_stat_lock);
4887
4888 if (flags & ZIO_FLAG_IO_REPAIR) {
4889 /*
4890 * Repair is the result of a resilver issued by the
4891 * scan thread (spa_sync).
4892 */
4893 if (flags & ZIO_FLAG_SCAN_THREAD) {
4894 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
4895 dsl_scan_phys_t *scn_phys = &scn->scn_phys;
4896 uint64_t *processed = &scn_phys->scn_processed;
4897
4898 if (vd->vdev_ops->vdev_op_leaf)
4899 atomic_add_64(processed, psize);
4900 vs->vs_scan_processed += psize;
4901 }
4902
4903 /*
4904 * Repair is the result of a rebuild issued by the
4905 * rebuild thread (vdev_rebuild_thread). To avoid
4906 * double counting repaired bytes the virtual dRAID
4907 * spare vdev is excluded from the processed bytes.
4908 */
4909 if (zio->io_priority == ZIO_PRIORITY_REBUILD) {
4910 vdev_t *tvd = vd->vdev_top;
4911 vdev_rebuild_t *vr = &tvd->vdev_rebuild_config;
4912 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
4913 uint64_t *rebuilt = &vrp->vrp_bytes_rebuilt;
4914
4915 if (vd->vdev_ops->vdev_op_leaf &&
4916 vd->vdev_ops != &vdev_draid_spare_ops) {
4917 atomic_add_64(rebuilt, psize);
4918 }
4919 vs->vs_rebuild_processed += psize;
4920 }
4921
4922 if (flags & ZIO_FLAG_SELF_HEAL)
4923 vs->vs_self_healed += psize;
4924 }
4925
4926 /*
4927 * The bytes/ops/histograms are recorded at the leaf level and
4928 * aggregated into the higher level vdevs in vdev_get_stats().
4929 */
4930 if (vd->vdev_ops->vdev_op_leaf &&
4931 (zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) {
4932 zio_type_t vs_type = type;
4933 zio_priority_t priority = zio->io_priority;
4934
4935 /*
4936 * TRIM ops and bytes are reported to user space as
4937 * ZIO_TYPE_FLUSH. This is done to preserve the
4938 * vdev_stat_t structure layout for user space.
4939 */
4940 if (type == ZIO_TYPE_TRIM)
4941 vs_type = ZIO_TYPE_FLUSH;
4942
4943 /*
4944 * Solely for the purposes of 'zpool iostat -lqrw'
4945 * reporting, use the priority to categorize the IO.
4946 * Only the following are reported to user space:
4947 *
4948 * ZIO_PRIORITY_SYNC_READ,
4949 * ZIO_PRIORITY_SYNC_WRITE,
4950 * ZIO_PRIORITY_ASYNC_READ,
4951 * ZIO_PRIORITY_ASYNC_WRITE,
4952 * ZIO_PRIORITY_SCRUB,
4953 * ZIO_PRIORITY_TRIM,
4954 * ZIO_PRIORITY_REBUILD.
4955 */
4956 if (priority == ZIO_PRIORITY_INITIALIZING) {
4957 ASSERT3U(type, ==, ZIO_TYPE_WRITE);
4958 priority = ZIO_PRIORITY_ASYNC_WRITE;
4959 } else if (priority == ZIO_PRIORITY_REMOVAL) {
4960 priority = ((type == ZIO_TYPE_WRITE) ?
4961 ZIO_PRIORITY_ASYNC_WRITE :
4962 ZIO_PRIORITY_ASYNC_READ);
4963 }
4964
4965 vs->vs_ops[vs_type]++;
4966 vs->vs_bytes[vs_type] += psize;
4967
4968 if (flags & ZIO_FLAG_DELEGATED) {
4969 vsx->vsx_agg_histo[priority]
4970 [RQ_HISTO(zio->io_size)]++;
4971 } else {
4972 vsx->vsx_ind_histo[priority]
4973 [RQ_HISTO(zio->io_size)]++;
4974 }
4975
4976 if (zio->io_delta && zio->io_delay) {
4977 vsx->vsx_queue_histo[priority]
4978 [L_HISTO(zio->io_delta - zio->io_delay)]++;
4979 vsx->vsx_disk_histo[type]
4980 [L_HISTO(zio->io_delay)]++;
4981 vsx->vsx_total_histo[type]
4982 [L_HISTO(zio->io_delta)]++;
4983 }
4984 }
4985
4986 mutex_exit(&vd->vdev_stat_lock);
4987 return;
4988 }
4989
4990 if (flags & ZIO_FLAG_SPECULATIVE)
4991 return;
4992
4993 /*
4994 * If this is an I/O error that is going to be retried, then ignore the
4995 * error. Otherwise, the user may interpret B_FAILFAST I/O errors as
4996 * hard errors, when in reality they can happen for any number of
4997 * innocuous reasons (bus resets, MPxIO link failure, etc).
4998 */
4999 if (zio->io_error == EIO &&
5000 !(zio->io_flags & ZIO_FLAG_IO_RETRY))
5001 return;
5002
5003 /*
5004 * Intent log writes won't propagate their error to the root
5005 * I/O so don't mark these types of failures as pool-level
5006 * errors.
5007 */
5008 if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
5009 return;
5010
5011 if (type == ZIO_TYPE_WRITE && txg != 0 &&
5012 (!(flags & ZIO_FLAG_IO_REPAIR) ||
5013 (flags & ZIO_FLAG_SCAN_THREAD) ||
5014 spa->spa_claiming)) {
5015 /*
5016 * This is either a normal write (not a repair), or it's
5017 * a repair induced by the scrub thread, or it's a repair
5018 * made by zil_claim() during spa_load() in the first txg.
5019 * In the normal case, we commit the DTL change in the same
5020 * txg as the block was born. In the scrub-induced repair
5021 * case, we know that scrubs run in first-pass syncing context,
5022 * so we commit the DTL change in spa_syncing_txg(spa).
5023 * In the zil_claim() case, we commit in spa_first_txg(spa).
5024 *
5025 * We currently do not make DTL entries for failed spontaneous
5026 * self-healing writes triggered by normal (non-scrubbing)
5027 * reads, because we have no transactional context in which to
5028 * do so -- and it's not clear that it'd be desirable anyway.
5029 */
5030 if (vd->vdev_ops->vdev_op_leaf) {
5031 uint64_t commit_txg = txg;
5032 if (flags & ZIO_FLAG_SCAN_THREAD) {
5033 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
5034 ASSERT(spa_sync_pass(spa) == 1);
5035 vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
5036 commit_txg = spa_syncing_txg(spa);
5037 } else if (spa->spa_claiming) {
5038 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
5039 commit_txg = spa_first_txg(spa);
5040 }
5041 ASSERT(commit_txg >= spa_syncing_txg(spa));
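			/*
			 * This txg is already recorded as missing, so there
			 * is nothing more to dirty.
			 */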
5042 if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
5043 return;
5044 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
5045 vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
5046 vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
5047 }
5048 if (vd != rvd)
5049 vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
5050 }
5051 }
5052
5053 int64_t
5054 vdev_deflated_space(vdev_t *vd, int64_t space)
5055 {
5056 ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0);
5057 ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
5058
5059 return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio);
5060 }
5061
5062 /*
5063 * Update the in-core space usage stats for this vdev, its metaslab class,
5064 * and the root vdev.
5065 */
5066 void
5067 vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
5068 int64_t space_delta)
5069 {
5070 (void) defer_delta;
5071 int64_t dspace_delta;
5072 spa_t *spa = vd->vdev_spa;
5073 vdev_t *rvd = spa->spa_root_vdev;
5074
5075 ASSERT(vd == vd->vdev_top);
5076
5077 /*
5078 * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion
5079 * factor. We must calculate this here and not at the root vdev
5080 * because the root vdev's psize-to-asize is simply the max of its
5081 * children's, thus not accurate enough for us.
5082 */
5083 dspace_delta = vdev_deflated_space(vd, space_delta);
5084
5085 mutex_enter(&vd->vdev_stat_lock);
5086 /* ensure we won't underflow */
5087 if (alloc_delta < 0) {
5088 ASSERT3U(vd->vdev_stat.vs_alloc, >=, -alloc_delta);
5089 }
5090
5091 vd->vdev_stat.vs_alloc += alloc_delta;
5092 vd->vdev_stat.vs_space += space_delta;
5093 vd->vdev_stat.vs_dspace += dspace_delta;
5094 mutex_exit(&vd->vdev_stat_lock);
5095
5096 /* every class but log contributes to root space stats */
5097 if (vd->vdev_mg != NULL && !vd->vdev_islog) {
5098 ASSERT(!vd->vdev_isl2cache);
5099 mutex_enter(&rvd->vdev_stat_lock);
5100 rvd->vdev_stat.vs_alloc += alloc_delta;
5101 rvd->vdev_stat.vs_space += space_delta;
5102 rvd->vdev_stat.vs_dspace += dspace_delta;
5103 mutex_exit(&rvd->vdev_stat_lock);
5104 }
5105 /* Note: metaslab_class_space_update moved to metaslab_space_update */
5106 }
5107
5108 /*
5109 * Mark a top-level vdev's config as dirty, placing it on the dirty list
5110 * so that it will be written out next time the vdev configuration is synced.
5111 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
5112 */
5113 void
5114 vdev_config_dirty(vdev_t *vd)
5115 {
5116 spa_t *spa = vd->vdev_spa;
5117 vdev_t *rvd = spa->spa_root_vdev;
5118 int c;
5119
5120 ASSERT(spa_writeable(spa));
5121
5122 /*
5123 * If this is an aux vdev (as with l2cache and spare devices), then we
5124 * update the vdev config manually and set the sync flag.
5125 */
5126 if (vd->vdev_aux != NULL) {
5127 spa_aux_vdev_t *sav = vd->vdev_aux;
5128 nvlist_t **aux;
5129 uint_t naux;
5130
5131 for (c = 0; c < sav->sav_count; c++) {
5132 if (sav->sav_vdevs[c] == vd)
5133 break;
5134 }
5135
5136 if (c == sav->sav_count) {
5137 /*
5138 * We're being removed. There's nothing more to do.
5139 */
5140 ASSERT(sav->sav_sync == B_TRUE);
5141 return;
5142 }
5143
5144 sav->sav_sync = B_TRUE;
5145
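		/*
		 * The aux vdev is either an L2ARC device or a spare; find
		 * the config array that holds it.
		 */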
5146 if (nvlist_lookup_nvlist_array(sav->sav_config,
5147 ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
5148 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
5149 ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
5150 }
5151
5152 ASSERT(c < naux);
5153
5154 /*
5155 * Setting the nvlist in the middle of the array is a little
5156 * sketchy, but it will work.
5157 */
5158 nvlist_free(aux[c]);
5159 aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
5160
5161 return;
5162 }
5163
5164 /*
5165 * The dirty list is protected by the SCL_CONFIG lock. The caller
5166 * must either hold SCL_CONFIG as writer, or must be the sync thread
5167 * (which holds SCL_CONFIG as reader). There's only one sync thread,
5168 * so this is sufficient to ensure mutual exclusion.
5169 */
5170 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
5171 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5172 spa_config_held(spa, SCL_CONFIG, RW_READER)));
5173
5174 if (vd == rvd) {
5175 for (c = 0; c < rvd->vdev_children; c++)
5176 vdev_config_dirty(rvd->vdev_child[c]);
5177 } else {
5178 ASSERT(vd == vd->vdev_top);
5179
5180 if (!list_link_active(&vd->vdev_config_dirty_node) &&
5181 vdev_is_concrete(vd)) {
5182 list_insert_head(&spa->spa_config_dirty_list, vd);
5183 }
5184 }
5185 }
5186
5187 void
5188 vdev_config_clean(vdev_t *vd)
5189 {
5190 spa_t *spa = vd->vdev_spa;
5191
5192 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
5193 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5194 spa_config_held(spa, SCL_CONFIG, RW_READER)));
5195
5196 ASSERT(list_link_active(&vd->vdev_config_dirty_node));
5197 list_remove(&spa->spa_config_dirty_list, vd);
5198 }
5199
5200 /*
5201 * Mark a top-level vdev's state as dirty, so that the next pass of
5202 * spa_sync() can convert this into vdev_config_dirty(). We distinguish
5203 * the state changes from larger config changes because they require
5204 * much less locking, and are often needed for administrative actions.
5205 */
5206 void
5207 vdev_state_dirty(vdev_t *vd)
5208 {
5209 spa_t *spa = vd->vdev_spa;
5210
5211 ASSERT(spa_writeable(spa));
5212 ASSERT(vd == vd->vdev_top);
5213
5214 /*
5215 * The state list is protected by the SCL_STATE lock. The caller
5216 * must either hold SCL_STATE as writer, or must be the sync thread
5217 * (which holds SCL_STATE as reader). There's only one sync thread,
5218 * so this is sufficient to ensure mutual exclusion.
5219 */
5220 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
5221 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5222 spa_config_held(spa, SCL_STATE, RW_READER)));
5223
5224 if (!list_link_active(&vd->vdev_state_dirty_node) &&
5225 vdev_is_concrete(vd))
5226 list_insert_head(&spa->spa_state_dirty_list, vd);
5227 }
5228
5229 void
5230 vdev_state_clean(vdev_t *vd)
5231 {
5232 spa_t *spa = vd->vdev_spa;
5233
5234 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
5235 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5236 spa_config_held(spa, SCL_STATE, RW_READER)));
5237
5238 ASSERT(list_link_active(&vd->vdev_state_dirty_node));
5239 list_remove(&spa->spa_state_dirty_list, vd);
5240 }
5241
5242 /*
5243 * Propagate vdev state up from children to parent.
5244 */
5245 void
5246 vdev_propagate_state(vdev_t *vd)
5247 {
5248 spa_t *spa = vd->vdev_spa;
5249 vdev_t *rvd = spa->spa_root_vdev;
5250 int degraded = 0, faulted = 0;
5251 int corrupted = 0;
5252 vdev_t *child;
5253
5254 if (vd->vdev_children > 0) {
5255 for (int c = 0; c < vd->vdev_children; c++) {
5256 child = vd->vdev_child[c];
5257
5258 /*
5259 * Don't factor holes or indirect vdevs into the
5260 * decision.
5261 */
5262 if (!vdev_is_concrete(child))
5263 continue;
5264
5265 if (!vdev_readable(child) ||
5266 (!vdev_writeable(child) && spa_writeable(spa))) {
5267 /*
5268 * Root special: if there is a top-level log
5269 * device, treat the root vdev as if it were
5270 * degraded.
5271 */
5272 if (child->vdev_islog && vd == rvd)
5273 degraded++;
5274 else
5275 faulted++;
5276 } else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
5277 degraded++;
5278 }
5279
5280 if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
5281 corrupted++;
5282 }
5283
5284 vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
5285
5286 /*
5287 * Root special: if there is a top-level vdev that cannot be
5288 * opened due to corrupted metadata, then propagate the root
5289 * vdev's aux state as 'corrupt' rather than 'insufficient
5290 * replicas'.
5291 */
5292 if (corrupted && vd == rvd &&
5293 rvd->vdev_state == VDEV_STATE_CANT_OPEN)
5294 vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
5295 VDEV_AUX_CORRUPT_DATA);
5296 }
5297
5298 if (vd->vdev_parent)
5299 vdev_propagate_state(vd->vdev_parent);
5300 }
5301
5302 /*
5303 * Set a vdev's state. If this is during an open, we don't update the parent
5304 * state, because we're in the process of opening children depth-first.
5305 * Otherwise, we propagate the change to the parent.
5306 *
5307 * If this routine places a device in a faulted state, an appropriate ereport is
5308 * generated.
5309 */
5310 void
5311 vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
5312 {
5313 uint64_t save_state;
5314 spa_t *spa = vd->vdev_spa;
5315
5316 if (state == vd->vdev_state) {
5317 /*
5318 * Since vdev_offline() code path is already in an offline
5319 * state we can miss a statechange event to OFFLINE. Check
5320 * the previous state to catch this condition.
5321 */
5322 if (vd->vdev_ops->vdev_op_leaf &&
5323 (state == VDEV_STATE_OFFLINE) &&
5324 (vd->vdev_prevstate >= VDEV_STATE_FAULTED)) {
5325 /* post an offline state change */
5326 zfs_post_state_change(spa, vd, vd->vdev_prevstate);
5327 }
5328 vd->vdev_stat.vs_aux = aux;
5329 return;
5330 }
5331
5332 save_state = vd->vdev_state;
5333
5334 vd->vdev_state = state;
5335 vd->vdev_stat.vs_aux = aux;
5336
5337 /*
5338 * If we are setting the vdev state to anything but an open state, then
5339 * always close the underlying device unless the device has requested
5340 * a delayed close (i.e. we're about to remove or fault the device).
5341 * Otherwise, we keep accessible but invalid devices open forever.
5342 * We don't call vdev_close() itself, because that implies some extra
5343 * checks (offline, etc) that we don't want here. This is limited to
5344 * leaf devices, because otherwise closing the device will affect other
5345 * children.
5346 */
5347 if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
5348 vd->vdev_ops->vdev_op_leaf)
5349 vd->vdev_ops->vdev_op_close(vd);
5350
5351 if (vd->vdev_removed &&
5352 state == VDEV_STATE_CANT_OPEN &&
5353 (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
5354 /*
5355 * If the previous state is set to VDEV_STATE_REMOVED, then this
5356 * device was previously marked removed and someone attempted to
5357 * reopen it. If this failed due to a nonexistent device, then
5358 * keep the device in the REMOVED state. We also leave the state
5359 * alone if this is one of our special test online cases, which
5360 * only attempts to online the device and shouldn't generate an
5361 * FMA fault.
5362 */
5363 vd->vdev_state = VDEV_STATE_REMOVED;
5364 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
5365 } else if (state == VDEV_STATE_REMOVED) {
5366 vd->vdev_removed = B_TRUE;
5367 } else if (state == VDEV_STATE_CANT_OPEN) {
5368 /*
5369 * If we fail to open a vdev during an import or recovery, we
5370 * mark it as "not available", which signifies that it was
5371 * never there to begin with. Failure to open such a device
5372 * is not considered an error.
5373 */
5374 if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
5375 spa_load_state(spa) == SPA_LOAD_RECOVER) &&
5376 vd->vdev_ops->vdev_op_leaf)
5377 vd->vdev_not_present = 1;
5378
5379 /*
5380 * Post the appropriate ereport. If the 'prevstate' field is
5381 * set to something other than VDEV_STATE_UNKNOWN, it indicates
5382 * that this is part of a vdev_reopen(). In this case, we don't
5383 * want to post the ereport if the device was already in the
5384 * CANT_OPEN state beforehand.
5385 *
5386 * If the 'checkremove' flag is set, then this is an attempt to
5387 * online the device in response to an insertion event. If we
5388 * hit this case, then we have detected an insertion event for a
5389 * faulted or offline device that wasn't in the removed state.
5390 * In this scenario, we don't post an ereport because we are
5391 * about to replace the device, or attempt an online with
5392 * vdev_forcefault, which will generate the fault for us.
5393 */
5394 if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
5395 !vd->vdev_not_present && !vd->vdev_checkremove &&
5396 vd != spa->spa_root_vdev) {
5397 const char *class;
5398
5399 switch (aux) {
5400 case VDEV_AUX_OPEN_FAILED:
5401 class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
5402 break;
5403 case VDEV_AUX_CORRUPT_DATA:
5404 class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
5405 break;
5406 case VDEV_AUX_NO_REPLICAS:
5407 class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
5408 break;
5409 case VDEV_AUX_BAD_GUID_SUM:
5410 class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
5411 break;
5412 case VDEV_AUX_TOO_SMALL:
5413 class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
5414 break;
5415 case VDEV_AUX_BAD_LABEL:
5416 class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
5417 break;
5418 case VDEV_AUX_BAD_ASHIFT:
5419 class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT;
5420 break;
5421 default:
5422 class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
5423 }
5424
5425 (void) zfs_ereport_post(class, spa, vd, NULL, NULL,
5426 save_state);
5427 }
5428
5429 /* Erase any notion of persistent removed state */
5430 vd->vdev_removed = B_FALSE;
5431 } else {
5432 vd->vdev_removed = B_FALSE;
5433 }
5434
5435 /*
5436 * Notify ZED of any significant state-change on a leaf vdev.
5437 *
5438 */
5439 if (vd->vdev_ops->vdev_op_leaf) {
5440 /* preserve original state from a vdev_reopen() */
5441 if ((vd->vdev_prevstate != VDEV_STATE_UNKNOWN) &&
5442 (vd->vdev_prevstate != vd->vdev_state) &&
5443 (save_state <= VDEV_STATE_CLOSED))
5444 save_state = vd->vdev_prevstate;
5445
5446 /* filter out state change due to initial vdev_open */
5447 if (save_state > VDEV_STATE_CLOSED)
5448 zfs_post_state_change(spa, vd, save_state);
5449 }
5450
5451 if (!isopen && vd->vdev_parent)
5452 vdev_propagate_state(vd->vdev_parent);
5453 }
5454
5455 boolean_t
5456 vdev_children_are_offline(vdev_t *vd)
5457 {
5458 ASSERT(!vd->vdev_ops->vdev_op_leaf);
5459
5460 for (uint64_t i = 0; i < vd->vdev_children; i++) {
5461 if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE)
5462 return (B_FALSE);
5463 }
5464
5465 return (B_TRUE);
5466 }
5467
5468 /*
5469 * Check the vdev configuration to ensure that it's capable of supporting
5470 * a root pool. We do not support partial configuration.
5471 */
5472 boolean_t
5473 vdev_is_bootable(vdev_t *vd)
5474 {
5475 if (!vd->vdev_ops->vdev_op_leaf) {
5476 const char *vdev_type = vd->vdev_ops->vdev_op_type;
5477
5478 if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0)
5479 return (B_FALSE);
5480 }
5481
5482 for (int c = 0; c < vd->vdev_children; c++) {
5483 if (!vdev_is_bootable(vd->vdev_child[c]))
5484 return (B_FALSE);
5485 }
5486 return (B_TRUE);
5487 }
5488
5489 boolean_t
5490 vdev_is_concrete(vdev_t *vd)
5491 {
5492 vdev_ops_t *ops = vd->vdev_ops;
5493 if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops ||
5494 ops == &vdev_missing_ops || ops == &vdev_root_ops) {
5495 return (B_FALSE);
5496 } else {
5497 return (B_TRUE);
5498 }
5499 }
5500
5501 /*
5502 * Determine if a log device has valid content. If the vdev was
5503 * removed or faulted in the MOS config then we know that
5504 * the content on the log device has already been written to the pool.
5505 */
5506 boolean_t
5507 vdev_log_state_valid(vdev_t *vd)
5508 {
5509 if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
5510 !vd->vdev_removed)
5511 return (B_TRUE);
5512
5513 for (int c = 0; c < vd->vdev_children; c++)
5514 if (vdev_log_state_valid(vd->vdev_child[c]))
5515 return (B_TRUE);
5516
5517 return (B_FALSE);
5518 }
5519
5520 /*
5521 * Expand a vdev if possible.
5522 */
5523 void
5524 vdev_expand(vdev_t *vd, uint64_t txg)
5525 {
5526 ASSERT(vd->vdev_top == vd);
5527 ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5528 ASSERT(vdev_is_concrete(vd));
5529
5530 vdev_set_deflate_ratio(vd);
5531
5532 if ((vd->vdev_spa->spa_raidz_expand == NULL ||
5533 vd->vdev_spa->spa_raidz_expand->vre_vdev_id != vd->vdev_id) &&
5534 (vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
5535 vdev_is_concrete(vd)) {
5536 vdev_metaslab_group_create(vd);
5537 VERIFY(vdev_metaslab_init(vd, txg) == 0);
5538 vdev_config_dirty(vd);
5539 }
5540 }
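/*
 * Illustrative sketch (editor-added, not upstream code): vdev_expand() only
 * creates new metaslabs when the grown asize can hold more metaslabs than
 * are currently allocated. The hypothetical helper below restates that
 * check using the same fields vdev_expand() consults.
 */
static boolean_t
vdev_example_can_grow_metaslabs(vdev_t *vd)
{
	return ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count);
}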
5541
5542 /*
5543 * Split a vdev.
5544 */
5545 void
5546 vdev_split(vdev_t *vd)
5547 {
5548 vdev_t *cvd, *pvd = vd->vdev_parent;
5549
5550 VERIFY3U(pvd->vdev_children, >, 1);
5551
5552 vdev_remove_child(pvd, vd);
5553 vdev_compact_children(pvd);
5554
5555 ASSERT3P(pvd->vdev_child, !=, NULL);
5556
5557 cvd = pvd->vdev_child[0];
5558 if (pvd->vdev_children == 1) {
5559 vdev_remove_parent(cvd);
5560 cvd->vdev_splitting = B_TRUE;
5561 }
5562 vdev_propagate_state(cvd);
5563 }
5564
5565 void
5566 vdev_deadman(vdev_t *vd, const char *tag)
5567 {
5568 for (int c = 0; c < vd->vdev_children; c++) {
5569 vdev_t *cvd = vd->vdev_child[c];
5570
5571 vdev_deadman(cvd, tag);
5572 }
5573
5574 if (vd->vdev_ops->vdev_op_leaf) {
5575 vdev_queue_t *vq = &vd->vdev_queue;
5576
5577 mutex_enter(&vq->vq_lock);
5578 if (vq->vq_active > 0) {
5579 spa_t *spa = vd->vdev_spa;
5580 zio_t *fio;
5581 uint64_t delta;
5582
5583 zfs_dbgmsg("slow vdev: %s has %u active IOs",
5584 vd->vdev_path, vq->vq_active);
5585
5586 /*
5587 * Look at the I/O at the head of the active queue; if it has
5588 * been outstanding for longer than spa_deadman_synctime,
5589 * invoke the deadman logic.
5590 */
5591 fio = list_head(&vq->vq_active_list);
5592 delta = gethrtime() - fio->io_timestamp;
5593 if (delta > spa_deadman_synctime(spa))
5594 zio_deadman(fio, tag);
5595 }
5596 mutex_exit(&vq->vq_lock);
5597 }
5598 }
5599
5600 void
5601 vdev_defer_resilver(vdev_t *vd)
5602 {
5603 ASSERT(vd->vdev_ops->vdev_op_leaf);
5604
5605 vd->vdev_resilver_deferred = B_TRUE;
5606 vd->vdev_spa->spa_resilver_deferred = B_TRUE;
5607 }
5608
5609 /*
5610 * Clears the resilver deferred flag on all leaf devs under vd. Returns
5611 * B_TRUE if we have devices that need to be resilvered and are available to
5612 * accept resilver I/Os.
5613 */
5614 boolean_t
5615 vdev_clear_resilver_deferred(vdev_t *vd, dmu_tx_t *tx)
5616 {
5617 boolean_t resilver_needed = B_FALSE;
5618 spa_t *spa = vd->vdev_spa;
5619
5620 for (int c = 0; c < vd->vdev_children; c++) {
5621 vdev_t *cvd = vd->vdev_child[c];
5622 resilver_needed |= vdev_clear_resilver_deferred(cvd, tx);
5623 }
5624
5625 if (vd == spa->spa_root_vdev &&
5626 spa_feature_is_active(spa, SPA_FEATURE_RESILVER_DEFER)) {
5627 spa_feature_decr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
5628 vdev_config_dirty(vd);
5629 spa->spa_resilver_deferred = B_FALSE;
5630 return (resilver_needed);
5631 }
5632
5633 if (!vdev_is_concrete(vd) || vd->vdev_aux ||
5634 !vd->vdev_ops->vdev_op_leaf)
5635 return (resilver_needed);
5636
5637 vd->vdev_resilver_deferred = B_FALSE;
5638
5639 return (!vdev_is_dead(vd) && !vd->vdev_offline &&
5640 vdev_resilver_needed(vd, NULL, NULL));
5641 }
5642
5643 boolean_t
5644 vdev_xlate_is_empty(range_seg64_t *rs)
5645 {
5646 return (rs->rs_start == rs->rs_end);
5647 }
5648
5649 /*
5650 * Translate a logical range to the first contiguous physical range for the
5651 * specified vdev_t. This function is initially called with a leaf vdev and
5652 * will walk each parent vdev until it reaches a top-level vdev. Once the
5653 * top-level is reached the physical range is initialized and the recursive
5654 * function begins to unwind. As it unwinds it calls the parent's vdev
5655 * specific translation function to do the real conversion.
5656 */
5657 void
5658 vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
5659 range_seg64_t *physical_rs, range_seg64_t *remain_rs)
5660 {
5661 /*
5662 * Walk up the vdev tree
5663 */
5664 if (vd != vd->vdev_top) {
5665 vdev_xlate(vd->vdev_parent, logical_rs, physical_rs,
5666 remain_rs);
5667 } else {
5668 /*
5669 * We've reached the top-level vdev, initialize the physical
5670 * range to the logical range and set an empty remaining
5671 * range then start to unwind.
5672 */
5673 physical_rs->rs_start = logical_rs->rs_start;
5674 physical_rs->rs_end = logical_rs->rs_end;
5675
5676 remain_rs->rs_start = logical_rs->rs_start;
5677 remain_rs->rs_end = logical_rs->rs_start;
5678
5679 return;
5680 }
5681
5682 vdev_t *pvd = vd->vdev_parent;
5683 ASSERT3P(pvd, !=, NULL);
5684 ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);
5685
5686 /*
5687 * As this recursive function unwinds, translate the logical
5688 * range into its physical and any remaining components by calling
5689 * the vdev specific translate function.
5690 */
5691 range_seg64_t intermediate = { 0 };
5692 pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate, remain_rs);
5693
5694 physical_rs->rs_start = intermediate.rs_start;
5695 physical_rs->rs_end = intermediate.rs_end;
5696 }
5697
5698 void
5699 vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs,
5700 vdev_xlate_func_t *func, void *arg)
5701 {
5702 range_seg64_t iter_rs = *logical_rs;
5703 range_seg64_t physical_rs;
5704 range_seg64_t remain_rs;
5705
5706 while (!vdev_xlate_is_empty(&iter_rs)) {
5707
5708 vdev_xlate(vd, &iter_rs, &physical_rs, &remain_rs);
5709
5710 /*
5711 * With raidz and dRAID, it's possible that the logical range
5712 * does not live on this leaf vdev. Only call the provided
5713 * function when the translated physical range is non-empty.
5714 */
5715 if (!vdev_xlate_is_empty(&physical_rs))
5716 func(arg, &physical_rs);
5717
5718 iter_rs = remain_rs;
5719 }
5720 }
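/*
 * Illustrative sketch (editor-added, not upstream code): a minimal
 * vdev_xlate_walk() callback that accumulates the total physical size of a
 * logical range. Both helper names are hypothetical; the callback signature
 * follows vdev_xlate_func_t.
 */
static void
vdev_example_xlate_sum_cb(void *arg, range_seg64_t *physical_rs)
{
	uint64_t *sump = arg;

	*sump += physical_rs->rs_end - physical_rs->rs_start;
}

static uint64_t
vdev_example_xlate_physical_size(vdev_t *vd, uint64_t start, uint64_t size)
{
	range_seg64_t logical_rs = {
		.rs_start = start,
		.rs_end = start + size,
	};
	uint64_t sum = 0;

	vdev_xlate_walk(vd, &logical_rs, vdev_example_xlate_sum_cb, &sum);
	return (sum);
}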
5721
5722 static char *
5723 vdev_name(vdev_t *vd, char *buf, int buflen)
5724 {
5725 if (vd->vdev_path == NULL) {
5726 if (strcmp(vd->vdev_ops->vdev_op_type, "root") == 0) {
5727 strlcpy(buf, vd->vdev_spa->spa_name, buflen);
5728 } else if (!vd->vdev_ops->vdev_op_leaf) {
5729 snprintf(buf, buflen, "%s-%llu",
5730 vd->vdev_ops->vdev_op_type,
5731 (u_longlong_t)vd->vdev_id);
5732 }
5733 } else {
5734 strlcpy(buf, vd->vdev_path, buflen);
5735 }
5736 return (buf);
5737 }
5738
5739 /*
5740 * Look at the vdev tree and determine whether any devices are currently being
5741 * replaced.
5742 */
5743 boolean_t
5744 vdev_replace_in_progress(vdev_t *vdev)
5745 {
5746 ASSERT(spa_config_held(vdev->vdev_spa, SCL_ALL, RW_READER) != 0);
5747
5748 if (vdev->vdev_ops == &vdev_replacing_ops)
5749 return (B_TRUE);
5750
5751 /*
5752 * A 'spare' vdev indicates that we have a replace in progress, unless
5753 * it has exactly two children, and the second, the hot spare, has
5754 * finished being resilvered.
5755 */
5756 if (vdev->vdev_ops == &vdev_spare_ops && (vdev->vdev_children > 2 ||
5757 !vdev_dtl_empty(vdev->vdev_child[1], DTL_MISSING)))
5758 return (B_TRUE);
5759
5760 for (int i = 0; i < vdev->vdev_children; i++) {
5761 if (vdev_replace_in_progress(vdev->vdev_child[i]))
5762 return (B_TRUE);
5763 }
5764
5765 return (B_FALSE);
5766 }
5767
5768 /*
5769 * Add a (source=src, propname=propval) list to an nvlist.
5770 */
5771 static void
5772 vdev_prop_add_list(nvlist_t *nvl, const char *propname, const char *strval,
5773 uint64_t intval, zprop_source_t src)
5774 {
5775 nvlist_t *propval;
5776
5777 propval = fnvlist_alloc();
5778 fnvlist_add_uint64(propval, ZPROP_SOURCE, src);
5779
5780 if (strval != NULL)
5781 fnvlist_add_string(propval, ZPROP_VALUE, strval);
5782 else
5783 fnvlist_add_uint64(propval, ZPROP_VALUE, intval);
5784
5785 fnvlist_add_nvlist(nvl, propname, propval);
5786 nvlist_free(propval);
5787 }
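/*
 * Illustrative sketch (editor-added, not upstream code): vdev_prop_add_list()
 * nests each property under its name as
 *   propname -> { ZPROP_SOURCE = src, ZPROP_VALUE = strval or intval }
 * The hypothetical helper below builds a single numeric entry that way.
 */
static nvlist_t *
vdev_example_single_prop_nvl(const char *propname, uint64_t intval)
{
	nvlist_t *nvl = fnvlist_alloc();

	vdev_prop_add_list(nvl, propname, NULL, intval, ZPROP_SRC_NONE);
	return (nvl);
}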
5788
5789 static void
5790 vdev_props_set_sync(void *arg, dmu_tx_t *tx)
5791 {
5792 vdev_t *vd;
5793 nvlist_t *nvp = arg;
5794 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
5795 objset_t *mos = spa->spa_meta_objset;
5796 nvpair_t *elem = NULL;
5797 uint64_t vdev_guid;
5798 uint64_t objid;
5799 nvlist_t *nvprops;
5800
5801 vdev_guid = fnvlist_lookup_uint64(nvp, ZPOOL_VDEV_PROPS_SET_VDEV);
5802 nvprops = fnvlist_lookup_nvlist(nvp, ZPOOL_VDEV_PROPS_SET_PROPS);
5803 vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE);
5804
5805 /* this vdev could get removed while waiting for this sync task */
5806 if (vd == NULL)
5807 return;
5808
5809 /*
5810 * Set vdev property values in the vdev props mos object.
5811 */
5812 if (vd->vdev_root_zap != 0) {
5813 objid = vd->vdev_root_zap;
5814 } else if (vd->vdev_top_zap != 0) {
5815 objid = vd->vdev_top_zap;
5816 } else if (vd->vdev_leaf_zap != 0) {
5817 objid = vd->vdev_leaf_zap;
5818 } else {
5819 panic("unexpected vdev type");
5820 }
5821
5822 mutex_enter(&spa->spa_props_lock);
5823
5824 while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
5825 uint64_t intval;
5826 const char *strval;
5827 vdev_prop_t prop;
5828 const char *propname = nvpair_name(elem);
5829 zprop_type_t proptype;
5830
5831 switch (prop = vdev_name_to_prop(propname)) {
5832 case VDEV_PROP_USERPROP:
5833 if (vdev_prop_user(propname)) {
5834 strval = fnvpair_value_string(elem);
5835 if (strlen(strval) == 0) {
5836 /* remove the property if value == "" */
5837 (void) zap_remove(mos, objid, propname,
5838 tx);
5839 } else {
5840 VERIFY0(zap_update(mos, objid, propname,
5841 1, strlen(strval) + 1, strval, tx));
5842 }
5843 spa_history_log_internal(spa, "vdev set", tx,
5844 "vdev_guid=%llu: %s=%s",
5845 (u_longlong_t)vdev_guid, nvpair_name(elem),
5846 strval);
5847 }
5848 break;
5849 default:
5850 /* normalize the property name */
5851 propname = vdev_prop_to_name(prop);
5852 proptype = vdev_prop_get_type(prop);
5853
5854 if (nvpair_type(elem) == DATA_TYPE_STRING) {
5855 ASSERT(proptype == PROP_TYPE_STRING);
5856 strval = fnvpair_value_string(elem);
5857 VERIFY0(zap_update(mos, objid, propname,
5858 1, strlen(strval) + 1, strval, tx));
5859 spa_history_log_internal(spa, "vdev set", tx,
5860 "vdev_guid=%llu: %s=%s",
5861 (u_longlong_t)vdev_guid, nvpair_name(elem),
5862 strval);
5863 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
5864 intval = fnvpair_value_uint64(elem);
5865
5866 if (proptype == PROP_TYPE_INDEX) {
5867 const char *unused;
5868 VERIFY0(vdev_prop_index_to_string(
5869 prop, intval, &unused));
5870 }
5871 VERIFY0(zap_update(mos, objid, propname,
5872 sizeof (uint64_t), 1, &intval, tx));
5873 spa_history_log_internal(spa, "vdev set", tx,
5874 "vdev_guid=%llu: %s=%lld",
5875 (u_longlong_t)vdev_guid,
5876 nvpair_name(elem), (longlong_t)intval);
5877 } else {
5878 panic("invalid vdev property type %u",
5879 nvpair_type(elem));
5880 }
5881 }
5882
5883 }
5884
5885 mutex_exit(&spa->spa_props_lock);
5886 }
5887
5888 int
5889 vdev_prop_set(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
5890 {
5891 spa_t *spa = vd->vdev_spa;
5892 nvpair_t *elem = NULL;
5893 uint64_t vdev_guid;
5894 nvlist_t *nvprops;
5895 int error = 0;
5896
5897 ASSERT(vd != NULL);
5898
5899 /* Check that vdev has a zap we can use */
5900 if (vd->vdev_root_zap == 0 &&
5901 vd->vdev_top_zap == 0 &&
5902 vd->vdev_leaf_zap == 0)
5903 return (SET_ERROR(EINVAL));
5904
5905 if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_SET_VDEV,
5906 &vdev_guid) != 0)
5907 return (SET_ERROR(EINVAL));
5908
5909 if (nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_SET_PROPS,
5910 &nvprops) != 0)
5911 return (SET_ERROR(EINVAL));
5912
5913 if ((vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE)) == NULL)
5914 return (SET_ERROR(EINVAL));
5915
5916 while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
5917 const char *propname = nvpair_name(elem);
5918 vdev_prop_t prop = vdev_name_to_prop(propname);
5919 uint64_t intval = 0;
5920 const char *strval = NULL;
5921
5922 if (prop == VDEV_PROP_USERPROP && !vdev_prop_user(propname)) {
5923 error = EINVAL;
5924 goto end;
5925 }
5926
5927 if (vdev_prop_readonly(prop)) {
5928 error = EROFS;
5929 goto end;
5930 }
5931
5932 /* Special Processing */
5933 switch (prop) {
5934 case VDEV_PROP_PATH:
5935 if (vd->vdev_path == NULL) {
5936 error = EROFS;
5937 break;
5938 }
5939 if (nvpair_value_string(elem, &strval) != 0) {
5940 error = EINVAL;
5941 break;
5942 }
5943 /* New path must start with /dev/ */
5944 if (strncmp(strval, "/dev/", 5)) {
5945 error = EINVAL;
5946 break;
5947 }
5948 error = spa_vdev_setpath(spa, vdev_guid, strval);
5949 break;
5950 case VDEV_PROP_ALLOCATING:
5951 if (nvpair_value_uint64(elem, &intval) != 0) {
5952 error = EINVAL;
5953 break;
5954 }
5955 if (intval != vd->vdev_noalloc)
5956 break;
5957 if (intval == 0)
5958 error = spa_vdev_noalloc(spa, vdev_guid);
5959 else
5960 error = spa_vdev_alloc(spa, vdev_guid);
5961 break;
5962 case VDEV_PROP_FAILFAST:
5963 if (nvpair_value_uint64(elem, &intval) != 0) {
5964 error = EINVAL;
5965 break;
5966 }
5967 vd->vdev_failfast = intval & 1;
5968 break;
5969 case VDEV_PROP_CHECKSUM_N:
5970 if (nvpair_value_uint64(elem, &intval) != 0) {
5971 error = EINVAL;
5972 break;
5973 }
5974 vd->vdev_checksum_n = intval;
5975 break;
5976 case VDEV_PROP_CHECKSUM_T:
5977 if (nvpair_value_uint64(elem, &intval) != 0) {
5978 error = EINVAL;
5979 break;
5980 }
5981 vd->vdev_checksum_t = intval;
5982 break;
5983 case VDEV_PROP_IO_N:
5984 if (nvpair_value_uint64(elem, &intval) != 0) {
5985 error = EINVAL;
5986 break;
5987 }
5988 vd->vdev_io_n = intval;
5989 break;
5990 case VDEV_PROP_IO_T:
5991 if (nvpair_value_uint64(elem, &intval) != 0) {
5992 error = EINVAL;
5993 break;
5994 }
5995 vd->vdev_io_t = intval;
5996 break;
5997 case VDEV_PROP_SLOW_IO_N:
5998 if (nvpair_value_uint64(elem, &intval) != 0) {
5999 error = EINVAL;
6000 break;
6001 }
6002 vd->vdev_slow_io_n = intval;
6003 break;
6004 case VDEV_PROP_SLOW_IO_T:
6005 if (nvpair_value_uint64(elem, &intval) != 0) {
6006 error = EINVAL;
6007 break;
6008 }
6009 vd->vdev_slow_io_t = intval;
6010 break;
6011 default:
6012 /* Most processing is done in vdev_props_set_sync */
6013 break;
6014 }
6015 end:
6016 if (error != 0) {
6017 intval = error;
6018 vdev_prop_add_list(outnvl, propname, strval, intval, 0);
6019 return (error);
6020 }
6021 }
6022
6023 return (dsl_sync_task(spa->spa_name, NULL, vdev_props_set_sync,
6024 innvl, 6, ZFS_SPACE_CHECK_EXTRA_RESERVED));
6025 }
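/*
 * Illustrative sketch (editor-added, not upstream code): the innvl layout
 * that vdev_prop_set() expects. Such a request is normally assembled in
 * userland by libzfs/zpool(8); the helper name and the "failfast" property
 * value used here are examples only.
 */
static nvlist_t *
vdev_example_prop_set_request(uint64_t vdev_guid)
{
	nvlist_t *props = fnvlist_alloc();
	nvlist_t *innvl = fnvlist_alloc();

	/* e.g. request failfast=1 on the vdev identified by vdev_guid */
	fnvlist_add_uint64(props, "failfast", 1);

	fnvlist_add_uint64(innvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid);
	fnvlist_add_nvlist(innvl, ZPOOL_VDEV_PROPS_SET_PROPS, props);
	fnvlist_free(props);

	return (innvl);
}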
6026
6027 int
6028 vdev_prop_get(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
6029 {
6030 spa_t *spa = vd->vdev_spa;
6031 objset_t *mos = spa->spa_meta_objset;
6032 int err = 0;
6033 uint64_t objid;
6034 uint64_t vdev_guid;
6035 nvpair_t *elem = NULL;
6036 nvlist_t *nvprops = NULL;
6037 uint64_t intval = 0;
6038 char *strval = NULL;
6039 const char *propname = NULL;
6040 vdev_prop_t prop;
6041
6042 ASSERT(vd != NULL);
6043 ASSERT(mos != NULL);
6044
6045 if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_GET_VDEV,
6046 &vdev_guid) != 0)
6047 return (SET_ERROR(EINVAL));
6048
6049 nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_GET_PROPS, &nvprops);
6050
6051 if (vd->vdev_root_zap != 0) {
6052 objid = vd->vdev_root_zap;
6053 } else if (vd->vdev_top_zap != 0) {
6054 objid = vd->vdev_top_zap;
6055 } else if (vd->vdev_leaf_zap != 0) {
6056 objid = vd->vdev_leaf_zap;
6057 } else {
6058 return (SET_ERROR(EINVAL));
6059 }
6060 ASSERT(objid != 0);
6061
6062 mutex_enter(&spa->spa_props_lock);
6063
6064 if (nvprops != NULL) {
6065 char namebuf[64] = { 0 };
6066
6067 while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
6068 intval = 0;
6069 strval = NULL;
6070 propname = nvpair_name(elem);
6071 prop = vdev_name_to_prop(propname);
6072 zprop_source_t src = ZPROP_SRC_DEFAULT;
6073 uint64_t integer_size, num_integers;
6074
6075 switch (prop) {
6076 /* Special Read-only Properties */
6077 case VDEV_PROP_NAME:
6078 strval = vdev_name(vd, namebuf,
6079 sizeof (namebuf));
6080 if (strval == NULL)
6081 continue;
6082 vdev_prop_add_list(outnvl, propname, strval, 0,
6083 ZPROP_SRC_NONE);
6084 continue;
6085 case VDEV_PROP_CAPACITY:
6086 /* percent used */
6087 intval = (vd->vdev_stat.vs_dspace == 0) ? 0 :
6088 (vd->vdev_stat.vs_alloc * 100 /
6089 vd->vdev_stat.vs_dspace);
6090 vdev_prop_add_list(outnvl, propname, NULL,
6091 intval, ZPROP_SRC_NONE);
6092 continue;
6093 case VDEV_PROP_STATE:
6094 vdev_prop_add_list(outnvl, propname, NULL,
6095 vd->vdev_state, ZPROP_SRC_NONE);
6096 continue;
6097 case VDEV_PROP_GUID:
6098 vdev_prop_add_list(outnvl, propname, NULL,
6099 vd->vdev_guid, ZPROP_SRC_NONE);
6100 continue;
6101 case VDEV_PROP_ASIZE:
6102 vdev_prop_add_list(outnvl, propname, NULL,
6103 vd->vdev_asize, ZPROP_SRC_NONE);
6104 continue;
6105 case VDEV_PROP_PSIZE:
6106 vdev_prop_add_list(outnvl, propname, NULL,
6107 vd->vdev_psize, ZPROP_SRC_NONE);
6108 continue;
6109 case VDEV_PROP_ASHIFT:
6110 vdev_prop_add_list(outnvl, propname, NULL,
6111 vd->vdev_ashift, ZPROP_SRC_NONE);
6112 continue;
6113 case VDEV_PROP_SIZE:
6114 vdev_prop_add_list(outnvl, propname, NULL,
6115 vd->vdev_stat.vs_dspace, ZPROP_SRC_NONE);
6116 continue;
6117 case VDEV_PROP_FREE:
6118 vdev_prop_add_list(outnvl, propname, NULL,
6119 vd->vdev_stat.vs_dspace -
6120 vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
6121 continue;
6122 case VDEV_PROP_ALLOCATED:
6123 vdev_prop_add_list(outnvl, propname, NULL,
6124 vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
6125 continue;
6126 case VDEV_PROP_EXPANDSZ:
6127 vdev_prop_add_list(outnvl, propname, NULL,
6128 vd->vdev_stat.vs_esize, ZPROP_SRC_NONE);
6129 continue;
6130 case VDEV_PROP_FRAGMENTATION:
6131 vdev_prop_add_list(outnvl, propname, NULL,
6132 vd->vdev_stat.vs_fragmentation,
6133 ZPROP_SRC_NONE);
6134 continue;
6135 case VDEV_PROP_PARITY:
6136 vdev_prop_add_list(outnvl, propname, NULL,
6137 vdev_get_nparity(vd), ZPROP_SRC_NONE);
6138 continue;
6139 case VDEV_PROP_PATH:
6140 if (vd->vdev_path == NULL)
6141 continue;
6142 vdev_prop_add_list(outnvl, propname,
6143 vd->vdev_path, 0, ZPROP_SRC_NONE);
6144 continue;
6145 case VDEV_PROP_DEVID:
6146 if (vd->vdev_devid == NULL)
6147 continue;
6148 vdev_prop_add_list(outnvl, propname,
6149 vd->vdev_devid, 0, ZPROP_SRC_NONE);
6150 continue;
6151 case VDEV_PROP_PHYS_PATH:
6152 if (vd->vdev_physpath == NULL)
6153 continue;
6154 vdev_prop_add_list(outnvl, propname,
6155 vd->vdev_physpath, 0, ZPROP_SRC_NONE);
6156 continue;
6157 case VDEV_PROP_ENC_PATH:
6158 if (vd->vdev_enc_sysfs_path == NULL)
6159 continue;
6160 vdev_prop_add_list(outnvl, propname,
6161 vd->vdev_enc_sysfs_path, 0, ZPROP_SRC_NONE);
6162 continue;
6163 case VDEV_PROP_FRU:
6164 if (vd->vdev_fru == NULL)
6165 continue;
6166 vdev_prop_add_list(outnvl, propname,
6167 vd->vdev_fru, 0, ZPROP_SRC_NONE);
6168 continue;
6169 case VDEV_PROP_PARENT:
6170 if (vd->vdev_parent != NULL) {
6171 strval = vdev_name(vd->vdev_parent,
6172 namebuf, sizeof (namebuf));
6173 vdev_prop_add_list(outnvl, propname,
6174 strval, 0, ZPROP_SRC_NONE);
6175 }
6176 continue;
6177 case VDEV_PROP_CHILDREN:
6178 if (vd->vdev_children > 0)
6179 strval = kmem_zalloc(ZAP_MAXVALUELEN,
6180 KM_SLEEP);
6181 for (uint64_t i = 0; i < vd->vdev_children;
6182 i++) {
6183 const char *vname;
6184
6185 vname = vdev_name(vd->vdev_child[i],
6186 namebuf, sizeof (namebuf));
6187 if (vname == NULL)
6188 vname = "(unknown)";
6189 if (strlen(strval) > 0)
6190 strlcat(strval, ",",
6191 ZAP_MAXVALUELEN);
6192 strlcat(strval, vname, ZAP_MAXVALUELEN);
6193 }
6194 if (strval != NULL) {
6195 vdev_prop_add_list(outnvl, propname,
6196 strval, 0, ZPROP_SRC_NONE);
6197 kmem_free(strval, ZAP_MAXVALUELEN);
6198 }
6199 continue;
6200 case VDEV_PROP_NUMCHILDREN:
6201 vdev_prop_add_list(outnvl, propname, NULL,
6202 vd->vdev_children, ZPROP_SRC_NONE);
6203 continue;
6204 case VDEV_PROP_READ_ERRORS:
6205 vdev_prop_add_list(outnvl, propname, NULL,
6206 vd->vdev_stat.vs_read_errors,
6207 ZPROP_SRC_NONE);
6208 continue;
6209 case VDEV_PROP_WRITE_ERRORS:
6210 vdev_prop_add_list(outnvl, propname, NULL,
6211 vd->vdev_stat.vs_write_errors,
6212 ZPROP_SRC_NONE);
6213 continue;
6214 case VDEV_PROP_CHECKSUM_ERRORS:
6215 vdev_prop_add_list(outnvl, propname, NULL,
6216 vd->vdev_stat.vs_checksum_errors,
6217 ZPROP_SRC_NONE);
6218 continue;
6219 case VDEV_PROP_INITIALIZE_ERRORS:
6220 vdev_prop_add_list(outnvl, propname, NULL,
6221 vd->vdev_stat.vs_initialize_errors,
6222 ZPROP_SRC_NONE);
6223 continue;
6224 case VDEV_PROP_OPS_NULL:
6225 vdev_prop_add_list(outnvl, propname, NULL,
6226 vd->vdev_stat.vs_ops[ZIO_TYPE_NULL],
6227 ZPROP_SRC_NONE);
6228 continue;
6229 case VDEV_PROP_OPS_READ:
6230 vdev_prop_add_list(outnvl, propname, NULL,
6231 vd->vdev_stat.vs_ops[ZIO_TYPE_READ],
6232 ZPROP_SRC_NONE);
6233 continue;
6234 case VDEV_PROP_OPS_WRITE:
6235 vdev_prop_add_list(outnvl, propname, NULL,
6236 vd->vdev_stat.vs_ops[ZIO_TYPE_WRITE],
6237 ZPROP_SRC_NONE);
6238 continue;
6239 case VDEV_PROP_OPS_FREE:
6240 vdev_prop_add_list(outnvl, propname, NULL,
6241 vd->vdev_stat.vs_ops[ZIO_TYPE_FREE],
6242 ZPROP_SRC_NONE);
6243 continue;
6244 case VDEV_PROP_OPS_CLAIM:
6245 vdev_prop_add_list(outnvl, propname, NULL,
6246 vd->vdev_stat.vs_ops[ZIO_TYPE_CLAIM],
6247 ZPROP_SRC_NONE);
6248 continue;
6249 case VDEV_PROP_OPS_TRIM:
6250 /*
6251 * TRIM ops and bytes are reported to user
6252 * space as ZIO_TYPE_FLUSH. This is done to
6253 * preserve the vdev_stat_t structure layout
6254 * for user space.
6255 */
6256 vdev_prop_add_list(outnvl, propname, NULL,
6257 vd->vdev_stat.vs_ops[ZIO_TYPE_FLUSH],
6258 ZPROP_SRC_NONE);
6259 continue;
6260 case VDEV_PROP_BYTES_NULL:
6261 vdev_prop_add_list(outnvl, propname, NULL,
6262 vd->vdev_stat.vs_bytes[ZIO_TYPE_NULL],
6263 ZPROP_SRC_NONE);
6264 continue;
6265 case VDEV_PROP_BYTES_READ:
6266 vdev_prop_add_list(outnvl, propname, NULL,
6267 vd->vdev_stat.vs_bytes[ZIO_TYPE_READ],
6268 ZPROP_SRC_NONE);
6269 continue;
6270 case VDEV_PROP_BYTES_WRITE:
6271 vdev_prop_add_list(outnvl, propname, NULL,
6272 vd->vdev_stat.vs_bytes[ZIO_TYPE_WRITE],
6273 ZPROP_SRC_NONE);
6274 continue;
6275 case VDEV_PROP_BYTES_FREE:
6276 vdev_prop_add_list(outnvl, propname, NULL,
6277 vd->vdev_stat.vs_bytes[ZIO_TYPE_FREE],
6278 ZPROP_SRC_NONE);
6279 continue;
6280 case VDEV_PROP_BYTES_CLAIM:
6281 vdev_prop_add_list(outnvl, propname, NULL,
6282 vd->vdev_stat.vs_bytes[ZIO_TYPE_CLAIM],
6283 ZPROP_SRC_NONE);
6284 continue;
6285 case VDEV_PROP_BYTES_TRIM:
6286 /*
6287 * TRIM ops and bytes are reported to user
6288 * space as ZIO_TYPE_FLUSH. This is done to
6289 * preserve the vdev_stat_t structure layout
6290 * for user space.
6291 */
6292 vdev_prop_add_list(outnvl, propname, NULL,
6293 vd->vdev_stat.vs_bytes[ZIO_TYPE_FLUSH],
6294 ZPROP_SRC_NONE);
6295 continue;
6296 case VDEV_PROP_REMOVING:
6297 vdev_prop_add_list(outnvl, propname, NULL,
6298 vd->vdev_removing, ZPROP_SRC_NONE);
6299 continue;
6300 case VDEV_PROP_RAIDZ_EXPANDING:
6301 /* Only expose this for raidz */
6302 if (vd->vdev_ops == &vdev_raidz_ops) {
6303 vdev_prop_add_list(outnvl, propname,
6304 NULL, vd->vdev_rz_expanding,
6305 ZPROP_SRC_NONE);
6306 }
6307 continue;
6308 /* Numeric Properties */
6309 case VDEV_PROP_ALLOCATING:
6310 /* Leaf vdevs cannot have this property */
6311 if (vd->vdev_mg == NULL &&
6312 vd->vdev_top != NULL) {
6313 src = ZPROP_SRC_NONE;
6314 intval = ZPROP_BOOLEAN_NA;
6315 } else {
6316 err = vdev_prop_get_int(vd, prop,
6317 &intval);
6318 if (err && err != ENOENT)
6319 break;
6320
6321 if (intval ==
6322 vdev_prop_default_numeric(prop))
6323 src = ZPROP_SRC_DEFAULT;
6324 else
6325 src = ZPROP_SRC_LOCAL;
6326 }
6327
6328 vdev_prop_add_list(outnvl, propname, NULL,
6329 intval, src);
6330 break;
6331 case VDEV_PROP_FAILFAST:
6332 src = ZPROP_SRC_LOCAL;
6333 strval = NULL;
6334
6335 err = zap_lookup(mos, objid, nvpair_name(elem),
6336 sizeof (uint64_t), 1, &intval);
6337 if (err == ENOENT) {
6338 intval = vdev_prop_default_numeric(
6339 prop);
6340 err = 0;
6341 } else if (err) {
6342 break;
6343 }
6344 if (intval == vdev_prop_default_numeric(prop))
6345 src = ZPROP_SRC_DEFAULT;
6346
6347 vdev_prop_add_list(outnvl, propname, strval,
6348 intval, src);
6349 break;
6350 case VDEV_PROP_CHECKSUM_N:
6351 case VDEV_PROP_CHECKSUM_T:
6352 case VDEV_PROP_IO_N:
6353 case VDEV_PROP_IO_T:
6354 case VDEV_PROP_SLOW_IO_N:
6355 case VDEV_PROP_SLOW_IO_T:
6356 err = vdev_prop_get_int(vd, prop, &intval);
6357 if (err && err != ENOENT)
6358 break;
6359
6360 if (intval == vdev_prop_default_numeric(prop))
6361 src = ZPROP_SRC_DEFAULT;
6362 else
6363 src = ZPROP_SRC_LOCAL;
6364
6365 vdev_prop_add_list(outnvl, propname, NULL,
6366 intval, src);
6367 break;
6368 /* Text Properties */
6369 case VDEV_PROP_COMMENT:
6370 /* Exists in the ZAP below */
6371 /* FALLTHRU */
6372 case VDEV_PROP_USERPROP:
6373 /* User Properties */
6374 src = ZPROP_SRC_LOCAL;
6375
6376 err = zap_length(mos, objid, nvpair_name(elem),
6377 &integer_size, &num_integers);
6378 if (err)
6379 break;
6380
6381 switch (integer_size) {
6382 case 8:
6383 /* User properties cannot be integers */
6384 err = EINVAL;
6385 break;
6386 case 1:
6387 /* string property */
6388 strval = kmem_alloc(num_integers,
6389 KM_SLEEP);
6390 err = zap_lookup(mos, objid,
6391 nvpair_name(elem), 1,
6392 num_integers, strval);
6393 if (err) {
6394 kmem_free(strval,
6395 num_integers);
6396 break;
6397 }
6398 vdev_prop_add_list(outnvl, propname,
6399 strval, 0, src);
6400 kmem_free(strval, num_integers);
6401 break;
6402 }
6403 break;
6404 default:
6405 err = ENOENT;
6406 break;
6407 }
6408 if (err)
6409 break;
6410 }
6411 } else {
6412 /*
6413 * Get all properties from the MOS vdev property object.
6414 */
6415 zap_cursor_t zc;
6416 zap_attribute_t za;
6417 for (zap_cursor_init(&zc, mos, objid);
6418 (err = zap_cursor_retrieve(&zc, &za)) == 0;
6419 zap_cursor_advance(&zc)) {
6420 intval = 0;
6421 strval = NULL;
6422 zprop_source_t src = ZPROP_SRC_DEFAULT;
6423 propname = za.za_name;
6424
6425 switch (za.za_integer_length) {
6426 case 8:
6427 /* We do not allow integer user properties */
6428 /* This is likely an internal value */
6429 break;
6430 case 1:
6431 /* string property */
6432 strval = kmem_alloc(za.za_num_integers,
6433 KM_SLEEP);
6434 err = zap_lookup(mos, objid, za.za_name, 1,
6435 za.za_num_integers, strval);
6436 if (err) {
6437 kmem_free(strval, za.za_num_integers);
6438 break;
6439 }
6440 vdev_prop_add_list(outnvl, propname, strval, 0,
6441 src);
6442 kmem_free(strval, za.za_num_integers);
6443 break;
6444
6445 default:
6446 break;
6447 }
6448 }
6449 zap_cursor_fini(&zc);
6450 }
6451
6452 mutex_exit(&spa->spa_props_lock);
6453 if (err && err != ENOENT) {
6454 return (err);
6455 }
6456
6457 return (0);
6458 }
6459
6460 EXPORT_SYMBOL(vdev_fault);
6461 EXPORT_SYMBOL(vdev_degrade);
6462 EXPORT_SYMBOL(vdev_online);
6463 EXPORT_SYMBOL(vdev_offline);
6464 EXPORT_SYMBOL(vdev_clear);
6465
6466 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_count, UINT, ZMOD_RW,
6467 "Target number of metaslabs per top-level vdev");
6468
6469 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_shift, UINT, ZMOD_RW,
6470 "Default lower limit for metaslab size");
6471
6472 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, max_ms_shift, UINT, ZMOD_RW,
6473 "Default upper limit for metaslab size");
6474
6475 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, min_ms_count, UINT, ZMOD_RW,
6476 "Minimum number of metaslabs per top-level vdev");
6477
6478 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, ms_count_limit, UINT, ZMOD_RW,
6479 "Practical upper limit of total metaslabs per top-level vdev");
6480
6481 ZFS_MODULE_PARAM(zfs, zfs_, slow_io_events_per_second, UINT, ZMOD_RW,
6482 "Rate limit slow IO (delay) events to this many per second");
6483
6484 ZFS_MODULE_PARAM(zfs, zfs_, deadman_events_per_second, UINT, ZMOD_RW,
6485 "Rate limit hung IO (deadman) events to this many per second");
6486
6487 /* BEGIN CSTYLED */
6488 ZFS_MODULE_PARAM(zfs, zfs_, checksum_events_per_second, UINT, ZMOD_RW,
6489 "Rate limit checksum events to this many checksum errors per second "
6490 "(do not set below ZED threshold).");
6491 /* END CSTYLED */
6492
6493 ZFS_MODULE_PARAM(zfs, zfs_, scan_ignore_errors, INT, ZMOD_RW,
6494 "Ignore errors during resilver/scrub");
6495
6496 ZFS_MODULE_PARAM(zfs_vdev, vdev_, validate_skip, INT, ZMOD_RW,
6497 "Bypass vdev_validate()");
6498
6499 ZFS_MODULE_PARAM(zfs, zfs_, nocacheflush, INT, ZMOD_RW,
6500 "Disable cache flushes");
6501
6502 ZFS_MODULE_PARAM(zfs, zfs_, embedded_slog_min_ms, UINT, ZMOD_RW,
6503 "Minimum number of metaslabs required to dedicate one for log blocks");
6504
6505 /* BEGIN CSTYLED */
6506 ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, min_auto_ashift,
6507 param_set_min_auto_ashift, param_get_uint, ZMOD_RW,
6508 "Minimum ashift used when creating new top-level vdevs");
6509
6510 ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, max_auto_ashift,
6511 param_set_max_auto_ashift, param_get_uint, ZMOD_RW,
6512 "Maximum ashift used when optimizing for logical -> physical sector "
6513 "size on new top-level vdevs");
6514 /* END CSTYLED */
6515