1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2011, 2021 by Delphix. All rights reserved.
25 * Copyright 2017 Nexenta Systems, Inc.
26 * Copyright (c) 2014 Integros [integros.com]
27 * Copyright 2016 Toomas Soome <tsoome@me.com>
28 * Copyright 2017 Joyent, Inc.
29 * Copyright (c) 2017, Intel Corporation.
30 * Copyright (c) 2019, Datto Inc. All rights reserved.
31 * Copyright (c) 2021, Klara Inc.
32 * Copyright (c) 2021, 2023 Hewlett Packard Enterprise Development LP.
33 */
34
35 #include <sys/zfs_context.h>
36 #include <sys/fm/fs/zfs.h>
37 #include <sys/spa.h>
38 #include <sys/spa_impl.h>
39 #include <sys/bpobj.h>
40 #include <sys/dmu.h>
41 #include <sys/dmu_tx.h>
42 #include <sys/dsl_dir.h>
43 #include <sys/vdev_impl.h>
44 #include <sys/vdev_rebuild.h>
45 #include <sys/vdev_draid.h>
46 #include <sys/uberblock_impl.h>
47 #include <sys/metaslab.h>
48 #include <sys/metaslab_impl.h>
49 #include <sys/space_map.h>
50 #include <sys/space_reftree.h>
51 #include <sys/zio.h>
52 #include <sys/zap.h>
53 #include <sys/fs/zfs.h>
54 #include <sys/arc.h>
55 #include <sys/zil.h>
56 #include <sys/dsl_scan.h>
57 #include <sys/vdev_raidz.h>
58 #include <sys/abd.h>
59 #include <sys/vdev_initialize.h>
60 #include <sys/vdev_trim.h>
61 #include <sys/vdev_raidz.h>
62 #include <sys/zvol.h>
63 #include <sys/zfs_ratelimit.h>
64 #include "zfs_prop.h"
65
66 /*
67 * One metaslab from each (normal-class) vdev is used by the ZIL. These are
68 * called "embedded slog metaslabs", are referenced by vdev_log_mg, and are
69 * part of the spa_embedded_log_class. The metaslab with the most free space
70 * in each vdev is selected for this purpose when the pool is opened (or a
71 * vdev is added). See vdev_metaslab_init().
72 *
73 * Log blocks can be allocated from the following locations. Each one is tried
74 * in order until the allocation succeeds:
75 * 1. dedicated log vdevs, aka "slog" (spa_log_class)
76 * 2. embedded slog metaslabs (spa_embedded_log_class)
77 * 3. other metaslabs in normal vdevs (spa_normal_class)
78 *
79 * zfs_embedded_slog_min_ms disables the embedded slog if there are fewer
80 * than this number of metaslabs in the vdev. This ensures that we don't set
81 * aside an unreasonable amount of space for the ZIL. If set to less than
82 * 1 << (spa_slop_shift + 1), on small pools the usable space may be reduced
83 * (by more than 1<<spa_slop_shift) due to the embedded slog metaslab.
84 */
85 static uint_t zfs_embedded_slog_min_ms = 64;
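/*
* A rough worked example, assuming the default spa_slop_shift of 5 (an
* assumption; that tunable is defined elsewhere, in spa_misc.c):
* 1 << (5 + 1) == 64, which matches the default zfs_embedded_slog_min_ms
* above. A top-level vdev therefore needs at least 64 metaslabs before one
* is dedicated to the embedded slog, so at most ~1/64th of the vdev is set
* aside for it.
*/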
86
87 /* default target for number of metaslabs per top-level vdev */
88 static uint_t zfs_vdev_default_ms_count = 200;
89
90 /* minimum number of metaslabs per top-level vdev */
91 static uint_t zfs_vdev_min_ms_count = 16;
92
93 /* practical upper limit of total metaslabs per top-level vdev */
94 static uint_t zfs_vdev_ms_count_limit = 1ULL << 17;
95
96 /* lower limit for metaslab size (512M) */
97 static uint_t zfs_vdev_default_ms_shift = 29;
98
99 /* upper limit for metaslab size (16G) */
100 static uint_t zfs_vdev_max_ms_shift = 34;
101
102 int vdev_validate_skip = B_FALSE;
103
104 /*
105 * Since the DTL space map of a vdev is not expected to have a lot of
106 * entries, we default its block size to 4K.
107 */
108 int zfs_vdev_dtl_sm_blksz = (1 << 12);
109
110 /*
111 * Rate limit slow IO (delay) events to this many per second.
112 */
113 static unsigned int zfs_slow_io_events_per_second = 20;
114
115 /*
116 * Rate limit deadman "hung IO" events to this many per second.
117 */
118 static unsigned int zfs_deadman_events_per_second = 1;
119
120 /*
121 * Rate limit checksum events after this many checksum errors per second.
122 */
123 static unsigned int zfs_checksum_events_per_second = 20;
124
125 /*
126 * Ignore errors during scrub/resilver. This makes it possible to work
127 * around a resilver triggered on import when the pool has errors.
128 */
129 static int zfs_scan_ignore_errors = 0;
130
131 /*
132 * vdev-wide space maps that have lots of entries written to them at
133 * the end of each transaction can benefit from a higher I/O bandwidth
134 * (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
135 */
136 int zfs_vdev_standard_sm_blksz = (1 << 17);
137
138 /*
139 * Tunable parameter for debugging or performance analysis. Setting this
140 * will cause pool corruption on power loss if a volatile out-of-order
141 * write cache is enabled.
142 */
143 int zfs_nocacheflush = 0;
144
145 /*
146 * Maximum and minimum ashift values that can be automatically set based on
147 * vdev's physical ashift (disk's physical sector size). While ASHIFT_MAX
148 * is higher than the maximum value, it is intentionally limited here to not
149 * excessively impact pool space efficiency. Higher ashift values may still
150 * be forced by vdev logical ashift or by user via ashift property, but won't
151 * be set automatically as a performance optimization.
152 */
153 uint_t zfs_vdev_max_auto_ashift = 14;
154 uint_t zfs_vdev_min_auto_ashift = ASHIFT_MIN;
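/*
* For reference (ASHIFT_MIN is 9): ashift 9 corresponds to 512-byte
* sectors, 12 to 4 KiB, 13 to 8 KiB, and 14 (the automatic maximum above)
* to 16 KiB. Larger values can still be forced by a device's logical
* sector size or via the ashift property, as noted above.
*/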
155
156 void
157 vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
158 {
159 va_list adx;
160 char buf[256];
161
162 va_start(adx, fmt);
163 (void) vsnprintf(buf, sizeof (buf), fmt, adx);
164 va_end(adx);
165
166 if (vd->vdev_path != NULL) {
167 zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
168 vd->vdev_path, buf);
169 } else {
170 zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
171 vd->vdev_ops->vdev_op_type,
172 (u_longlong_t)vd->vdev_id,
173 (u_longlong_t)vd->vdev_guid, buf);
174 }
175 }
176
177 void
178 vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
179 {
180 char state[20];
181
182 if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
183 zfs_dbgmsg("%*svdev %llu: %s", indent, "",
184 (u_longlong_t)vd->vdev_id,
185 vd->vdev_ops->vdev_op_type);
186 return;
187 }
188
189 switch (vd->vdev_state) {
190 case VDEV_STATE_UNKNOWN:
191 (void) snprintf(state, sizeof (state), "unknown");
192 break;
193 case VDEV_STATE_CLOSED:
194 (void) snprintf(state, sizeof (state), "closed");
195 break;
196 case VDEV_STATE_OFFLINE:
197 (void) snprintf(state, sizeof (state), "offline");
198 break;
199 case VDEV_STATE_REMOVED:
200 (void) snprintf(state, sizeof (state), "removed");
201 break;
202 case VDEV_STATE_CANT_OPEN:
203 (void) snprintf(state, sizeof (state), "can't open");
204 break;
205 case VDEV_STATE_FAULTED:
206 (void) snprintf(state, sizeof (state), "faulted");
207 break;
208 case VDEV_STATE_DEGRADED:
209 (void) snprintf(state, sizeof (state), "degraded");
210 break;
211 case VDEV_STATE_HEALTHY:
212 (void) snprintf(state, sizeof (state), "healthy");
213 break;
214 default:
215 (void) snprintf(state, sizeof (state), "<state %u>",
216 (uint_t)vd->vdev_state);
217 }
218
219 zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
220 "", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
221 vd->vdev_islog ? " (log)" : "",
222 (u_longlong_t)vd->vdev_guid,
223 vd->vdev_path ? vd->vdev_path : "N/A", state);
224
225 for (uint64_t i = 0; i < vd->vdev_children; i++)
226 vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
227 }
228
229 /*
230 * Virtual device management.
231 */
232
233 static vdev_ops_t *const vdev_ops_table[] = {
234 &vdev_root_ops,
235 &vdev_raidz_ops,
236 &vdev_draid_ops,
237 &vdev_draid_spare_ops,
238 &vdev_mirror_ops,
239 &vdev_replacing_ops,
240 &vdev_spare_ops,
241 &vdev_disk_ops,
242 &vdev_file_ops,
243 &vdev_missing_ops,
244 &vdev_hole_ops,
245 &vdev_indirect_ops,
246 NULL
247 };
248
249 /*
250 * Given a vdev type, return the appropriate ops vector.
251 */
252 static vdev_ops_t *
253 vdev_getops(const char *type)
254 {
255 vdev_ops_t *ops, *const *opspp;
256
257 for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
258 if (strcmp(ops->vdev_op_type, type) == 0)
259 break;
260
261 return (ops);
262 }
263
264 /*
265 * Given a vdev and a metaslab class, find which metaslab group we're
266 * interested in. All vdevs may belong to two different metaslab classes.
267 * Dedicated slog devices use only the primary metaslab group, rather than a
268 * separate log group. For embedded slogs, the vdev_log_mg will be non-NULL.
269 */
270 metaslab_group_t *
271 vdev_get_mg(vdev_t *vd, metaslab_class_t *mc)
272 {
273 if (mc == spa_embedded_log_class(vd->vdev_spa) &&
274 vd->vdev_log_mg != NULL)
275 return (vd->vdev_log_mg);
276 else
277 return (vd->vdev_mg);
278 }
279
280 void
281 vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
282 range_seg64_t *physical_rs, range_seg64_t *remain_rs)
283 {
284 (void) vd, (void) remain_rs;
285
286 physical_rs->rs_start = logical_rs->rs_start;
287 physical_rs->rs_end = logical_rs->rs_end;
288 }
289
290 /*
291 * Derive the enumerated allocation bias from string input.
292 * String origin is either the per-vdev zap or zpool(8).
293 */
294 static vdev_alloc_bias_t
295 vdev_derive_alloc_bias(const char *bias)
296 {
297 vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
298
299 if (strcmp(bias, VDEV_ALLOC_BIAS_LOG) == 0)
300 alloc_bias = VDEV_BIAS_LOG;
301 else if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
302 alloc_bias = VDEV_BIAS_SPECIAL;
303 else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
304 alloc_bias = VDEV_BIAS_DEDUP;
305
306 return (alloc_bias);
307 }
308
309 /*
310 * Default asize function: return the MAX of psize with the asize of
311 * all children. This is what's used by anything other than RAID-Z.
312 */
313 uint64_t
314 vdev_default_asize(vdev_t *vd, uint64_t psize, uint64_t txg)
315 {
316 uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
317 uint64_t csize;
318
319 for (int c = 0; c < vd->vdev_children; c++) {
320 csize = vdev_psize_to_asize_txg(vd->vdev_child[c], psize, txg);
321 asize = MAX(asize, csize);
322 }
323
324 return (asize);
325 }
326
327 uint64_t
328 vdev_default_min_asize(vdev_t *vd)
329 {
330 return (vd->vdev_min_asize);
331 }
332
333 /*
334 * Get the minimum allocatable size. We define the allocatable size as
335 * the vdev's asize rounded to the nearest metaslab. This allows us to
336 * replace or attach devices which don't have the same physical size but
337 * can still satisfy the same number of allocations.
338 */
339 uint64_t
340 vdev_get_min_asize(vdev_t *vd)
341 {
342 vdev_t *pvd = vd->vdev_parent;
343
344 /*
345 * If our parent is NULL (inactive spare or cache) or is the root,
346 * just return our own asize.
347 */
348 if (pvd == NULL)
349 return (vd->vdev_asize);
350
351 /*
352 * The top-level vdev just returns the allocatable size rounded
353 * to the nearest metaslab.
354 */
355 if (vd == vd->vdev_top)
356 return (P2ALIGN_TYPED(vd->vdev_asize, 1ULL << vd->vdev_ms_shift,
357 uint64_t));
358
359 return (pvd->vdev_ops->vdev_op_min_asize(pvd));
360 }
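/*
* Worked example (illustrative): with a metaslab shift of 29 (512 MiB
* metaslabs), a top-level vdev asize of 100 GiB + 300 MiB aligns down to
* 100 GiB, i.e. 200 whole metaslabs; the trailing 300 MiB does not count
* toward the minimum allocatable size.
*/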
361
362 void
363 vdev_set_min_asize(vdev_t *vd)
364 {
365 vd->vdev_min_asize = vdev_get_min_asize(vd);
366
367 for (int c = 0; c < vd->vdev_children; c++)
368 vdev_set_min_asize(vd->vdev_child[c]);
369 }
370
371 /*
372 * Get the minimal allocation size for the top-level vdev.
373 */
374 uint64_t
375 vdev_get_min_alloc(vdev_t *vd)
376 {
377 uint64_t min_alloc = 1ULL << vd->vdev_ashift;
378
379 if (vd->vdev_ops->vdev_op_min_alloc != NULL)
380 min_alloc = vd->vdev_ops->vdev_op_min_alloc(vd);
381
382 return (min_alloc);
383 }
384
385 /*
386 * Get the parity level for a top-level vdev.
387 */
388 uint64_t
389 vdev_get_nparity(vdev_t *vd)
390 {
391 uint64_t nparity = 0;
392
393 if (vd->vdev_ops->vdev_op_nparity != NULL)
394 nparity = vd->vdev_ops->vdev_op_nparity(vd);
395
396 return (nparity);
397 }
398
399 static int
400 vdev_prop_get_int(vdev_t *vd, vdev_prop_t prop, uint64_t *value)
401 {
402 spa_t *spa = vd->vdev_spa;
403 objset_t *mos = spa->spa_meta_objset;
404 uint64_t objid;
405 int err;
406
407 if (vd->vdev_root_zap != 0) {
408 objid = vd->vdev_root_zap;
409 } else if (vd->vdev_top_zap != 0) {
410 objid = vd->vdev_top_zap;
411 } else if (vd->vdev_leaf_zap != 0) {
412 objid = vd->vdev_leaf_zap;
413 } else {
414 return (EINVAL);
415 }
416
417 err = zap_lookup(mos, objid, vdev_prop_to_name(prop),
418 sizeof (uint64_t), 1, value);
419
420 if (err == ENOENT)
421 *value = vdev_prop_default_numeric(prop);
422
423 return (err);
424 }
425
426 /*
427 * Get the number of data disks for a top-level vdev.
428 */
429 uint64_t
430 vdev_get_ndisks(vdev_t *vd)
431 {
432 uint64_t ndisks = 1;
433
434 if (vd->vdev_ops->vdev_op_ndisks != NULL)
435 ndisks = vd->vdev_ops->vdev_op_ndisks(vd);
436
437 return (ndisks);
438 }
439
440 vdev_t *
441 vdev_lookup_top(spa_t *spa, uint64_t vdev)
442 {
443 vdev_t *rvd = spa->spa_root_vdev;
444
445 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
446
447 if (vdev < rvd->vdev_children) {
448 ASSERT(rvd->vdev_child[vdev] != NULL);
449 return (rvd->vdev_child[vdev]);
450 }
451
452 return (NULL);
453 }
454
455 vdev_t *
456 vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
457 {
458 vdev_t *mvd;
459
460 if (vd->vdev_guid == guid)
461 return (vd);
462
463 for (int c = 0; c < vd->vdev_children; c++)
464 if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
465 NULL)
466 return (mvd);
467
468 return (NULL);
469 }
470
471 static int
472 vdev_count_leaves_impl(vdev_t *vd)
473 {
474 int n = 0;
475
476 if (vd->vdev_ops->vdev_op_leaf)
477 return (1);
478
479 for (int c = 0; c < vd->vdev_children; c++)
480 n += vdev_count_leaves_impl(vd->vdev_child[c]);
481
482 return (n);
483 }
484
485 int
486 vdev_count_leaves(spa_t *spa)
487 {
488 int rc;
489
490 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
491 rc = vdev_count_leaves_impl(spa->spa_root_vdev);
492 spa_config_exit(spa, SCL_VDEV, FTAG);
493
494 return (rc);
495 }
496
497 void
498 vdev_add_child(vdev_t *pvd, vdev_t *cvd)
499 {
500 size_t oldsize, newsize;
501 uint64_t id = cvd->vdev_id;
502 vdev_t **newchild;
503
504 ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
505 ASSERT(cvd->vdev_parent == NULL);
506
507 cvd->vdev_parent = pvd;
508
509 if (pvd == NULL)
510 return;
511
512 ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);
513
514 oldsize = pvd->vdev_children * sizeof (vdev_t *);
515 pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
516 newsize = pvd->vdev_children * sizeof (vdev_t *);
517
518 newchild = kmem_alloc(newsize, KM_SLEEP);
519 if (pvd->vdev_child != NULL) {
520 memcpy(newchild, pvd->vdev_child, oldsize);
521 kmem_free(pvd->vdev_child, oldsize);
522 }
523
524 pvd->vdev_child = newchild;
525 pvd->vdev_child[id] = cvd;
526
527 cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
528 ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
529
530 /*
531 * Walk up all ancestors to update guid sum.
532 */
533 for (; pvd != NULL; pvd = pvd->vdev_parent)
534 pvd->vdev_guid_sum += cvd->vdev_guid_sum;
535
536 if (cvd->vdev_ops->vdev_op_leaf) {
537 list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd);
538 cvd->vdev_spa->spa_leaf_list_gen++;
539 }
540 }
541
542 void
543 vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
544 {
545 int c;
546 uint_t id = cvd->vdev_id;
547
548 ASSERT(cvd->vdev_parent == pvd);
549
550 if (pvd == NULL)
551 return;
552
553 ASSERT(id < pvd->vdev_children);
554 ASSERT(pvd->vdev_child[id] == cvd);
555
556 pvd->vdev_child[id] = NULL;
557 cvd->vdev_parent = NULL;
558
559 for (c = 0; c < pvd->vdev_children; c++)
560 if (pvd->vdev_child[c])
561 break;
562
563 if (c == pvd->vdev_children) {
564 kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
565 pvd->vdev_child = NULL;
566 pvd->vdev_children = 0;
567 }
568
569 if (cvd->vdev_ops->vdev_op_leaf) {
570 spa_t *spa = cvd->vdev_spa;
571 list_remove(&spa->spa_leaf_list, cvd);
572 spa->spa_leaf_list_gen++;
573 }
574
575 /*
576 * Walk up all ancestors to update guid sum.
577 */
578 for (; pvd != NULL; pvd = pvd->vdev_parent)
579 pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
580 }
581
582 /*
583 * Remove any holes in the child array.
584 */
585 void
586 vdev_compact_children(vdev_t *pvd)
587 {
588 vdev_t **newchild, *cvd;
589 int oldc = pvd->vdev_children;
590 int newc;
591
592 ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
593
594 if (oldc == 0)
595 return;
596
597 for (int c = newc = 0; c < oldc; c++)
598 if (pvd->vdev_child[c])
599 newc++;
600
601 if (newc > 0) {
602 newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);
603
604 for (int c = newc = 0; c < oldc; c++) {
605 if ((cvd = pvd->vdev_child[c]) != NULL) {
606 newchild[newc] = cvd;
607 cvd->vdev_id = newc++;
608 }
609 }
610 } else {
611 newchild = NULL;
612 }
613
614 kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
615 pvd->vdev_child = newchild;
616 pvd->vdev_children = newc;
617 }
618
619 /*
620 * Allocate and minimally initialize a vdev_t.
621 */
622 vdev_t *
623 vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
624 {
625 vdev_t *vd;
626 vdev_indirect_config_t *vic;
627
628 vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
629 vic = &vd->vdev_indirect_config;
630
631 if (spa->spa_root_vdev == NULL) {
632 ASSERT(ops == &vdev_root_ops);
633 spa->spa_root_vdev = vd;
634 spa->spa_load_guid = spa_generate_guid(NULL);
635 }
636
637 if (guid == 0 && ops != &vdev_hole_ops) {
638 if (spa->spa_root_vdev == vd) {
639 /*
640 * The root vdev's guid will also be the pool guid,
641 * which must be unique among all pools.
642 */
643 guid = spa_generate_guid(NULL);
644 } else {
645 /*
646 * Any other vdev's guid must be unique within the pool.
647 */
648 guid = spa_generate_guid(spa);
649 }
650 ASSERT(!spa_guid_exists(spa_guid(spa), guid));
651 }
652
653 vd->vdev_spa = spa;
654 vd->vdev_id = id;
655 vd->vdev_guid = guid;
656 vd->vdev_guid_sum = guid;
657 vd->vdev_ops = ops;
658 vd->vdev_state = VDEV_STATE_CLOSED;
659 vd->vdev_ishole = (ops == &vdev_hole_ops);
660 vic->vic_prev_indirect_vdev = UINT64_MAX;
661
662 rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
663 mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
664 vd->vdev_obsolete_segments = range_tree_create(NULL, RANGE_SEG64, NULL,
665 0, 0);
666
667 /*
668 * Initialize rate limit structs for events. We rate limit ZIO delay
669 * and checksum events so that we don't overwhelm ZED with thousands
670 * of events when a disk is acting up.
671 */
672 zfs_ratelimit_init(&vd->vdev_delay_rl, &zfs_slow_io_events_per_second,
673 1);
674 zfs_ratelimit_init(&vd->vdev_deadman_rl, &zfs_deadman_events_per_second,
675 1);
676 zfs_ratelimit_init(&vd->vdev_checksum_rl,
677 &zfs_checksum_events_per_second, 1);
678
679 /*
680 * Default Thresholds for tuning ZED
681 */
682 vd->vdev_checksum_n = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_N);
683 vd->vdev_checksum_t = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_T);
684 vd->vdev_io_n = vdev_prop_default_numeric(VDEV_PROP_IO_N);
685 vd->vdev_io_t = vdev_prop_default_numeric(VDEV_PROP_IO_T);
686 vd->vdev_slow_io_n = vdev_prop_default_numeric(VDEV_PROP_SLOW_IO_N);
687 vd->vdev_slow_io_t = vdev_prop_default_numeric(VDEV_PROP_SLOW_IO_T);
688
689 list_link_init(&vd->vdev_config_dirty_node);
690 list_link_init(&vd->vdev_state_dirty_node);
691 list_link_init(&vd->vdev_initialize_node);
692 list_link_init(&vd->vdev_leaf_node);
693 list_link_init(&vd->vdev_trim_node);
694
695 mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
696 mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
697 mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
698 mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL);
699
700 mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
701 mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL);
702 cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL);
703 cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL);
704
705 mutex_init(&vd->vdev_trim_lock, NULL, MUTEX_DEFAULT, NULL);
706 mutex_init(&vd->vdev_autotrim_lock, NULL, MUTEX_DEFAULT, NULL);
707 mutex_init(&vd->vdev_trim_io_lock, NULL, MUTEX_DEFAULT, NULL);
708 cv_init(&vd->vdev_trim_cv, NULL, CV_DEFAULT, NULL);
709 cv_init(&vd->vdev_autotrim_cv, NULL, CV_DEFAULT, NULL);
710 cv_init(&vd->vdev_autotrim_kick_cv, NULL, CV_DEFAULT, NULL);
711 cv_init(&vd->vdev_trim_io_cv, NULL, CV_DEFAULT, NULL);
712
713 mutex_init(&vd->vdev_rebuild_lock, NULL, MUTEX_DEFAULT, NULL);
714 cv_init(&vd->vdev_rebuild_cv, NULL, CV_DEFAULT, NULL);
715
716 for (int t = 0; t < DTL_TYPES; t++) {
717 vd->vdev_dtl[t] = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
718 0);
719 }
720
721 txg_list_create(&vd->vdev_ms_list, spa,
722 offsetof(struct metaslab, ms_txg_node));
723 txg_list_create(&vd->vdev_dtl_list, spa,
724 offsetof(struct vdev, vdev_dtl_node));
725 vd->vdev_stat.vs_timestamp = gethrtime();
726 vdev_queue_init(vd);
727
728 return (vd);
729 }
730
731 /*
732 * Allocate a new vdev. The 'alloctype' is used to control whether we are
733 * creating a new vdev or loading an existing one - the behavior is slightly
734 * different for each case.
735 */
736 int
737 vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
738 int alloctype)
739 {
740 vdev_ops_t *ops;
741 const char *type;
742 uint64_t guid = 0, islog;
743 vdev_t *vd;
744 vdev_indirect_config_t *vic;
745 const char *tmp = NULL;
746 int rc;
747 vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
748 boolean_t top_level = (parent && !parent->vdev_parent);
749
750 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
751
752 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
753 return (SET_ERROR(EINVAL));
754
755 if ((ops = vdev_getops(type)) == NULL)
756 return (SET_ERROR(EINVAL));
757
758 /*
759 * If this is a load, get the vdev guid from the nvlist.
760 * Otherwise, vdev_alloc_common() will generate one for us.
761 */
762 if (alloctype == VDEV_ALLOC_LOAD) {
763 uint64_t label_id;
764
765 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
766 label_id != id)
767 return (SET_ERROR(EINVAL));
768
769 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
770 return (SET_ERROR(EINVAL));
771 } else if (alloctype == VDEV_ALLOC_SPARE) {
772 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
773 return (SET_ERROR(EINVAL));
774 } else if (alloctype == VDEV_ALLOC_L2CACHE) {
775 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
776 return (SET_ERROR(EINVAL));
777 } else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
778 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
779 return (SET_ERROR(EINVAL));
780 }
781
782 /*
783 * The first allocated vdev must be of type 'root'.
784 */
785 if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
786 return (SET_ERROR(EINVAL));
787
788 /*
789 * Determine whether we're a log vdev.
790 */
791 islog = 0;
792 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
793 if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
794 return (SET_ERROR(ENOTSUP));
795
796 if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
797 return (SET_ERROR(ENOTSUP));
798
799 if (top_level && alloctype == VDEV_ALLOC_ADD) {
800 const char *bias;
801
802 /*
803 * If creating a top-level vdev, check for allocation
804 * classes input.
805 */
806 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
807 &bias) == 0) {
808 alloc_bias = vdev_derive_alloc_bias(bias);
809
810 /* spa_vdev_add() expects feature to be enabled */
811 if (spa->spa_load_state != SPA_LOAD_CREATE &&
812 !spa_feature_is_enabled(spa,
813 SPA_FEATURE_ALLOCATION_CLASSES)) {
814 return (SET_ERROR(ENOTSUP));
815 }
816 }
817
818 /* spa_vdev_add() expects feature to be enabled */
819 if (ops == &vdev_draid_ops &&
820 spa->spa_load_state != SPA_LOAD_CREATE &&
821 !spa_feature_is_enabled(spa, SPA_FEATURE_DRAID)) {
822 return (SET_ERROR(ENOTSUP));
823 }
824 }
825
826 /*
827 * Initialize the vdev specific data. This is done before calling
828 * vdev_alloc_common() since it may fail and this simplifies the
829 * error reporting and cleanup code paths.
830 */
831 void *tsd = NULL;
832 if (ops->vdev_op_init != NULL) {
833 rc = ops->vdev_op_init(spa, nv, &tsd);
834 if (rc != 0) {
835 return (rc);
836 }
837 }
838
839 vd = vdev_alloc_common(spa, id, guid, ops);
840 vd->vdev_tsd = tsd;
841 vd->vdev_islog = islog;
842
843 if (top_level && alloc_bias != VDEV_BIAS_NONE)
844 vd->vdev_alloc_bias = alloc_bias;
845
846 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tmp) == 0)
847 vd->vdev_path = spa_strdup(tmp);
848
849 /*
850 * ZPOOL_CONFIG_AUX_STATE = "external" means we previously forced a
851 * fault on a vdev and want it to persist across imports (like with
852 * zpool offline -f).
853 */
854 rc = nvlist_lookup_string(nv, ZPOOL_CONFIG_AUX_STATE, &tmp);
855 if (rc == 0 && tmp != NULL && strcmp(tmp, "external") == 0) {
856 vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
857 vd->vdev_faulted = 1;
858 vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
859 }
860
861 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &tmp) == 0)
862 vd->vdev_devid = spa_strdup(tmp);
863 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH, &tmp) == 0)
864 vd->vdev_physpath = spa_strdup(tmp);
865
866 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
867 &tmp) == 0)
868 vd->vdev_enc_sysfs_path = spa_strdup(tmp);
869
870 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &tmp) == 0)
871 vd->vdev_fru = spa_strdup(tmp);
872
873 /*
874 * Set the whole_disk property. If it's not specified, leave the value
875 * as -1.
876 */
877 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
878 &vd->vdev_wholedisk) != 0)
879 vd->vdev_wholedisk = -1ULL;
880
881 vic = &vd->vdev_indirect_config;
882
883 ASSERT0(vic->vic_mapping_object);
884 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
885 &vic->vic_mapping_object);
886 ASSERT0(vic->vic_births_object);
887 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
888 &vic->vic_births_object);
889 ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
890 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
891 &vic->vic_prev_indirect_vdev);
892
893 /*
894 * Look for the 'not present' flag. This will only be set if the device
895 * was not present at the time of import.
896 */
897 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
898 &vd->vdev_not_present);
899
900 /*
901 * Get the alignment requirement. Ignore pool ashift for vdev
902 * attach case.
903 */
904 if (alloctype != VDEV_ALLOC_ATTACH) {
905 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT,
906 &vd->vdev_ashift);
907 } else {
908 vd->vdev_attaching = B_TRUE;
909 }
910
911 /*
912 * Retrieve the vdev creation time.
913 */
914 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
915 &vd->vdev_crtxg);
916
917 if (vd->vdev_ops == &vdev_root_ops &&
918 (alloctype == VDEV_ALLOC_LOAD ||
919 alloctype == VDEV_ALLOC_SPLIT ||
920 alloctype == VDEV_ALLOC_ROOTPOOL)) {
921 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_ROOT_ZAP,
922 &vd->vdev_root_zap);
923 }
924
925 /*
926 * If we're a top-level vdev, try to load the allocation parameters.
927 */
928 if (top_level &&
929 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
930 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
931 &vd->vdev_ms_array);
932 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
933 &vd->vdev_ms_shift);
934 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
935 &vd->vdev_asize);
936 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NONALLOCATING,
937 &vd->vdev_noalloc);
938 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
939 &vd->vdev_removing);
940 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
941 &vd->vdev_top_zap);
942 vd->vdev_rz_expanding = nvlist_exists(nv,
943 ZPOOL_CONFIG_RAIDZ_EXPANDING);
944 } else {
945 ASSERT0(vd->vdev_top_zap);
946 }
947
948 if (top_level && alloctype != VDEV_ALLOC_ATTACH) {
949 ASSERT(alloctype == VDEV_ALLOC_LOAD ||
950 alloctype == VDEV_ALLOC_ADD ||
951 alloctype == VDEV_ALLOC_SPLIT ||
952 alloctype == VDEV_ALLOC_ROOTPOOL);
953 /* Note: metaslab_group_create() is now deferred */
954 }
955
956 if (vd->vdev_ops->vdev_op_leaf &&
957 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
958 (void) nvlist_lookup_uint64(nv,
959 ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
960 } else {
961 ASSERT0(vd->vdev_leaf_zap);
962 }
963
964 /*
965 * If we're a leaf vdev, try to load the DTL object and other state.
966 */
967
968 if (vd->vdev_ops->vdev_op_leaf &&
969 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
970 alloctype == VDEV_ALLOC_ROOTPOOL)) {
971 if (alloctype == VDEV_ALLOC_LOAD) {
972 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
973 &vd->vdev_dtl_object);
974 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
975 &vd->vdev_unspare);
976 }
977
978 if (alloctype == VDEV_ALLOC_ROOTPOOL) {
979 uint64_t spare = 0;
980
981 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
982 &spare) == 0 && spare)
983 spa_spare_add(vd);
984 }
985
986 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
987 &vd->vdev_offline);
988
989 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
990 &vd->vdev_resilver_txg);
991
992 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REBUILD_TXG,
993 &vd->vdev_rebuild_txg);
994
995 if (nvlist_exists(nv, ZPOOL_CONFIG_RESILVER_DEFER))
996 vdev_defer_resilver(vd);
997
998 /*
999 * In general, when importing a pool we want to ignore the
1000 * persistent fault state, as the diagnosis made on another
1001 * system may not be valid in the current context. The only
1002 * exception is if we forced a vdev to a persistently faulted
1003 * state with 'zpool offline -f'. The persistent fault will
1004 * remain across imports until cleared.
1005 *
1006 * Local vdevs will remain in the faulted state.
1007 */
1008 if (spa_load_state(spa) == SPA_LOAD_OPEN ||
1009 spa_load_state(spa) == SPA_LOAD_IMPORT) {
1010 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
1011 &vd->vdev_faulted);
1012 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
1013 &vd->vdev_degraded);
1014 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
1015 &vd->vdev_removed);
1016
1017 if (vd->vdev_faulted || vd->vdev_degraded) {
1018 const char *aux;
1019
1020 vd->vdev_label_aux =
1021 VDEV_AUX_ERR_EXCEEDED;
1022 if (nvlist_lookup_string(nv,
1023 ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
1024 strcmp(aux, "external") == 0)
1025 vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
1026 else
1027 vd->vdev_faulted = 0ULL;
1028 }
1029 }
1030 }
1031
1032 /*
1033 * Add ourselves to the parent's list of children.
1034 */
1035 vdev_add_child(parent, vd);
1036
1037 *vdp = vd;
1038
1039 return (0);
1040 }
1041
1042 void
1043 vdev_free(vdev_t *vd)
1044 {
1045 spa_t *spa = vd->vdev_spa;
1046
1047 ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
1048 ASSERT3P(vd->vdev_trim_thread, ==, NULL);
1049 ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
1050 ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
1051
1052 /*
1053 * Scan queues are normally destroyed at the end of a scan. If the
1054 * queue exists here, that implies the vdev is being removed while
1055 * the scan is still running.
1056 */
1057 if (vd->vdev_scan_io_queue != NULL) {
1058 mutex_enter(&vd->vdev_scan_io_queue_lock);
1059 dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue);
1060 vd->vdev_scan_io_queue = NULL;
1061 mutex_exit(&vd->vdev_scan_io_queue_lock);
1062 }
1063
1064 /*
1065 * vdev_free() implies closing the vdev first. This is simpler than
1066 * trying to ensure complicated semantics for all callers.
1067 */
1068 vdev_close(vd);
1069
1070 ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
1071 ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
1072
1073 /*
1074 * Free all children.
1075 */
1076 for (int c = 0; c < vd->vdev_children; c++)
1077 vdev_free(vd->vdev_child[c]);
1078
1079 ASSERT(vd->vdev_child == NULL);
1080 ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
1081
1082 if (vd->vdev_ops->vdev_op_fini != NULL)
1083 vd->vdev_ops->vdev_op_fini(vd);
1084
1085 /*
1086 * Discard allocation state.
1087 */
1088 if (vd->vdev_mg != NULL) {
1089 vdev_metaslab_fini(vd);
1090 metaslab_group_destroy(vd->vdev_mg);
1091 vd->vdev_mg = NULL;
1092 }
1093 if (vd->vdev_log_mg != NULL) {
1094 ASSERT0(vd->vdev_ms_count);
1095 metaslab_group_destroy(vd->vdev_log_mg);
1096 vd->vdev_log_mg = NULL;
1097 }
1098
1099 ASSERT0(vd->vdev_stat.vs_space);
1100 ASSERT0(vd->vdev_stat.vs_dspace);
1101 ASSERT0(vd->vdev_stat.vs_alloc);
1102
1103 /*
1104 * Remove this vdev from its parent's child list.
1105 */
1106 vdev_remove_child(vd->vdev_parent, vd);
1107
1108 ASSERT(vd->vdev_parent == NULL);
1109 ASSERT(!list_link_active(&vd->vdev_leaf_node));
1110
1111 /*
1112 * Clean up vdev structure.
1113 */
1114 vdev_queue_fini(vd);
1115
1116 if (vd->vdev_path)
1117 spa_strfree(vd->vdev_path);
1118 if (vd->vdev_devid)
1119 spa_strfree(vd->vdev_devid);
1120 if (vd->vdev_physpath)
1121 spa_strfree(vd->vdev_physpath);
1122
1123 if (vd->vdev_enc_sysfs_path)
1124 spa_strfree(vd->vdev_enc_sysfs_path);
1125
1126 if (vd->vdev_fru)
1127 spa_strfree(vd->vdev_fru);
1128
1129 if (vd->vdev_isspare)
1130 spa_spare_remove(vd);
1131 if (vd->vdev_isl2cache)
1132 spa_l2cache_remove(vd);
1133
1134 txg_list_destroy(&vd->vdev_ms_list);
1135 txg_list_destroy(&vd->vdev_dtl_list);
1136
1137 mutex_enter(&vd->vdev_dtl_lock);
1138 space_map_close(vd->vdev_dtl_sm);
1139 for (int t = 0; t < DTL_TYPES; t++) {
1140 range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
1141 range_tree_destroy(vd->vdev_dtl[t]);
1142 }
1143 mutex_exit(&vd->vdev_dtl_lock);
1144
1145 EQUIV(vd->vdev_indirect_births != NULL,
1146 vd->vdev_indirect_mapping != NULL);
1147 if (vd->vdev_indirect_births != NULL) {
1148 vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
1149 vdev_indirect_births_close(vd->vdev_indirect_births);
1150 }
1151
1152 if (vd->vdev_obsolete_sm != NULL) {
1153 ASSERT(vd->vdev_removing ||
1154 vd->vdev_ops == &vdev_indirect_ops);
1155 space_map_close(vd->vdev_obsolete_sm);
1156 vd->vdev_obsolete_sm = NULL;
1157 }
1158 range_tree_destroy(vd->vdev_obsolete_segments);
1159 rw_destroy(&vd->vdev_indirect_rwlock);
1160 mutex_destroy(&vd->vdev_obsolete_lock);
1161
1162 mutex_destroy(&vd->vdev_dtl_lock);
1163 mutex_destroy(&vd->vdev_stat_lock);
1164 mutex_destroy(&vd->vdev_probe_lock);
1165 mutex_destroy(&vd->vdev_scan_io_queue_lock);
1166
1167 mutex_destroy(&vd->vdev_initialize_lock);
1168 mutex_destroy(&vd->vdev_initialize_io_lock);
1169 cv_destroy(&vd->vdev_initialize_io_cv);
1170 cv_destroy(&vd->vdev_initialize_cv);
1171
1172 mutex_destroy(&vd->vdev_trim_lock);
1173 mutex_destroy(&vd->vdev_autotrim_lock);
1174 mutex_destroy(&vd->vdev_trim_io_lock);
1175 cv_destroy(&vd->vdev_trim_cv);
1176 cv_destroy(&vd->vdev_autotrim_cv);
1177 cv_destroy(&vd->vdev_autotrim_kick_cv);
1178 cv_destroy(&vd->vdev_trim_io_cv);
1179
1180 mutex_destroy(&vd->vdev_rebuild_lock);
1181 cv_destroy(&vd->vdev_rebuild_cv);
1182
1183 zfs_ratelimit_fini(&vd->vdev_delay_rl);
1184 zfs_ratelimit_fini(&vd->vdev_deadman_rl);
1185 zfs_ratelimit_fini(&vd->vdev_checksum_rl);
1186
1187 if (vd == spa->spa_root_vdev)
1188 spa->spa_root_vdev = NULL;
1189
1190 kmem_free(vd, sizeof (vdev_t));
1191 }
1192
1193 /*
1194 * Transfer top-level vdev state from svd to tvd.
1195 */
1196 static void
1197 vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
1198 {
1199 spa_t *spa = svd->vdev_spa;
1200 metaslab_t *msp;
1201 vdev_t *vd;
1202 int t;
1203
1204 ASSERT(tvd == tvd->vdev_top);
1205
1206 tvd->vdev_ms_array = svd->vdev_ms_array;
1207 tvd->vdev_ms_shift = svd->vdev_ms_shift;
1208 tvd->vdev_ms_count = svd->vdev_ms_count;
1209 tvd->vdev_top_zap = svd->vdev_top_zap;
1210
1211 svd->vdev_ms_array = 0;
1212 svd->vdev_ms_shift = 0;
1213 svd->vdev_ms_count = 0;
1214 svd->vdev_top_zap = 0;
1215
1216 if (tvd->vdev_mg)
1217 ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
1218 if (tvd->vdev_log_mg)
1219 ASSERT3P(tvd->vdev_log_mg, ==, svd->vdev_log_mg);
1220 tvd->vdev_mg = svd->vdev_mg;
1221 tvd->vdev_log_mg = svd->vdev_log_mg;
1222 tvd->vdev_ms = svd->vdev_ms;
1223
1224 svd->vdev_mg = NULL;
1225 svd->vdev_log_mg = NULL;
1226 svd->vdev_ms = NULL;
1227
1228 if (tvd->vdev_mg != NULL)
1229 tvd->vdev_mg->mg_vd = tvd;
1230 if (tvd->vdev_log_mg != NULL)
1231 tvd->vdev_log_mg->mg_vd = tvd;
1232
1233 tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
1234 svd->vdev_checkpoint_sm = NULL;
1235
1236 tvd->vdev_alloc_bias = svd->vdev_alloc_bias;
1237 svd->vdev_alloc_bias = VDEV_BIAS_NONE;
1238
1239 tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
1240 tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
1241 tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;
1242
1243 svd->vdev_stat.vs_alloc = 0;
1244 svd->vdev_stat.vs_space = 0;
1245 svd->vdev_stat.vs_dspace = 0;
1246
1247 /*
1248 * State which may be set on a top-level vdev that's in the
1249 * process of being removed.
1250 */
1251 ASSERT0(tvd->vdev_indirect_config.vic_births_object);
1252 ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
1253 ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
1254 ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
1255 ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
1256 ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
1257 ASSERT0(tvd->vdev_noalloc);
1258 ASSERT0(tvd->vdev_removing);
1259 ASSERT0(tvd->vdev_rebuilding);
1260 tvd->vdev_noalloc = svd->vdev_noalloc;
1261 tvd->vdev_removing = svd->vdev_removing;
1262 tvd->vdev_rebuilding = svd->vdev_rebuilding;
1263 tvd->vdev_rebuild_config = svd->vdev_rebuild_config;
1264 tvd->vdev_indirect_config = svd->vdev_indirect_config;
1265 tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping;
1266 tvd->vdev_indirect_births = svd->vdev_indirect_births;
1267 range_tree_swap(&svd->vdev_obsolete_segments,
1268 &tvd->vdev_obsolete_segments);
1269 tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm;
1270 svd->vdev_indirect_config.vic_mapping_object = 0;
1271 svd->vdev_indirect_config.vic_births_object = 0;
1272 svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL;
1273 svd->vdev_indirect_mapping = NULL;
1274 svd->vdev_indirect_births = NULL;
1275 svd->vdev_obsolete_sm = NULL;
1276 svd->vdev_noalloc = 0;
1277 svd->vdev_removing = 0;
1278 svd->vdev_rebuilding = 0;
1279
1280 for (t = 0; t < TXG_SIZE; t++) {
1281 while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
1282 (void) txg_list_add(&tvd->vdev_ms_list, msp, t);
1283 while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
1284 (void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
1285 if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
1286 (void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
1287 }
1288
1289 if (list_link_active(&svd->vdev_config_dirty_node)) {
1290 vdev_config_clean(svd);
1291 vdev_config_dirty(tvd);
1292 }
1293
1294 if (list_link_active(&svd->vdev_state_dirty_node)) {
1295 vdev_state_clean(svd);
1296 vdev_state_dirty(tvd);
1297 }
1298
1299 tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
1300 svd->vdev_deflate_ratio = 0;
1301
1302 tvd->vdev_islog = svd->vdev_islog;
1303 svd->vdev_islog = 0;
1304
1305 dsl_scan_io_queue_vdev_xfer(svd, tvd);
1306 }
1307
1308 static void
1309 vdev_top_update(vdev_t *tvd, vdev_t *vd)
1310 {
1311 if (vd == NULL)
1312 return;
1313
1314 vd->vdev_top = tvd;
1315
1316 for (int c = 0; c < vd->vdev_children; c++)
1317 vdev_top_update(tvd, vd->vdev_child[c]);
1318 }
1319
1320 /*
1321 * Add a mirror/replacing vdev above an existing vdev. There is no need to
1322 * call .vdev_op_init() since mirror/replacing vdevs do not have private state.
1323 */
1324 vdev_t *
1325 vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
1326 {
1327 spa_t *spa = cvd->vdev_spa;
1328 vdev_t *pvd = cvd->vdev_parent;
1329 vdev_t *mvd;
1330
1331 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1332
1333 mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
1334
1335 mvd->vdev_asize = cvd->vdev_asize;
1336 mvd->vdev_min_asize = cvd->vdev_min_asize;
1337 mvd->vdev_max_asize = cvd->vdev_max_asize;
1338 mvd->vdev_psize = cvd->vdev_psize;
1339 mvd->vdev_ashift = cvd->vdev_ashift;
1340 mvd->vdev_logical_ashift = cvd->vdev_logical_ashift;
1341 mvd->vdev_physical_ashift = cvd->vdev_physical_ashift;
1342 mvd->vdev_state = cvd->vdev_state;
1343 mvd->vdev_crtxg = cvd->vdev_crtxg;
1344
1345 vdev_remove_child(pvd, cvd);
1346 vdev_add_child(pvd, mvd);
1347 cvd->vdev_id = mvd->vdev_children;
1348 vdev_add_child(mvd, cvd);
1349 vdev_top_update(cvd->vdev_top, cvd->vdev_top);
1350
1351 if (mvd == mvd->vdev_top)
1352 vdev_top_transfer(cvd, mvd);
1353
1354 return (mvd);
1355 }
1356
1357 /*
1358 * Remove a 1-way mirror/replacing vdev from the tree.
1359 */
1360 void
1361 vdev_remove_parent(vdev_t *cvd)
1362 {
1363 vdev_t *mvd = cvd->vdev_parent;
1364 vdev_t *pvd = mvd->vdev_parent;
1365
1366 ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1367
1368 ASSERT(mvd->vdev_children == 1);
1369 ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
1370 mvd->vdev_ops == &vdev_replacing_ops ||
1371 mvd->vdev_ops == &vdev_spare_ops);
1372 cvd->vdev_ashift = mvd->vdev_ashift;
1373 cvd->vdev_logical_ashift = mvd->vdev_logical_ashift;
1374 cvd->vdev_physical_ashift = mvd->vdev_physical_ashift;
1375 vdev_remove_child(mvd, cvd);
1376 vdev_remove_child(pvd, mvd);
1377
1378 /*
1379 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
1380 * Otherwise, we could have detached an offline device, and when we
1381 * go to import the pool we'll think we have two top-level vdevs,
1382 * instead of a different version of the same top-level vdev.
1383 */
1384 if (mvd->vdev_top == mvd) {
1385 uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
1386 cvd->vdev_orig_guid = cvd->vdev_guid;
1387 cvd->vdev_guid += guid_delta;
1388 cvd->vdev_guid_sum += guid_delta;
1389
1390 /*
1391 * If the pool is not set to autoexpand, we also need to preserve
1392 * mvd's asize to prevent automatic expansion of cvd.
1393 * Otherwise if we are adjusting the mirror by attaching and
1394 * detaching children of non-uniform sizes, the mirror could
1395 * autoexpand, unexpectedly requiring larger devices to
1396 * re-establish the mirror.
1397 */
1398 if (!cvd->vdev_spa->spa_autoexpand)
1399 cvd->vdev_asize = mvd->vdev_asize;
1400 }
1401 cvd->vdev_id = mvd->vdev_id;
1402 vdev_add_child(pvd, cvd);
1403 vdev_top_update(cvd->vdev_top, cvd->vdev_top);
1404
1405 if (cvd == cvd->vdev_top)
1406 vdev_top_transfer(mvd, cvd);
1407
1408 ASSERT(mvd->vdev_children == 0);
1409 vdev_free(mvd);
1410 }
1411
1412 /*
1413 * Choose GCD for spa_gcd_alloc.
1414 */
1415 static uint64_t
1416 vdev_gcd(uint64_t a, uint64_t b)
1417 {
1418 while (b != 0) {
1419 uint64_t t = b;
1420 b = a % b;
1421 a = t;
1422 }
1423 return (a);
1424 }
1425
1426 /*
1427 * Set spa_min_alloc and spa_gcd_alloc.
1428 */
1429 static void
1430 vdev_spa_set_alloc(spa_t *spa, uint64_t min_alloc)
1431 {
1432 if (min_alloc < spa->spa_min_alloc)
1433 spa->spa_min_alloc = min_alloc;
1434 if (spa->spa_gcd_alloc == INT_MAX) {
1435 spa->spa_gcd_alloc = min_alloc;
1436 } else {
1437 spa->spa_gcd_alloc = vdev_gcd(min_alloc,
1438 spa->spa_gcd_alloc);
1439 }
1440 }
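/*
* Illustrative example: if one top-level vdev reports a minimum allocation
* of 12288 bytes and another reports 8192 bytes, spa_min_alloc ends up as
* 8192 while spa_gcd_alloc becomes vdev_gcd(12288, 8192) == 4096,
* regardless of the order in which the vdevs are processed.
*/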
1441
1442 void
1443 vdev_metaslab_group_create(vdev_t *vd)
1444 {
1445 spa_t *spa = vd->vdev_spa;
1446
1447 /*
1448 * metaslab_group_create was delayed until allocation bias was available
1449 */
1450 if (vd->vdev_mg == NULL) {
1451 metaslab_class_t *mc;
1452
1453 if (vd->vdev_islog && vd->vdev_alloc_bias == VDEV_BIAS_NONE)
1454 vd->vdev_alloc_bias = VDEV_BIAS_LOG;
1455
1456 ASSERT3U(vd->vdev_islog, ==,
1457 (vd->vdev_alloc_bias == VDEV_BIAS_LOG));
1458
1459 switch (vd->vdev_alloc_bias) {
1460 case VDEV_BIAS_LOG:
1461 mc = spa_log_class(spa);
1462 break;
1463 case VDEV_BIAS_SPECIAL:
1464 mc = spa_special_class(spa);
1465 break;
1466 case VDEV_BIAS_DEDUP:
1467 mc = spa_dedup_class(spa);
1468 break;
1469 default:
1470 mc = spa_normal_class(spa);
1471 }
1472
1473 vd->vdev_mg = metaslab_group_create(mc, vd,
1474 spa->spa_alloc_count);
1475
1476 if (!vd->vdev_islog) {
1477 vd->vdev_log_mg = metaslab_group_create(
1478 spa_embedded_log_class(spa), vd, 1);
1479 }
1480
1481 /*
1482 * The spa ashift min/max only apply for the normal metaslab
1483 * class. Class destination is late binding so ashift boundary
1484 * setting had to wait until now.
1485 */
1486 if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
1487 mc == spa_normal_class(spa) && vd->vdev_aux == NULL) {
1488 if (vd->vdev_ashift > spa->spa_max_ashift)
1489 spa->spa_max_ashift = vd->vdev_ashift;
1490 if (vd->vdev_ashift < spa->spa_min_ashift)
1491 spa->spa_min_ashift = vd->vdev_ashift;
1492
1493 uint64_t min_alloc = vdev_get_min_alloc(vd);
1494 vdev_spa_set_alloc(spa, min_alloc);
1495 }
1496 }
1497 }
1498
1499 int
1500 vdev_metaslab_init(vdev_t *vd, uint64_t txg)
1501 {
1502 spa_t *spa = vd->vdev_spa;
1503 uint64_t oldc = vd->vdev_ms_count;
1504 uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
1505 metaslab_t **mspp;
1506 int error;
1507 boolean_t expanding = (oldc != 0);
1508
1509 ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1510
1511 /*
1512 * This vdev is not being allocated from yet or is a hole.
1513 */
1514 if (vd->vdev_ms_shift == 0)
1515 return (0);
1516
1517 ASSERT(!vd->vdev_ishole);
1518
1519 ASSERT(oldc <= newc);
1520
1521 mspp = vmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
1522
1523 if (expanding) {
1524 memcpy(mspp, vd->vdev_ms, oldc * sizeof (*mspp));
1525 vmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
1526 }
1527
1528 vd->vdev_ms = mspp;
1529 vd->vdev_ms_count = newc;
1530
1531 for (uint64_t m = oldc; m < newc; m++) {
1532 uint64_t object = 0;
1533 /*
1534 * vdev_ms_array may be 0 if we are creating the "fake"
1535 * metaslabs for an indirect vdev for zdb's leak detection.
1536 * See zdb_leak_init().
1537 */
1538 if (txg == 0 && vd->vdev_ms_array != 0) {
1539 error = dmu_read(spa->spa_meta_objset,
1540 vd->vdev_ms_array,
1541 m * sizeof (uint64_t), sizeof (uint64_t), &object,
1542 DMU_READ_PREFETCH);
1543 if (error != 0) {
1544 vdev_dbgmsg(vd, "unable to read the metaslab "
1545 "array [error=%d]", error);
1546 return (error);
1547 }
1548 }
1549
1550 error = metaslab_init(vd->vdev_mg, m, object, txg,
1551 &(vd->vdev_ms[m]));
1552 if (error != 0) {
1553 vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
1554 error);
1555 return (error);
1556 }
1557 }
1558
1559 /*
1560 * Find the emptiest metaslab on the vdev and mark it for use for
1561 * embedded slog by moving it from the regular to the log metaslab
1562 * group.
1563 */
1564 if (vd->vdev_mg->mg_class == spa_normal_class(spa) &&
1565 vd->vdev_ms_count > zfs_embedded_slog_min_ms &&
1566 avl_is_empty(&vd->vdev_log_mg->mg_metaslab_tree)) {
1567 uint64_t slog_msid = 0;
1568 uint64_t smallest = UINT64_MAX;
1569
1570 /*
1571 * Note, we only search the new metaslabs, because the old
1572 * (pre-existing) ones may be active (e.g. have non-empty
1573 * range_tree's), and we don't move them to the new
1574 * metaslab_t.
1575 */
1576 for (uint64_t m = oldc; m < newc; m++) {
1577 uint64_t alloc =
1578 space_map_allocated(vd->vdev_ms[m]->ms_sm);
1579 if (alloc < smallest) {
1580 slog_msid = m;
1581 smallest = alloc;
1582 }
1583 }
1584 metaslab_t *slog_ms = vd->vdev_ms[slog_msid];
1585 /*
1586 * The metaslab was marked as dirty at the end of
1587 * metaslab_init(). Remove it from the dirty list so that we
1588 * can uninitialize and reinitialize it to the new class.
1589 */
1590 if (txg != 0) {
1591 (void) txg_list_remove_this(&vd->vdev_ms_list,
1592 slog_ms, txg);
1593 }
1594 uint64_t sm_obj = space_map_object(slog_ms->ms_sm);
1595 metaslab_fini(slog_ms);
1596 VERIFY0(metaslab_init(vd->vdev_log_mg, slog_msid, sm_obj, txg,
1597 &vd->vdev_ms[slog_msid]));
1598 }
1599
1600 if (txg == 0)
1601 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);
1602
1603 /*
1604 * If the vdev is marked as non-allocating then don't
1605 * activate the metaslabs since we want to ensure that
1606 * no allocations are performed on this device.
1607 */
1608 if (vd->vdev_noalloc) {
1609 /* track non-allocating vdev space */
1610 spa->spa_nonallocating_dspace += spa_deflate(spa) ?
1611 vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
1612 } else if (!expanding) {
1613 metaslab_group_activate(vd->vdev_mg);
1614 if (vd->vdev_log_mg != NULL)
1615 metaslab_group_activate(vd->vdev_log_mg);
1616 }
1617
1618 if (txg == 0)
1619 spa_config_exit(spa, SCL_ALLOC, FTAG);
1620
1621 return (0);
1622 }
1623
1624 void
1625 vdev_metaslab_fini(vdev_t *vd)
1626 {
1627 if (vd->vdev_checkpoint_sm != NULL) {
1628 ASSERT(spa_feature_is_active(vd->vdev_spa,
1629 SPA_FEATURE_POOL_CHECKPOINT));
1630 space_map_close(vd->vdev_checkpoint_sm);
1631 /*
1632 * Even though we close the space map, we need to set its
1633 * pointer to NULL. The reason is that vdev_metaslab_fini()
1634 * may be called multiple times for certain operations
1635 * (i.e. when destroying a pool) so we need to ensure that
1636 * this clause never executes twice. This logic is similar
1637 * to the one used for the vdev_ms clause below.
1638 */
1639 vd->vdev_checkpoint_sm = NULL;
1640 }
1641
1642 if (vd->vdev_ms != NULL) {
1643 metaslab_group_t *mg = vd->vdev_mg;
1644
1645 metaslab_group_passivate(mg);
1646 if (vd->vdev_log_mg != NULL) {
1647 ASSERT(!vd->vdev_islog);
1648 metaslab_group_passivate(vd->vdev_log_mg);
1649 }
1650
1651 uint64_t count = vd->vdev_ms_count;
1652 for (uint64_t m = 0; m < count; m++) {
1653 metaslab_t *msp = vd->vdev_ms[m];
1654 if (msp != NULL)
1655 metaslab_fini(msp);
1656 }
1657 vmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
1658 vd->vdev_ms = NULL;
1659 vd->vdev_ms_count = 0;
1660
1661 for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
1662 ASSERT0(mg->mg_histogram[i]);
1663 if (vd->vdev_log_mg != NULL)
1664 ASSERT0(vd->vdev_log_mg->mg_histogram[i]);
1665 }
1666 }
1667 ASSERT0(vd->vdev_ms_count);
1668 }
1669
1670 typedef struct vdev_probe_stats {
1671 boolean_t vps_readable;
1672 boolean_t vps_writeable;
1673 boolean_t vps_zio_done_probe;
1674 int vps_flags;
1675 } vdev_probe_stats_t;
1676
1677 static void
1678 vdev_probe_done(zio_t *zio)
1679 {
1680 spa_t *spa = zio->io_spa;
1681 vdev_t *vd = zio->io_vd;
1682 vdev_probe_stats_t *vps = zio->io_private;
1683
1684 ASSERT(vd->vdev_probe_zio != NULL);
1685
1686 if (zio->io_type == ZIO_TYPE_READ) {
1687 if (zio->io_error == 0)
1688 vps->vps_readable = 1;
1689 if (zio->io_error == 0 && spa_writeable(spa)) {
1690 zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
1691 zio->io_offset, zio->io_size, zio->io_abd,
1692 ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
1693 ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
1694 } else {
1695 abd_free(zio->io_abd);
1696 }
1697 } else if (zio->io_type == ZIO_TYPE_WRITE) {
1698 if (zio->io_error == 0)
1699 vps->vps_writeable = 1;
1700 abd_free(zio->io_abd);
1701 } else if (zio->io_type == ZIO_TYPE_NULL) {
1702 zio_t *pio;
1703 zio_link_t *zl;
1704
1705 vd->vdev_cant_read |= !vps->vps_readable;
1706 vd->vdev_cant_write |= !vps->vps_writeable;
1707 vdev_dbgmsg(vd, "probe done, cant_read=%u cant_write=%u",
1708 vd->vdev_cant_read, vd->vdev_cant_write);
1709
1710 if (vdev_readable(vd) &&
1711 (vdev_writeable(vd) || !spa_writeable(spa))) {
1712 zio->io_error = 0;
1713 } else {
1714 ASSERT(zio->io_error != 0);
1715 vdev_dbgmsg(vd, "failed probe");
1716 (void) zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
1717 spa, vd, NULL, NULL, 0);
1718 zio->io_error = SET_ERROR(ENXIO);
1719
1720 /*
1721 * If this probe was initiated from the zio pipeline, then
1722 * change the state in a spa_async_request. Probes that
1723 * were initiated from a vdev_open can change the state
1724 * as part of the open call.
1725 */
1726 if (vps->vps_zio_done_probe) {
1727 vd->vdev_fault_wanted = B_TRUE;
1728 spa_async_request(spa, SPA_ASYNC_FAULT_VDEV);
1729 }
1730 }
1731
1732 mutex_enter(&vd->vdev_probe_lock);
1733 ASSERT(vd->vdev_probe_zio == zio);
1734 vd->vdev_probe_zio = NULL;
1735 mutex_exit(&vd->vdev_probe_lock);
1736
1737 zl = NULL;
1738 while ((pio = zio_walk_parents(zio, &zl)) != NULL)
1739 if (!vdev_accessible(vd, pio))
1740 pio->io_error = SET_ERROR(ENXIO);
1741
1742 kmem_free(vps, sizeof (*vps));
1743 }
1744 }
1745
1746 /*
1747 * Determine whether this device is accessible.
1748 *
1749 * Read and write to several known locations: the pad regions of each
1750 * vdev label but the first, which we leave alone in case it contains
1751 * a VTOC.
1752 */
1753 zio_t *
1754 vdev_probe(vdev_t *vd, zio_t *zio)
1755 {
1756 spa_t *spa = vd->vdev_spa;
1757 vdev_probe_stats_t *vps = NULL;
1758 zio_t *pio;
1759
1760 ASSERT(vd->vdev_ops->vdev_op_leaf);
1761
1762 /*
1763 * Don't probe the probe.
1764 */
1765 if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
1766 return (NULL);
1767
1768 /*
1769 * To prevent 'probe storms' when a device fails, we create
1770 * just one probe i/o at a time. All zios that want to probe
1771 * this vdev will become parents of the probe io.
1772 */
1773 mutex_enter(&vd->vdev_probe_lock);
1774
1775 if ((pio = vd->vdev_probe_zio) == NULL) {
1776 vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
1777
1778 vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
1779 ZIO_FLAG_DONT_AGGREGATE | ZIO_FLAG_TRYHARD;
1780 vps->vps_zio_done_probe = (zio != NULL);
1781
1782 if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
1783 /*
1784 * vdev_cant_read and vdev_cant_write can only
1785 * transition from TRUE to FALSE when we have the
1786 * SCL_ZIO lock as writer; otherwise they can only
1787 * transition from FALSE to TRUE. This ensures that
1788 * any zio looking at these values can assume that
1789 * failures persist for the life of the I/O. That's
1790 * important because when a device has intermittent
1791 * connectivity problems, we want to ensure that
1792 * they're ascribed to the device (ENXIO) and not
1793 * the zio (EIO).
1794 *
1795 * Since we hold SCL_ZIO as writer here, clear both
1796 * values so the probe can reevaluate from first
1797 * principles.
1798 */
1799 vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
1800 vd->vdev_cant_read = B_FALSE;
1801 vd->vdev_cant_write = B_FALSE;
1802 }
1803
1804 vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
1805 vdev_probe_done, vps,
1806 vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);
1807 }
1808
1809 if (zio != NULL)
1810 zio_add_child(zio, pio);
1811
1812 mutex_exit(&vd->vdev_probe_lock);
1813
1814 if (vps == NULL) {
1815 ASSERT(zio != NULL);
1816 return (NULL);
1817 }
1818
1819 for (int l = 1; l < VDEV_LABELS; l++) {
1820 zio_nowait(zio_read_phys(pio, vd,
1821 vdev_label_offset(vd->vdev_psize, l,
1822 offsetof(vdev_label_t, vl_be)), VDEV_PAD_SIZE,
1823 abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
1824 ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
1825 ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
1826 }
1827
1828 if (zio == NULL)
1829 return (pio);
1830
1831 zio_nowait(pio);
1832 return (NULL);
1833 }
1834
1835 static void
1836 vdev_load_child(void *arg)
1837 {
1838 vdev_t *vd = arg;
1839
1840 vd->vdev_load_error = vdev_load(vd);
1841 }
1842
1843 static void
1844 vdev_open_child(void *arg)
1845 {
1846 vdev_t *vd = arg;
1847
1848 vd->vdev_open_thread = curthread;
1849 vd->vdev_open_error = vdev_open(vd);
1850 vd->vdev_open_thread = NULL;
1851 }
1852
1853 static boolean_t
1854 vdev_uses_zvols(vdev_t *vd)
1855 {
1856 #ifdef _KERNEL
1857 if (zvol_is_zvol(vd->vdev_path))
1858 return (B_TRUE);
1859 #endif
1860
1861 for (int c = 0; c < vd->vdev_children; c++)
1862 if (vdev_uses_zvols(vd->vdev_child[c]))
1863 return (B_TRUE);
1864
1865 return (B_FALSE);
1866 }
1867
1868 /*
1869 * Returns B_TRUE if the passed child should be opened.
1870 */
1871 static boolean_t
1872 vdev_default_open_children_func(vdev_t *vd)
1873 {
1874 (void) vd;
1875 return (B_TRUE);
1876 }
1877
1878 /*
1879 * Open the requested child vdevs. If any of the leaf vdevs are using
1880 * a ZFS volume then do the opens in a single thread. This avoids a
1881 * deadlock when the current thread is holding the spa_namespace_lock.
1882 */
1883 static void
1884 vdev_open_children_impl(vdev_t *vd, vdev_open_children_func_t *open_func)
1885 {
1886 int children = vd->vdev_children;
1887
1888 taskq_t *tq = taskq_create("vdev_open", children, minclsyspri,
1889 children, children, TASKQ_PREPOPULATE);
1890 vd->vdev_nonrot = B_TRUE;
1891
1892 for (int c = 0; c < children; c++) {
1893 vdev_t *cvd = vd->vdev_child[c];
1894
1895 if (open_func(cvd) == B_FALSE)
1896 continue;
1897
1898 if (tq == NULL || vdev_uses_zvols(vd)) {
1899 cvd->vdev_open_error = vdev_open(cvd);
1900 } else {
1901 VERIFY(taskq_dispatch(tq, vdev_open_child,
1902 cvd, TQ_SLEEP) != TASKQID_INVALID);
1903 }
1904
1905 vd->vdev_nonrot &= cvd->vdev_nonrot;
1906 }
1907
1908 if (tq != NULL) {
1909 taskq_wait(tq);
1910 taskq_destroy(tq);
1911 }
1912 }
1913
1914 /*
1915 * Open all child vdevs.
1916 */
1917 void
1918 vdev_open_children(vdev_t *vd)
1919 {
1920 vdev_open_children_impl(vd, vdev_default_open_children_func);
1921 }
1922
1923 /*
1924 * Conditionally open a subset of child vdevs.
1925 */
1926 void
1927 vdev_open_children_subset(vdev_t *vd, vdev_open_children_func_t *open_func)
1928 {
1929 vdev_open_children_impl(vd, open_func);
1930 }
1931
1932 /*
1933 * Compute the raidz-deflation ratio. Note, we hard-code 128k (1 << 17)
1934 * because it is the "typical" blocksize. Even though SPA_MAXBLOCKSIZE
1935 * changed, this algorithm cannot change; otherwise it would inconsistently
1936 * account for existing bp's. We also hard-code txg 0 for the same reason
1937 * since expanded RAIDZ vdevs can use a different asize for different birth
1938 * txg's.
1939 */
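/*
 * Worked example (hypothetical single-disk top-level vdev, where a 128K
 * block's asize equals its psize):
 * (1 << 17) / ((1 << 17) >> SPA_MINBLOCKSHIFT) = 131072 / 256 = 512.
 * Parity-based vdevs allocate more than 128K for the same block, so their
 * deflate ratio comes out smaller than 512.
 */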
1940 static void
1941 vdev_set_deflate_ratio(vdev_t *vd)
1942 {
1943 if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
1944 vd->vdev_deflate_ratio = (1 << 17) /
1945 (vdev_psize_to_asize_txg(vd, 1 << 17, 0) >>
1946 SPA_MINBLOCKSHIFT);
1947 }
1948 }
1949
1950 /*
1951 * Choose the best of two ashifts, preferring one between logical ashift
1952 * (absolute minimum) and administrator defined maximum, otherwise take
1953 * the biggest of the two.
1954 */
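/*
 * Rough illustration (assuming zfs_vdev_max_auto_ashift = 14): with
 * logical = 9, a = 12, b = 16 only 'a' falls in (9, 14], so 12 wins;
 * with a = 12, b = 13 both qualify and MAX() picks 13; with a = 16,
 * b = 17 neither qualifies and MAX() picks 17.
 */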
1955 uint64_t
1956 vdev_best_ashift(uint64_t logical, uint64_t a, uint64_t b)
1957 {
1958 if (a > logical && a <= zfs_vdev_max_auto_ashift) {
1959 if (b <= logical || b > zfs_vdev_max_auto_ashift)
1960 return (a);
1961 else
1962 return (MAX(a, b));
1963 } else if (b <= logical || b > zfs_vdev_max_auto_ashift)
1964 return (MAX(a, b));
1965 return (b);
1966 }
1967
1968 /*
1969 * Maximize performance by inflating the configured ashift for top level
1970 * vdevs to be as close to the physical ashift as possible while maintaining
1971 * administrator defined limits and ensuring it doesn't go below the
1972 * logical ashift.
1973 */
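/*
 * Rough illustration (assuming the default zfs_vdev_min_auto_ashift = 9
 * and zfs_vdev_max_auto_ashift = 14): a top-level vdev opened with
 * vdev_ashift = 9 and vdev_physical_ashift = 12 takes the first branch
 * below and ends up with MIN(MAX(14, 9), MAX(9, 12)) = 12 (4K alignment).
 */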
1974 static void
1975 vdev_ashift_optimize(vdev_t *vd)
1976 {
1977 ASSERT(vd == vd->vdev_top);
1978
1979 if (vd->vdev_ashift < vd->vdev_physical_ashift &&
1980 vd->vdev_physical_ashift <= zfs_vdev_max_auto_ashift) {
1981 vd->vdev_ashift = MIN(
1982 MAX(zfs_vdev_max_auto_ashift, vd->vdev_ashift),
1983 MAX(zfs_vdev_min_auto_ashift,
1984 vd->vdev_physical_ashift));
1985 } else {
1986 /*
1987 * If the logical and physical ashifts are the same, then
1988 * we ensure that the top-level vdev's ashift is not smaller
1989 * than our minimum ashift value. For the unusual case
1990 * where logical ashift > physical ashift, we can't cap
1991 * the calculated ashift based on max ashift as that
1992 * would cause failures.
1993 * We still check if we need to increase it to match
1994 * the min ashift.
1995 */
1996 vd->vdev_ashift = MAX(zfs_vdev_min_auto_ashift,
1997 vd->vdev_ashift);
1998 }
1999 }
2000
2001 /*
2002 * Prepare a virtual device for access.
2003 */
2004 int
2005 vdev_open(vdev_t *vd)
2006 {
2007 spa_t *spa = vd->vdev_spa;
2008 int error;
2009 uint64_t osize = 0;
2010 uint64_t max_osize = 0;
2011 uint64_t asize, max_asize, psize;
2012 uint64_t logical_ashift = 0;
2013 uint64_t physical_ashift = 0;
2014
2015 ASSERT(vd->vdev_open_thread == curthread ||
2016 spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2017 ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
2018 vd->vdev_state == VDEV_STATE_CANT_OPEN ||
2019 vd->vdev_state == VDEV_STATE_OFFLINE);
2020
2021 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
2022 vd->vdev_cant_read = B_FALSE;
2023 vd->vdev_cant_write = B_FALSE;
2024 vd->vdev_fault_wanted = B_FALSE;
2025 vd->vdev_min_asize = vdev_get_min_asize(vd);
2026
2027 /*
2028 * If this vdev is not removed, check its fault status. If it's
2029 * faulted, bail out of the open.
2030 */
2031 if (!vd->vdev_removed && vd->vdev_faulted) {
2032 ASSERT(vd->vdev_children == 0);
2033 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
2034 vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
2035 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
2036 vd->vdev_label_aux);
2037 return (SET_ERROR(ENXIO));
2038 } else if (vd->vdev_offline) {
2039 ASSERT(vd->vdev_children == 0);
2040 vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
2041 return (SET_ERROR(ENXIO));
2042 }
2043
2044 error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize,
2045 &logical_ashift, &physical_ashift);
2046
2047 /* Keep the device in removed state if unplugged */
2048 if (error == ENOENT && vd->vdev_removed) {
2049 vdev_set_state(vd, B_TRUE, VDEV_STATE_REMOVED,
2050 VDEV_AUX_NONE);
2051 return (error);
2052 }
2053
2054 /*
2055 * The physical volume size should never be larger than its max size. If it
2056 * is, either the disk has shrunk while we were reading it or the device is
2057 * buggy or damaged. Either way it's not safe for use, so bail out of the open.
2058 */
2059 if (osize > max_osize) {
2060 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2061 VDEV_AUX_OPEN_FAILED);
2062 return (SET_ERROR(ENXIO));
2063 }
2064
2065 /*
2066 * Reset the vdev_reopening flag so that we actually close
2067 * the vdev on error.
2068 */
2069 vd->vdev_reopening = B_FALSE;
2070 if (zio_injection_enabled && error == 0)
2071 error = zio_handle_device_injection(vd, NULL, SET_ERROR(ENXIO));
2072
2073 if (error) {
2074 if (vd->vdev_removed &&
2075 vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
2076 vd->vdev_removed = B_FALSE;
2077
2078 if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) {
2079 vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE,
2080 vd->vdev_stat.vs_aux);
2081 } else {
2082 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2083 vd->vdev_stat.vs_aux);
2084 }
2085 return (error);
2086 }
2087
2088 vd->vdev_removed = B_FALSE;
2089
2090 /*
2091 * Recheck the faulted flag now that we have confirmed that
2092 * the vdev is accessible. If we're faulted, bail.
2093 */
2094 if (vd->vdev_faulted) {
2095 ASSERT(vd->vdev_children == 0);
2096 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
2097 vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
2098 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
2099 vd->vdev_label_aux);
2100 return (SET_ERROR(ENXIO));
2101 }
2102
2103 if (vd->vdev_degraded) {
2104 ASSERT(vd->vdev_children == 0);
2105 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
2106 VDEV_AUX_ERR_EXCEEDED);
2107 } else {
2108 vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
2109 }
2110
2111 /*
2112 * For hole or missing vdevs we just return success.
2113 */
2114 if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
2115 return (0);
2116
2117 for (int c = 0; c < vd->vdev_children; c++) {
2118 if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
2119 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
2120 VDEV_AUX_NONE);
2121 break;
2122 }
2123 }
2124
2125 osize = P2ALIGN_TYPED(osize, sizeof (vdev_label_t), uint64_t);
2126 max_osize = P2ALIGN_TYPED(max_osize, sizeof (vdev_label_t), uint64_t);
2127
2128 if (vd->vdev_children == 0) {
2129 if (osize < SPA_MINDEVSIZE) {
2130 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2131 VDEV_AUX_TOO_SMALL);
2132 return (SET_ERROR(EOVERFLOW));
2133 }
2134 psize = osize;
2135 asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
2136 max_asize = max_osize - (VDEV_LABEL_START_SIZE +
2137 VDEV_LABEL_END_SIZE);
2138 } else {
2139 if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
2140 (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
2141 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2142 VDEV_AUX_TOO_SMALL);
2143 return (SET_ERROR(EOVERFLOW));
2144 }
2145 psize = 0;
2146 asize = osize;
2147 max_asize = max_osize;
2148 }
2149
2150 /*
2151 * If the vdev was expanded, record this so that we can re-create the
2152 * uberblock rings in labels {2,3} during the next sync.
2153 */
2154 if ((psize > vd->vdev_psize) && (vd->vdev_psize != 0))
2155 vd->vdev_copy_uberblocks = B_TRUE;
2156
2157 vd->vdev_psize = psize;
2158
2159 /*
2160 * Make sure the allocatable size hasn't shrunk too much.
2161 */
2162 if (asize < vd->vdev_min_asize) {
2163 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2164 VDEV_AUX_BAD_LABEL);
2165 return (SET_ERROR(EINVAL));
2166 }
2167
2168 /*
2169 * We can always set the logical/physical ashift members since
2170 * their values are only used to calculate the vdev_ashift when
2171 * the device is first added to the config. These values should
2172 * not be used for anything else since they may change whenever
2173 * the device is reopened and we don't store them in the label.
2174 */
2175 vd->vdev_physical_ashift =
2176 MAX(physical_ashift, vd->vdev_physical_ashift);
2177 vd->vdev_logical_ashift = MAX(logical_ashift,
2178 vd->vdev_logical_ashift);
2179
2180 if (vd->vdev_asize == 0) {
2181 /*
2182 * This is the first-ever open, so use the computed values.
2183 * For compatibility, a different ashift can be requested.
2184 */
2185 vd->vdev_asize = asize;
2186 vd->vdev_max_asize = max_asize;
2187
2188 /*
2189 * If the vdev_ashift was not overridden at creation time,
2190 * then set it to the logical ashift and optimize the ashift.
2191 */
2192 if (vd->vdev_ashift == 0) {
2193 vd->vdev_ashift = vd->vdev_logical_ashift;
2194
2195 if (vd->vdev_logical_ashift > ASHIFT_MAX) {
2196 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2197 VDEV_AUX_ASHIFT_TOO_BIG);
2198 return (SET_ERROR(EDOM));
2199 }
2200
2201 if (vd->vdev_top == vd && vd->vdev_attaching == B_FALSE)
2202 vdev_ashift_optimize(vd);
2203 vd->vdev_attaching = B_FALSE;
2204 }
2205 if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN ||
2206 vd->vdev_ashift > ASHIFT_MAX)) {
2207 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2208 VDEV_AUX_BAD_ASHIFT);
2209 return (SET_ERROR(EDOM));
2210 }
2211 } else {
2212 /*
2213 * Make sure the alignment required hasn't increased.
2214 */
2215 if (vd->vdev_ashift > vd->vdev_top->vdev_ashift &&
2216 vd->vdev_ops->vdev_op_leaf) {
2217 (void) zfs_ereport_post(
2218 FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
2219 spa, vd, NULL, NULL, 0);
2220 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2221 VDEV_AUX_BAD_LABEL);
2222 return (SET_ERROR(EDOM));
2223 }
2224 vd->vdev_max_asize = max_asize;
2225 }
2226
2227 /*
2228 * If all children are healthy, we update asize if either:
2229 * The asize has increased, due to a device expansion caused by dynamic
2230 * LUN growth or vdev replacement, and automatic expansion is enabled,
2231 * making the additional space available.
2232 *
2233 * The asize has decreased, due to a device shrink usually caused by a
2234 * vdev replace with a smaller device. This ensures that calculations
2235 * based on max_asize and asize (e.g. esize) are always valid. It's safe
2236 * to do this as we've already validated that asize is greater than
2237 * vdev_min_asize.
2238 */
2239 if (vd->vdev_state == VDEV_STATE_HEALTHY &&
2240 ((asize > vd->vdev_asize &&
2241 (vd->vdev_expanding || spa->spa_autoexpand)) ||
2242 (asize < vd->vdev_asize)))
2243 vd->vdev_asize = asize;
2244
2245 vdev_set_min_asize(vd);
2246
2247 /*
2248 * Ensure we can issue some IO before declaring the
2249 * vdev open for business.
2250 */
2251 if (vd->vdev_ops->vdev_op_leaf &&
2252 (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
2253 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
2254 VDEV_AUX_ERR_EXCEEDED);
2255 return (error);
2256 }
2257
2258 /*
2259 * Track the minimum allocation size.
2260 */
2261 if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
2262 vd->vdev_islog == 0 && vd->vdev_aux == NULL) {
2263 uint64_t min_alloc = vdev_get_min_alloc(vd);
2264 vdev_spa_set_alloc(spa, min_alloc);
2265 }
2266
2267 /*
2268 * If this is a leaf vdev, assess whether a resilver is needed.
2269 * But don't do this if we are doing a reopen for a scrub, since
2270 * this would just restart the scrub we are already doing.
2271 */
2272 if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen)
2273 dsl_scan_assess_vdev(spa->spa_dsl_pool, vd);
2274
2275 return (0);
2276 }
2277
2278 static void
2279 vdev_validate_child(void *arg)
2280 {
2281 vdev_t *vd = arg;
2282
2283 vd->vdev_validate_thread = curthread;
2284 vd->vdev_validate_error = vdev_validate(vd);
2285 vd->vdev_validate_thread = NULL;
2286 }
2287
2288 /*
2289 * Called once the vdevs are all opened, this routine validates the label
2290 * contents. This needs to be done before vdev_load() so that we don't
2291 * inadvertently do repair I/Os to the wrong device.
2292 *
2293 * This function will only return failure if one of the vdevs indicates that it
2294 * has since been destroyed or exported. This is only possible if
2295 * /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
2296 * will be updated but the function will return 0.
2297 */
2298 int
2299 vdev_validate(vdev_t *vd)
2300 {
2301 spa_t *spa = vd->vdev_spa;
2302 taskq_t *tq = NULL;
2303 nvlist_t *label;
2304 uint64_t guid = 0, aux_guid = 0, top_guid;
2305 uint64_t state;
2306 nvlist_t *nvl;
2307 uint64_t txg;
2308 int children = vd->vdev_children;
2309
2310 if (vdev_validate_skip)
2311 return (0);
2312
2313 if (children > 0) {
2314 tq = taskq_create("vdev_validate", children, minclsyspri,
2315 children, children, TASKQ_PREPOPULATE);
2316 }
2317
2318 for (uint64_t c = 0; c < children; c++) {
2319 vdev_t *cvd = vd->vdev_child[c];
2320
2321 if (tq == NULL || vdev_uses_zvols(cvd)) {
2322 vdev_validate_child(cvd);
2323 } else {
2324 VERIFY(taskq_dispatch(tq, vdev_validate_child, cvd,
2325 TQ_SLEEP) != TASKQID_INVALID);
2326 }
2327 }
2328 if (tq != NULL) {
2329 taskq_wait(tq);
2330 taskq_destroy(tq);
2331 }
2332 for (int c = 0; c < children; c++) {
2333 int error = vd->vdev_child[c]->vdev_validate_error;
2334
2335 if (error != 0)
2336 return (SET_ERROR(EBADF));
2337 }
2338
2339
2340 /*
2341 * If the device has already failed, or was marked offline, don't do
2342 * any further validation. Otherwise, label I/O will fail and we will
2343 * overwrite the previous state.
2344 */
2345 if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd))
2346 return (0);
2347
2348 /*
2349 * If we are performing an extreme rewind, we allow for a label that
2350 * was modified at a point after the current txg.
2351 * If config lock is not held do not check for the txg. spa_sync could
2352 * be updating the vdev's label before updating spa_last_synced_txg.
2353 */
2354 if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
2355 spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
2356 txg = UINT64_MAX;
2357 else
2358 txg = spa_last_synced_txg(spa);
2359
2360 if ((label = vdev_label_read_config(vd, txg)) == NULL) {
2361 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2362 VDEV_AUX_BAD_LABEL);
2363 vdev_dbgmsg(vd, "vdev_validate: failed reading config for "
2364 "txg %llu", (u_longlong_t)txg);
2365 return (0);
2366 }
2367
2368 /*
2369 * Determine if this vdev has been split off into another
2370 * pool. If so, then refuse to open it.
2371 */
2372 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
2373 &aux_guid) == 0 && aux_guid == spa_guid(spa)) {
2374 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2375 VDEV_AUX_SPLIT_POOL);
2376 nvlist_free(label);
2377 vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool");
2378 return (0);
2379 }
2380
2381 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) {
2382 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2383 VDEV_AUX_CORRUPT_DATA);
2384 nvlist_free(label);
2385 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
2386 ZPOOL_CONFIG_POOL_GUID);
2387 return (0);
2388 }
2389
2390 /*
2391 * If config is not trusted then ignore the spa guid check. This is
2392 * necessary because if the machine crashed during a re-guid the new
2393 * guid might have been written to all of the vdev labels, but not the
2394 * cached config. The check will be performed again once we have the
2395 * trusted config from the MOS.
2396 */
2397 if (spa->spa_trust_config && guid != spa_guid(spa)) {
2398 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2399 VDEV_AUX_CORRUPT_DATA);
2400 nvlist_free(label);
2401 vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't "
2402 "match config (%llu != %llu)", (u_longlong_t)guid,
2403 (u_longlong_t)spa_guid(spa));
2404 return (0);
2405 }
2406
2407 if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
2408 != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
2409 &aux_guid) != 0)
2410 aux_guid = 0;
2411
2412 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) {
2413 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2414 VDEV_AUX_CORRUPT_DATA);
2415 nvlist_free(label);
2416 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
2417 ZPOOL_CONFIG_GUID);
2418 return (0);
2419 }
2420
2421 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid)
2422 != 0) {
2423 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2424 VDEV_AUX_CORRUPT_DATA);
2425 nvlist_free(label);
2426 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
2427 ZPOOL_CONFIG_TOP_GUID);
2428 return (0);
2429 }
2430
2431 /*
2432 * If this vdev just became a top-level vdev because its sibling was
2433 * detached, it will have adopted the parent's vdev guid -- but the
2434 * label may or may not be on disk yet. Fortunately, either version
2435 * of the label will have the same top guid, so if we're a top-level
2436 * vdev, we can safely compare to that instead.
2437 * However, if the config comes from a cachefile that failed to update
2438 * after the detach, a top-level vdev will appear as a non top-level
2439 * vdev in the config. Also relax the constraints if we perform an
2440 * extreme rewind.
2441 *
2442 * If we split this vdev off instead, then we also check the
2443 * original pool's guid. We don't want to consider the vdev
2444 * corrupt if it is partway through a split operation.
2445 */
2446 if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) {
2447 boolean_t mismatch = B_FALSE;
2448 if (spa->spa_trust_config && !spa->spa_extreme_rewind) {
2449 if (vd != vd->vdev_top || vd->vdev_guid != top_guid)
2450 mismatch = B_TRUE;
2451 } else {
2452 if (vd->vdev_guid != top_guid &&
2453 vd->vdev_top->vdev_guid != guid)
2454 mismatch = B_TRUE;
2455 }
2456
2457 if (mismatch) {
2458 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2459 VDEV_AUX_CORRUPT_DATA);
2460 nvlist_free(label);
2461 vdev_dbgmsg(vd, "vdev_validate: config guid "
2462 "doesn't match label guid");
2463 vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu",
2464 (u_longlong_t)vd->vdev_guid,
2465 (u_longlong_t)vd->vdev_top->vdev_guid);
2466 vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, "
2467 "aux_guid %llu", (u_longlong_t)guid,
2468 (u_longlong_t)top_guid, (u_longlong_t)aux_guid);
2469 return (0);
2470 }
2471 }
2472
2473 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
2474 &state) != 0) {
2475 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2476 VDEV_AUX_CORRUPT_DATA);
2477 nvlist_free(label);
2478 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
2479 ZPOOL_CONFIG_POOL_STATE);
2480 return (0);
2481 }
2482
2483 nvlist_free(label);
2484
2485 /*
2486 * If this is a verbatim import, no need to check the
2487 * state of the pool.
2488 */
2489 if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
2490 spa_load_state(spa) == SPA_LOAD_OPEN &&
2491 state != POOL_STATE_ACTIVE) {
2492 vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) "
2493 "for spa %s", (u_longlong_t)state, spa->spa_name);
2494 return (SET_ERROR(EBADF));
2495 }
2496
2497 /*
2498 * If we were able to open and validate a vdev that was
2499 * previously marked permanently unavailable, clear that state
2500 * now.
2501 */
2502 if (vd->vdev_not_present)
2503 vd->vdev_not_present = 0;
2504
2505 return (0);
2506 }
2507
2508 static void
2509 vdev_update_path(const char *prefix, char *svd, char **dvd, uint64_t guid)
2510 {
2511 if (svd != NULL && *dvd != NULL) {
2512 if (strcmp(svd, *dvd) != 0) {
2513 zfs_dbgmsg("vdev_copy_path: vdev %llu: %s changed "
2514 "from '%s' to '%s'", (u_longlong_t)guid, prefix,
2515 *dvd, svd);
2516 spa_strfree(*dvd);
2517 *dvd = spa_strdup(svd);
2518 }
2519 } else if (svd != NULL) {
2520 *dvd = spa_strdup(svd);
2521 zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'",
2522 (u_longlong_t)guid, *dvd);
2523 }
2524 }
2525
2526 static void
2527 vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd)
2528 {
2529 char *old, *new;
2530
2531 vdev_update_path("vdev_path", svd->vdev_path, &dvd->vdev_path,
2532 dvd->vdev_guid);
2533
2534 vdev_update_path("vdev_devid", svd->vdev_devid, &dvd->vdev_devid,
2535 dvd->vdev_guid);
2536
2537 vdev_update_path("vdev_physpath", svd->vdev_physpath,
2538 &dvd->vdev_physpath, dvd->vdev_guid);
2539
2540 /*
2541 * Our enclosure sysfs path may have changed between imports
2542 */
2543 old = dvd->vdev_enc_sysfs_path;
2544 new = svd->vdev_enc_sysfs_path;
2545 if ((old != NULL && new == NULL) ||
2546 (old == NULL && new != NULL) ||
2547 ((old != NULL && new != NULL) && strcmp(new, old) != 0)) {
2548 zfs_dbgmsg("vdev_copy_path: vdev %llu: vdev_enc_sysfs_path "
2549 "changed from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
2550 old, new);
2551
2552 if (dvd->vdev_enc_sysfs_path)
2553 spa_strfree(dvd->vdev_enc_sysfs_path);
2554
2555 if (svd->vdev_enc_sysfs_path) {
2556 dvd->vdev_enc_sysfs_path = spa_strdup(
2557 svd->vdev_enc_sysfs_path);
2558 } else {
2559 dvd->vdev_enc_sysfs_path = NULL;
2560 }
2561 }
2562 }
2563
2564 /*
2565 * Recursively copy vdev paths from one vdev to another. The source and
2566 * destination vdev trees must have the same geometry, otherwise an error is
2567 * returned. Intended to copy paths from userland config into MOS config.
2568 */
2569 int
2570 vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd)
2571 {
2572 if ((svd->vdev_ops == &vdev_missing_ops) ||
2573 (svd->vdev_ishole && dvd->vdev_ishole) ||
2574 (dvd->vdev_ops == &vdev_indirect_ops))
2575 return (0);
2576
2577 if (svd->vdev_ops != dvd->vdev_ops) {
2578 vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s",
2579 svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type);
2580 return (SET_ERROR(EINVAL));
2581 }
2582
2583 if (svd->vdev_guid != dvd->vdev_guid) {
2584 vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != "
2585 "%llu)", (u_longlong_t)svd->vdev_guid,
2586 (u_longlong_t)dvd->vdev_guid);
2587 return (SET_ERROR(EINVAL));
2588 }
2589
2590 if (svd->vdev_children != dvd->vdev_children) {
2591 vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: "
2592 "%llu != %llu", (u_longlong_t)svd->vdev_children,
2593 (u_longlong_t)dvd->vdev_children);
2594 return (SET_ERROR(EINVAL));
2595 }
2596
2597 for (uint64_t i = 0; i < svd->vdev_children; i++) {
2598 int error = vdev_copy_path_strict(svd->vdev_child[i],
2599 dvd->vdev_child[i]);
2600 if (error != 0)
2601 return (error);
2602 }
2603
2604 if (svd->vdev_ops->vdev_op_leaf)
2605 vdev_copy_path_impl(svd, dvd);
2606
2607 return (0);
2608 }
2609
2610 static void
2611 vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd)
2612 {
2613 ASSERT(stvd->vdev_top == stvd);
2614 ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id);
2615
2616 for (uint64_t i = 0; i < dvd->vdev_children; i++) {
2617 vdev_copy_path_search(stvd, dvd->vdev_child[i]);
2618 }
2619
2620 if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd))
2621 return;
2622
2623 /*
2624 * The idea here is that while a vdev can shift positions within
2625 * a top vdev (when replacing, attaching mirror, etc.) it cannot
2626 * step outside of it.
2627 */
2628 vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid);
2629
2630 if (vd == NULL || vd->vdev_ops != dvd->vdev_ops)
2631 return;
2632
2633 ASSERT(vd->vdev_ops->vdev_op_leaf);
2634
2635 vdev_copy_path_impl(vd, dvd);
2636 }
2637
2638 /*
2639 * Recursively copy vdev paths from one root vdev to another. Source and
2640 * destination vdev trees may differ in geometry. For each destination leaf
2641 * vdev, search for a vdev with the same guid and top vdev id in the source.
2642 * Intended to copy paths from userland config into MOS config.
2643 */
2644 void
2645 vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd)
2646 {
2647 uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children);
2648 ASSERT(srvd->vdev_ops == &vdev_root_ops);
2649 ASSERT(drvd->vdev_ops == &vdev_root_ops);
2650
2651 for (uint64_t i = 0; i < children; i++) {
2652 vdev_copy_path_search(srvd->vdev_child[i],
2653 drvd->vdev_child[i]);
2654 }
2655 }
2656
2657 /*
2658 * Close a virtual device.
2659 */
2660 void
2661 vdev_close(vdev_t *vd)
2662 {
2663 vdev_t *pvd = vd->vdev_parent;
2664 spa_t *spa __maybe_unused = vd->vdev_spa;
2665
2666 ASSERT(vd != NULL);
2667 ASSERT(vd->vdev_open_thread == curthread ||
2668 spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2669
2670 /*
2671 * If our parent is reopening, then we are as well, unless we are
2672 * going offline.
2673 */
2674 if (pvd != NULL && pvd->vdev_reopening)
2675 vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);
2676
2677 vd->vdev_ops->vdev_op_close(vd);
2678
2679 /*
2680 * We record the previous state before we close it, so that if we are
2681 * doing a reopen(), we don't generate FMA ereports if we notice that
2682 * it's still faulted.
2683 */
2684 vd->vdev_prevstate = vd->vdev_state;
2685
2686 if (vd->vdev_offline)
2687 vd->vdev_state = VDEV_STATE_OFFLINE;
2688 else
2689 vd->vdev_state = VDEV_STATE_CLOSED;
2690 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
2691 }
2692
2693 void
2694 vdev_hold(vdev_t *vd)
2695 {
2696 spa_t *spa = vd->vdev_spa;
2697
2698 ASSERT(spa_is_root(spa));
2699 if (spa->spa_state == POOL_STATE_UNINITIALIZED)
2700 return;
2701
2702 for (int c = 0; c < vd->vdev_children; c++)
2703 vdev_hold(vd->vdev_child[c]);
2704
2705 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_hold != NULL)
2706 vd->vdev_ops->vdev_op_hold(vd);
2707 }
2708
2709 void
2710 vdev_rele(vdev_t *vd)
2711 {
2712 ASSERT(spa_is_root(vd->vdev_spa));
2713 for (int c = 0; c < vd->vdev_children; c++)
2714 vdev_rele(vd->vdev_child[c]);
2715
2716 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_rele != NULL)
2717 vd->vdev_ops->vdev_op_rele(vd);
2718 }
2719
2720 /*
2721 * Reopen all interior vdevs and any unopened leaves. We don't actually
2722 * reopen leaf vdevs which had previously been opened as they might deadlock
2723 * on the spa_config_lock. Instead we only obtain the leaf's physical size.
2724 * If the leaf has never been opened then open it, as usual.
2725 */
2726 void
2727 vdev_reopen(vdev_t *vd)
2728 {
2729 spa_t *spa = vd->vdev_spa;
2730
2731 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2732
2733 /* set the reopening flag unless we're taking the vdev offline */
2734 vd->vdev_reopening = !vd->vdev_offline;
2735 vdev_close(vd);
2736 (void) vdev_open(vd);
2737
2738 /*
2739 * Call vdev_validate() here to make sure we have the same device.
2740 * Otherwise, a device with an invalid label could be successfully
2741 * opened in response to vdev_reopen().
2742 */
2743 if (vd->vdev_aux) {
2744 (void) vdev_validate_aux(vd);
2745 if (vdev_readable(vd) && vdev_writeable(vd) &&
2746 vd->vdev_aux == &spa->spa_l2cache) {
2747 /*
2748 * If the vdev is present we should evict all ARC
2749 * buffers and pointers to log blocks and reclaim their
2750 * space before restoring its contents to L2ARC.
2751 */
2752 if (l2arc_vdev_present(vd)) {
2753 l2arc_rebuild_vdev(vd, B_TRUE);
2754 } else {
2755 l2arc_add_vdev(spa, vd);
2756 }
2757 spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
2758 spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
2759 }
2760 } else {
2761 (void) vdev_validate(vd);
2762 }
2763
2764 /*
2765 * Recheck if resilver is still needed and cancel any
2766 * scheduled resilver if resilver is unneeded.
2767 */
2768 if (!vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL) &&
2769 spa->spa_async_tasks & SPA_ASYNC_RESILVER) {
2770 mutex_enter(&spa->spa_async_lock);
2771 spa->spa_async_tasks &= ~SPA_ASYNC_RESILVER;
2772 mutex_exit(&spa->spa_async_lock);
2773 }
2774
2775 /*
2776 * Reassess parent vdev's health.
2777 */
2778 vdev_propagate_state(vd);
2779 }
2780
2781 int
2782 vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
2783 {
2784 int error;
2785
2786 /*
2787 * Normally, partial opens (e.g. of a mirror) are allowed.
2788 * For a create, however, we want to fail the request if
2789 * there are any components we can't open.
2790 */
2791 error = vdev_open(vd);
2792
2793 if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
2794 vdev_close(vd);
2795 return (error ? error : SET_ERROR(ENXIO));
2796 }
2797
2798 /*
2799 * Recursively load DTLs and initialize all labels.
2800 */
2801 if ((error = vdev_dtl_load(vd)) != 0 ||
2802 (error = vdev_label_init(vd, txg, isreplacing ?
2803 VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
2804 vdev_close(vd);
2805 return (error);
2806 }
2807
2808 return (0);
2809 }
2810
2811 void
2812 vdev_metaslab_set_size(vdev_t *vd)
2813 {
2814 uint64_t asize = vd->vdev_asize;
2815 uint64_t ms_count = asize >> zfs_vdev_default_ms_shift;
2816 uint64_t ms_shift;
2817
2818 /*
2819 * There are two dimensions to the metaslab sizing calculation:
2820 * the size of the metaslab and the count of metaslabs per vdev.
2821 *
2822 * The default values used below are a good balance between memory
2823 * usage (larger metaslab size means more memory needed for loaded
2824 * metaslabs; more metaslabs means more memory needed for the
2825 * metaslab_t structs), metaslab load time (larger metaslabs take
2826 * longer to load), and metaslab sync time (more metaslabs means
2827 * more time spent syncing all of them).
2828 *
2829 * In general, we aim for zfs_vdev_default_ms_count (200) metaslabs.
2830 * The ranges of the dimensions are as follows:
2831 *
2832 * 2^29 <= ms_size <= 2^34
2833 * 16 <= ms_count <= 131,072
2834 *
2835 * On the lower end of vdev sizes, we aim for metaslab sizes of
2836 * at least 512MB (2^29) to minimize fragmentation effects when
2837 * testing with smaller devices. However, the count constraint
2838 * of at least 16 metaslabs will override this minimum size goal.
2839 *
2840 * On the upper end of vdev sizes, we aim for a maximum metaslab
2841 * size of 16GB. However, we will cap the total count to 2^17
2842 * metaslabs to keep our memory footprint in check and let the
2843 * metaslab size grow from there if that limit is hit.
2844 *
2845 * The net effect of applying the above constraints is summarized below.
2846 *
2847 * vdev size metaslab count
2848 * --------------|-----------------
2849 * < 8GB ~16
2850 * 8GB - 100GB one per 512MB
2851 * 100GB - 3TB ~200
2852 * 3TB - 2PB one per 16GB
2853 * > 2PB ~131,072
2854 * --------------------------------
2855 *
2856 * Finally, note that all of the above calculate the initial
2857 * number of metaslabs. Expanding a top-level vdev will result
2858 * in additional metaslabs being allocated making it possible
2859 * to exceed the zfs_vdev_ms_count_limit.
2860 */
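/*
 * Rough illustration (assuming the default 512M initial metaslab size and
 * the ~200 target count): a 1 TiB vdev starts with 2048 candidate
 * metaslabs, which exceeds the target, so ms_shift becomes
 * highbit64(1 TiB / 200) = 33, i.e. 8 GiB metaslabs -- roughly 128 of
 * them once the size is rounded up to a power of two.
 */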
2861
2862 if (ms_count < zfs_vdev_min_ms_count)
2863 ms_shift = highbit64(asize / zfs_vdev_min_ms_count);
2864 else if (ms_count > zfs_vdev_default_ms_count)
2865 ms_shift = highbit64(asize / zfs_vdev_default_ms_count);
2866 else
2867 ms_shift = zfs_vdev_default_ms_shift;
2868
2869 if (ms_shift < SPA_MAXBLOCKSHIFT) {
2870 ms_shift = SPA_MAXBLOCKSHIFT;
2871 } else if (ms_shift > zfs_vdev_max_ms_shift) {
2872 ms_shift = zfs_vdev_max_ms_shift;
2873 /* cap the total count to constrain memory footprint */
2874 if ((asize >> ms_shift) > zfs_vdev_ms_count_limit)
2875 ms_shift = highbit64(asize / zfs_vdev_ms_count_limit);
2876 }
2877
2878 vd->vdev_ms_shift = ms_shift;
2879 ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT);
2880 }
2881
2882 void
2883 vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
2884 {
2885 ASSERT(vd == vd->vdev_top);
2886 /* indirect vdevs don't have metaslabs or dtls */
2887 ASSERT(vdev_is_concrete(vd) || flags == 0);
2888 ASSERT(ISP2(flags));
2889 ASSERT(spa_writeable(vd->vdev_spa));
2890
2891 if (flags & VDD_METASLAB)
2892 (void) txg_list_add(&vd->vdev_ms_list, arg, txg);
2893
2894 if (flags & VDD_DTL)
2895 (void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
2896
2897 (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
2898 }
2899
2900 void
2901 vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
2902 {
2903 for (int c = 0; c < vd->vdev_children; c++)
2904 vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
2905
2906 if (vd->vdev_ops->vdev_op_leaf)
2907 vdev_dirty(vd->vdev_top, flags, vd, txg);
2908 }
2909
2910 /*
2911 * DTLs.
2912 *
2913 * A vdev's DTL (dirty time log) is the set of transaction groups for which
2914 * the vdev has less than perfect replication. There are four kinds of DTL:
2915 *
2916 * DTL_MISSING: txgs for which the vdev has no valid copies of the data
2917 *
2918 * DTL_PARTIAL: txgs for which data is available, but not fully replicated
2919 *
2920 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
2921 * scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
2922 * txgs that was scrubbed.
2923 *
2924 * DTL_OUTAGE: txgs which cannot currently be read, whether due to
2925 * persistent errors or just some device being offline.
2926 * Unlike the other three, the DTL_OUTAGE map is not generally
2927 * maintained; it's only computed when needed, typically to
2928 * determine whether a device can be detached.
2929 *
2930 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
2931 * either has the data or it doesn't.
2932 *
2933 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
2934 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
2935 * if any child is less than fully replicated, then so is its parent.
2936 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
2937 * comprising only those txgs which appear in more than 'maxfaults' children;
2938 * those are the txgs we don't have enough replication to read. For example,
2939 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
2940 * thus, its DTL_MISSING consists of the set of txgs that appear in more than
2941 * two child DTL_MISSING maps.
2942 *
2943 * It should be clear from the above that to compute the DTLs and outage maps
2944 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
2945 * Therefore, that is all we keep on disk. When loading the pool, or after
2946 * a configuration change, we generate all other DTLs from first principles.
2947 */
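/*
 * Rough illustration for an interior vdev: on a raidz2 (maxfaults == 2),
 * a txg missing from one or two children is still readable and shows up
 * only in the parent's DTL_PARTIAL; once the same txg is missing from
 * three or more children it also enters the parent's DTL_MISSING,
 * matching the minref = nparity + 1 computation in vdev_dtl_reassess().
 */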
2948 void
2949 vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2950 {
2951 range_tree_t *rt = vd->vdev_dtl[t];
2952
2953 ASSERT(t < DTL_TYPES);
2954 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2955 ASSERT(spa_writeable(vd->vdev_spa));
2956
2957 mutex_enter(&vd->vdev_dtl_lock);
2958 if (!range_tree_contains(rt, txg, size))
2959 range_tree_add(rt, txg, size);
2960 mutex_exit(&vd->vdev_dtl_lock);
2961 }
2962
2963 boolean_t
2964 vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2965 {
2966 range_tree_t *rt = vd->vdev_dtl[t];
2967 boolean_t dirty = B_FALSE;
2968
2969 ASSERT(t < DTL_TYPES);
2970 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2971
2972 /*
2973 * While we are loading the pool, the DTLs have not been loaded yet.
2974 * This isn't a problem but it can result in devices being tried
2975 * which are known not to have the data. In that case, the import
2976 * relies on the checksum to ensure that we get the right data.
2977 * Note that while importing we are only reading the MOS, which is
2978 * always checksummed.
2979 */
2980 mutex_enter(&vd->vdev_dtl_lock);
2981 if (!range_tree_is_empty(rt))
2982 dirty = range_tree_contains(rt, txg, size);
2983 mutex_exit(&vd->vdev_dtl_lock);
2984
2985 return (dirty);
2986 }
2987
2988 boolean_t
2989 vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
2990 {
2991 range_tree_t *rt = vd->vdev_dtl[t];
2992 boolean_t empty;
2993
2994 mutex_enter(&vd->vdev_dtl_lock);
2995 empty = range_tree_is_empty(rt);
2996 mutex_exit(&vd->vdev_dtl_lock);
2997
2998 return (empty);
2999 }
3000
3001 /*
3002 * Check if the txg falls within the range which must be
3003 * resilvered. DVAs outside this range can always be skipped.
3004 */
3005 boolean_t
3006 vdev_default_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
3007 uint64_t phys_birth)
3008 {
3009 (void) dva, (void) psize;
3010
3011 /* Set by sequential resilver. */
3012 if (phys_birth == TXG_UNKNOWN)
3013 return (B_TRUE);
3014
3015 return (vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1));
3016 }
3017
3018 /*
3019 * Returns B_TRUE if the vdev determines the DVA needs to be resilvered.
3020 */
3021 boolean_t
3022 vdev_dtl_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
3023 uint64_t phys_birth)
3024 {
3025 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
3026
3027 if (vd->vdev_ops->vdev_op_need_resilver == NULL ||
3028 vd->vdev_ops->vdev_op_leaf)
3029 return (B_TRUE);
3030
3031 return (vd->vdev_ops->vdev_op_need_resilver(vd, dva, psize,
3032 phys_birth));
3033 }
3034
3035 /*
3036 * Returns the lowest txg in the DTL range.
3037 */
3038 static uint64_t
3039 vdev_dtl_min(vdev_t *vd)
3040 {
3041 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
3042 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
3043 ASSERT0(vd->vdev_children);
3044
3045 return (range_tree_min(vd->vdev_dtl[DTL_MISSING]) - 1);
3046 }
3047
3048 /*
3049 * Returns the highest txg in the DTL.
3050 */
3051 static uint64_t
3052 vdev_dtl_max(vdev_t *vd)
3053 {
3054 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
3055 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
3056 ASSERT0(vd->vdev_children);
3057
3058 return (range_tree_max(vd->vdev_dtl[DTL_MISSING]));
3059 }
3060
3061 /*
3062 * Determine if a resilvering vdev should remove any DTL entries from
3063 * its range. If the vdev was resilvering for the entire duration of the
3064 * scan then it should excise that range from its DTLs. Otherwise, this
3065 * vdev is considered partially resilvered and should leave its DTL
3066 * entries intact. The comment in vdev_dtl_reassess() describes how we
3067 * excise the DTLs.
3068 */
3069 static boolean_t
3070 vdev_dtl_should_excise(vdev_t *vd, boolean_t rebuild_done)
3071 {
3072 ASSERT0(vd->vdev_children);
3073
3074 if (vd->vdev_state < VDEV_STATE_DEGRADED)
3075 return (B_FALSE);
3076
3077 if (vd->vdev_resilver_deferred)
3078 return (B_FALSE);
3079
3080 if (range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
3081 return (B_TRUE);
3082
3083 if (rebuild_done) {
3084 vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
3085 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
3086
3087 /* Rebuild not initiated by attach */
3088 if (vd->vdev_rebuild_txg == 0)
3089 return (B_TRUE);
3090
3091 /*
3092 * When a rebuild completes without error then all missing data
3093 * up to the rebuild max txg has been reconstructed and the DTL
3094 * is eligible for excision.
3095 */
3096 if (vrp->vrp_rebuild_state == VDEV_REBUILD_COMPLETE &&
3097 vdev_dtl_max(vd) <= vrp->vrp_max_txg) {
3098 ASSERT3U(vrp->vrp_min_txg, <=, vdev_dtl_min(vd));
3099 ASSERT3U(vrp->vrp_min_txg, <, vd->vdev_rebuild_txg);
3100 ASSERT3U(vd->vdev_rebuild_txg, <=, vrp->vrp_max_txg);
3101 return (B_TRUE);
3102 }
3103 } else {
3104 dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
3105 dsl_scan_phys_t *scnp __maybe_unused = &scn->scn_phys;
3106
3107 /* Resilver not initiated by attach */
3108 if (vd->vdev_resilver_txg == 0)
3109 return (B_TRUE);
3110
3111 /*
3112 * When a resilver is initiated the scan will assign the
3113 * scn_max_txg value to the highest txg value that exists
3114 * in all DTLs. If this device's max DTL is not part of this
3115 * scan (i.e. it is not in the range (scn_min_txg, scn_max_txg]),
3116 * then it is not eligible for excision.
3117 */
3118 if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
3119 ASSERT3U(scnp->scn_min_txg, <=, vdev_dtl_min(vd));
3120 ASSERT3U(scnp->scn_min_txg, <, vd->vdev_resilver_txg);
3121 ASSERT3U(vd->vdev_resilver_txg, <=, scnp->scn_max_txg);
3122 return (B_TRUE);
3123 }
3124 }
3125
3126 return (B_FALSE);
3127 }
3128
3129 /*
3130 * Reassess DTLs after a config change or scrub completion. If txg == 0 no
3131 * write operations will be issued to the pool.
3132 */
3133 void
3134 vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
3135 boolean_t scrub_done, boolean_t rebuild_done)
3136 {
3137 spa_t *spa = vd->vdev_spa;
3138 avl_tree_t reftree;
3139 int minref;
3140
3141 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
3142
3143 for (int c = 0; c < vd->vdev_children; c++)
3144 vdev_dtl_reassess(vd->vdev_child[c], txg,
3145 scrub_txg, scrub_done, rebuild_done);
3146
3147 if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
3148 return;
3149
3150 if (vd->vdev_ops->vdev_op_leaf) {
3151 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
3152 vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
3153 boolean_t check_excise = B_FALSE;
3154 boolean_t wasempty = B_TRUE;
3155
3156 mutex_enter(&vd->vdev_dtl_lock);
3157
3158 /*
3159 * If requested, pretend the scan or rebuild completed cleanly.
3160 */
3161 if (zfs_scan_ignore_errors) {
3162 if (scn != NULL)
3163 scn->scn_phys.scn_errors = 0;
3164 if (vr != NULL)
3165 vr->vr_rebuild_phys.vrp_errors = 0;
3166 }
3167
3168 if (scrub_txg != 0 &&
3169 !range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
3170 wasempty = B_FALSE;
3171 zfs_dbgmsg("guid:%llu txg:%llu scrub:%llu started:%d "
3172 "dtl:%llu/%llu errors:%llu",
3173 (u_longlong_t)vd->vdev_guid, (u_longlong_t)txg,
3174 (u_longlong_t)scrub_txg, spa->spa_scrub_started,
3175 (u_longlong_t)vdev_dtl_min(vd),
3176 (u_longlong_t)vdev_dtl_max(vd),
3177 (u_longlong_t)(scn ? scn->scn_phys.scn_errors : 0));
3178 }
3179
3180 /*
3181 * If we've completed a scrub/resilver or a rebuild cleanly
3182 * then determine if this vdev should remove any DTLs. We
3183 * only want to excise regions on vdevs that were available
3184 * during the entire duration of this scan.
3185 */
3186 if (rebuild_done &&
3187 vr != NULL && vr->vr_rebuild_phys.vrp_errors == 0) {
3188 check_excise = B_TRUE;
3189 } else {
3190 if (spa->spa_scrub_started ||
3191 (scn != NULL && scn->scn_phys.scn_errors == 0)) {
3192 check_excise = B_TRUE;
3193 }
3194 }
3195
3196 if (scrub_txg && check_excise &&
3197 vdev_dtl_should_excise(vd, rebuild_done)) {
3198 /*
3199 * We completed a scrub, resilver or rebuild up to
3200 * scrub_txg. If we did it without rebooting, then
3201 * the scrub dtl will be valid, so excise the old
3202 * region and fold in the scrub dtl. Otherwise,
3203 * leave the dtl as-is if there was an error.
3204 *
3205 * There's a little trick here: to excise the beginning
3206 * of the DTL_MISSING map, we put it into a reference
3207 * tree and then add a segment with refcnt -1 that
3208 * covers the range [0, scrub_txg). This means
3209 * that each txg in that range has refcnt -1 or 0.
3210 * We then add DTL_SCRUB with a refcnt of 2, so that
3211 * entries in the range [0, scrub_txg) will have a
3212 * positive refcnt -- either 1 or 2. We then convert
3213 * the reference tree into the new DTL_MISSING map.
3214 */
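/*
 * Rough illustration with hypothetical values: DTL_MISSING = [90,200),
 * scrub_txg = 150, DTL_SCRUB = [120,130). The portion of DTL_MISSING
 * below scrub_txg, [90,150), nets out to 1 - 1 = 0 except in [120,130),
 * where the scrub map raises it to 2; [150,200) keeps refcount 1.
 * Generating with minref 1 then yields a new DTL_MISSING of [120,130)
 * plus [150,200).
 */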
3215 space_reftree_create(&reftree);
3216 space_reftree_add_map(&reftree,
3217 vd->vdev_dtl[DTL_MISSING], 1);
3218 space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
3219 space_reftree_add_map(&reftree,
3220 vd->vdev_dtl[DTL_SCRUB], 2);
3221 space_reftree_generate_map(&reftree,
3222 vd->vdev_dtl[DTL_MISSING], 1);
3223 space_reftree_destroy(&reftree);
3224
3225 if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
3226 zfs_dbgmsg("update DTL_MISSING:%llu/%llu",
3227 (u_longlong_t)vdev_dtl_min(vd),
3228 (u_longlong_t)vdev_dtl_max(vd));
3229 } else if (!wasempty) {
3230 zfs_dbgmsg("DTL_MISSING is now empty");
3231 }
3232 }
3233 range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
3234 range_tree_walk(vd->vdev_dtl[DTL_MISSING],
3235 range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
3236 if (scrub_done)
3237 range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
3238 range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
3239 if (!vdev_readable(vd))
3240 range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
3241 else
3242 range_tree_walk(vd->vdev_dtl[DTL_MISSING],
3243 range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
3244
3245 /*
3246 * If the vdev was resilvering or rebuilding and no longer
3247 * has any DTLs then reset the appropriate flag and dirty
3248 * the top level so that we persist the change.
3249 */
3250 if (txg != 0 &&
3251 range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
3252 range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) {
3253 if (vd->vdev_rebuild_txg != 0) {
3254 vd->vdev_rebuild_txg = 0;
3255 vdev_config_dirty(vd->vdev_top);
3256 } else if (vd->vdev_resilver_txg != 0) {
3257 vd->vdev_resilver_txg = 0;
3258 vdev_config_dirty(vd->vdev_top);
3259 }
3260 }
3261
3262 mutex_exit(&vd->vdev_dtl_lock);
3263
3264 if (txg != 0)
3265 vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
3266 } else {
3267 mutex_enter(&vd->vdev_dtl_lock);
3268 for (int t = 0; t < DTL_TYPES; t++) {
3269 /* account for child's outage in parent's missing map */
3270 int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
3271 if (t == DTL_SCRUB) {
3272 /* leaf vdevs only */
3273 continue;
3274 }
3275 if (t == DTL_PARTIAL) {
3276 /* i.e. non-zero */
3277 minref = 1;
3278 } else if (vdev_get_nparity(vd) != 0) {
3279 /* RAIDZ, DRAID */
3280 minref = vdev_get_nparity(vd) + 1;
3281 } else {
3282 /* any kind of mirror */
3283 minref = vd->vdev_children;
3284 }
3285 space_reftree_create(&reftree);
3286 for (int c = 0; c < vd->vdev_children; c++) {
3287 vdev_t *cvd = vd->vdev_child[c];
3288 mutex_enter(&cvd->vdev_dtl_lock);
3289 space_reftree_add_map(&reftree,
3290 cvd->vdev_dtl[s], 1);
3291 mutex_exit(&cvd->vdev_dtl_lock);
3292 }
3293 space_reftree_generate_map(&reftree,
3294 vd->vdev_dtl[t], minref);
3295 space_reftree_destroy(&reftree);
3296 }
3297 mutex_exit(&vd->vdev_dtl_lock);
3298 }
3299
3300 if (vd->vdev_top->vdev_ops == &vdev_raidz_ops) {
3301 raidz_dtl_reassessed(vd);
3302 }
3303 }
3304
3305 /*
3306 * Iterate over all the vdevs except spare, and post kobj events
3307 */
3308 void
3309 vdev_post_kobj_evt(vdev_t *vd)
3310 {
3311 if (vd->vdev_ops->vdev_op_kobj_evt_post &&
3312 vd->vdev_kobj_flag == B_FALSE) {
3313 vd->vdev_kobj_flag = B_TRUE;
3314 vd->vdev_ops->vdev_op_kobj_evt_post(vd);
3315 }
3316
3317 for (int c = 0; c < vd->vdev_children; c++)
3318 vdev_post_kobj_evt(vd->vdev_child[c]);
3319 }
3320
3321 /*
3322 * Iterate over all the vdevs except spare, and clear kobj events
3323 */
3324 void
3325 vdev_clear_kobj_evt(vdev_t *vd)
3326 {
3327 vd->vdev_kobj_flag = B_FALSE;
3328
3329 for (int c = 0; c < vd->vdev_children; c++)
3330 vdev_clear_kobj_evt(vd->vdev_child[c]);
3331 }
3332
3333 int
3334 vdev_dtl_load(vdev_t *vd)
3335 {
3336 spa_t *spa = vd->vdev_spa;
3337 objset_t *mos = spa->spa_meta_objset;
3338 range_tree_t *rt;
3339 int error = 0;
3340
3341 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
3342 ASSERT(vdev_is_concrete(vd));
3343
3344 /*
3345 * If the dtl cannot be sync'd there is no need to open it.
3346 */
3347 if (spa->spa_mode == SPA_MODE_READ && !spa->spa_read_spacemaps)
3348 return (0);
3349
3350 error = space_map_open(&vd->vdev_dtl_sm, mos,
3351 vd->vdev_dtl_object, 0, -1ULL, 0);
3352 if (error)
3353 return (error);
3354 ASSERT(vd->vdev_dtl_sm != NULL);
3355
3356 rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
3357 error = space_map_load(vd->vdev_dtl_sm, rt, SM_ALLOC);
3358 if (error == 0) {
3359 mutex_enter(&vd->vdev_dtl_lock);
3360 range_tree_walk(rt, range_tree_add,
3361 vd->vdev_dtl[DTL_MISSING]);
3362 mutex_exit(&vd->vdev_dtl_lock);
3363 }
3364
3365 range_tree_vacate(rt, NULL, NULL);
3366 range_tree_destroy(rt);
3367
3368 return (error);
3369 }
3370
3371 for (int c = 0; c < vd->vdev_children; c++) {
3372 error = vdev_dtl_load(vd->vdev_child[c]);
3373 if (error != 0)
3374 break;
3375 }
3376
3377 return (error);
3378 }
3379
3380 static void
3381 vdev_zap_allocation_data(vdev_t *vd, dmu_tx_t *tx)
3382 {
3383 spa_t *spa = vd->vdev_spa;
3384 objset_t *mos = spa->spa_meta_objset;
3385 vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
3386 const char *string;
3387
3388 ASSERT(alloc_bias != VDEV_BIAS_NONE);
3389
3390 string =
3391 (alloc_bias == VDEV_BIAS_LOG) ? VDEV_ALLOC_BIAS_LOG :
3392 (alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL :
3393 (alloc_bias == VDEV_BIAS_DEDUP) ? VDEV_ALLOC_BIAS_DEDUP : NULL;
3394
3395 ASSERT(string != NULL);
3396 VERIFY0(zap_add(mos, vd->vdev_top_zap, VDEV_TOP_ZAP_ALLOCATION_BIAS,
3397 1, strlen(string) + 1, string, tx));
3398
3399 if (alloc_bias == VDEV_BIAS_SPECIAL || alloc_bias == VDEV_BIAS_DEDUP) {
3400 spa_activate_allocation_classes(spa, tx);
3401 }
3402 }
3403
3404 void
3405 vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
3406 {
3407 spa_t *spa = vd->vdev_spa;
3408
3409 VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
3410 VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
3411 zapobj, tx));
3412 }
3413
3414 uint64_t
3415 vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
3416 {
3417 spa_t *spa = vd->vdev_spa;
3418 uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
3419 DMU_OT_NONE, 0, tx);
3420
3421 ASSERT(zap != 0);
3422 VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
3423 zap, tx));
3424
3425 return (zap);
3426 }
3427
3428 void
3429 vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
3430 {
3431 if (vd->vdev_ops != &vdev_hole_ops &&
3432 vd->vdev_ops != &vdev_missing_ops &&
3433 vd->vdev_ops != &vdev_root_ops &&
3434 !vd->vdev_top->vdev_removing) {
3435 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) {
3436 vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx);
3437 }
3438 if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
3439 vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
3440 if (vd->vdev_alloc_bias != VDEV_BIAS_NONE)
3441 vdev_zap_allocation_data(vd, tx);
3442 }
3443 }
3444 if (vd->vdev_ops == &vdev_root_ops && vd->vdev_root_zap == 0 &&
3445 spa_feature_is_enabled(vd->vdev_spa, SPA_FEATURE_AVZ_V2)) {
3446 if (!spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2))
3447 spa_feature_incr(vd->vdev_spa, SPA_FEATURE_AVZ_V2, tx);
3448 vd->vdev_root_zap = vdev_create_link_zap(vd, tx);
3449 }
3450
3451 for (uint64_t i = 0; i < vd->vdev_children; i++) {
3452 vdev_construct_zaps(vd->vdev_child[i], tx);
3453 }
3454 }
3455
3456 static void
3457 vdev_dtl_sync(vdev_t *vd, uint64_t txg)
3458 {
3459 spa_t *spa = vd->vdev_spa;
3460 range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
3461 objset_t *mos = spa->spa_meta_objset;
3462 range_tree_t *rtsync;
3463 dmu_tx_t *tx;
3464 uint64_t object = space_map_object(vd->vdev_dtl_sm);
3465
3466 ASSERT(vdev_is_concrete(vd));
3467 ASSERT(vd->vdev_ops->vdev_op_leaf);
3468
3469 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3470
3471 if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
3472 mutex_enter(&vd->vdev_dtl_lock);
3473 space_map_free(vd->vdev_dtl_sm, tx);
3474 space_map_close(vd->vdev_dtl_sm);
3475 vd->vdev_dtl_sm = NULL;
3476 mutex_exit(&vd->vdev_dtl_lock);
3477
3478 /*
3479 * We only destroy the leaf ZAP for detached leaves or for
3480 * removed log devices. Removed data devices handle leaf ZAP
3481 * cleanup later, once cancellation is no longer possible.
3482 */
3483 if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached ||
3484 vd->vdev_top->vdev_islog)) {
3485 vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx);
3486 vd->vdev_leaf_zap = 0;
3487 }
3488
3489 dmu_tx_commit(tx);
3490 return;
3491 }
3492
3493 if (vd->vdev_dtl_sm == NULL) {
3494 uint64_t new_object;
3495
3496 new_object = space_map_alloc(mos, zfs_vdev_dtl_sm_blksz, tx);
3497 VERIFY3U(new_object, !=, 0);
3498
3499 VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
3500 0, -1ULL, 0));
3501 ASSERT(vd->vdev_dtl_sm != NULL);
3502 }
3503
3504 rtsync = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
3505
3506 mutex_enter(&vd->vdev_dtl_lock);
3507 range_tree_walk(rt, range_tree_add, rtsync);
3508 mutex_exit(&vd->vdev_dtl_lock);
3509
3510 space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx);
3511 space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx);
3512 range_tree_vacate(rtsync, NULL, NULL);
3513
3514 range_tree_destroy(rtsync);
3515
3516 /*
3517 * If the object for the space map has changed then dirty
3518 * the top level so that we update the config.
3519 */
3520 if (object != space_map_object(vd->vdev_dtl_sm)) {
3521 vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, "
3522 "new object %llu", (u_longlong_t)txg, spa_name(spa),
3523 (u_longlong_t)object,
3524 (u_longlong_t)space_map_object(vd->vdev_dtl_sm));
3525 vdev_config_dirty(vd->vdev_top);
3526 }
3527
3528 dmu_tx_commit(tx);
3529 }
3530
3531 /*
3532 * Determine whether the specified vdev can be offlined/detached/removed
3533 * without losing data.
3534 */
3535 boolean_t
3536 vdev_dtl_required(vdev_t *vd)
3537 {
3538 spa_t *spa = vd->vdev_spa;
3539 vdev_t *tvd = vd->vdev_top;
3540 uint8_t cant_read = vd->vdev_cant_read;
3541 boolean_t required;
3542
3543 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
3544
3545 if (vd == spa->spa_root_vdev || vd == tvd)
3546 return (B_TRUE);
3547
3548 /*
3549 * Temporarily mark the device as unreadable, and then determine
3550 * whether this results in any DTL outages in the top-level vdev.
3551 * If not, we can safely offline/detach/remove the device.
3552 */
3553 vd->vdev_cant_read = B_TRUE;
3554 vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
3555 required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
3556 vd->vdev_cant_read = cant_read;
3557 vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
3558
3559 if (!required && zio_injection_enabled) {
3560 required = !!zio_handle_device_injection(vd, NULL,
3561 SET_ERROR(ECHILD));
3562 }
3563
3564 return (required);
3565 }
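/*
 * Illustrative caller pattern (a sketch mirroring vdev_offline_locked()
 * further down in this file, not new behavior): the offline path refuses
 * to proceed while the DTL check says the device is still required, e.g.
 *
 *	if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd))
 *		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(EBUSY)));
 */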
3566
3567 /*
3568 * Determine if resilver is needed, and if so the txg range.
3569 */
3570 boolean_t
3571 vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
3572 {
3573 boolean_t needed = B_FALSE;
3574 uint64_t thismin = UINT64_MAX;
3575 uint64_t thismax = 0;
3576
3577 if (vd->vdev_children == 0) {
3578 mutex_enter(&vd->vdev_dtl_lock);
3579 if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
3580 vdev_writeable(vd)) {
3581
3582 thismin = vdev_dtl_min(vd);
3583 thismax = vdev_dtl_max(vd);
3584 needed = B_TRUE;
3585 }
3586 mutex_exit(&vd->vdev_dtl_lock);
3587 } else {
3588 for (int c = 0; c < vd->vdev_children; c++) {
3589 vdev_t *cvd = vd->vdev_child[c];
3590 uint64_t cmin, cmax;
3591
3592 if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
3593 thismin = MIN(thismin, cmin);
3594 thismax = MAX(thismax, cmax);
3595 needed = B_TRUE;
3596 }
3597 }
3598 }
3599
3600 if (needed && minp) {
3601 *minp = thismin;
3602 *maxp = thismax;
3603 }
3604 return (needed);
3605 }
3606
3607 /*
3608 * Gets the checkpoint space map object from the vdev's ZAP. On success sm_obj
3609 * will contain either the checkpoint spacemap object or zero if none exists.
3610 * All other errors are returned to the caller.
3611 */
3612 int
3613 vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj)
3614 {
3615 ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
3616
3617 if (vd->vdev_top_zap == 0) {
3618 *sm_obj = 0;
3619 return (0);
3620 }
3621
3622 int error = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
3623 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, sm_obj);
3624 if (error == ENOENT) {
3625 *sm_obj = 0;
3626 error = 0;
3627 }
3628
3629 return (error);
3630 }
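/*
 * Hedged usage sketch (mirrors the vdev_load() caller below): a zero return
 * with *sm_obj == 0 means "no checkpoint", while a nonzero return is a real
 * lookup failure, e.g.
 *
 *	uint64_t obj;
 *	if (vdev_checkpoint_sm_object(vd, &obj) == 0 && obj != 0)
 *		... open the checkpoint space map ...
 */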
3631
3632 int
3633 vdev_load(vdev_t *vd)
3634 {
3635 int children = vd->vdev_children;
3636 int error = 0;
3637 taskq_t *tq = NULL;
3638
3639 /*
3640 * It's only worthwhile to use the taskq for the root vdev, because the
3641 * slow part is metaslab_init, and that only happens for top-level
3642 * vdevs.
3643 */
3644 if (vd->vdev_ops == &vdev_root_ops && vd->vdev_children > 0) {
3645 tq = taskq_create("vdev_load", children, minclsyspri,
3646 children, children, TASKQ_PREPOPULATE);
3647 }
3648
3649 /*
3650 * Recursively load all children.
3651 */
3652 for (int c = 0; c < vd->vdev_children; c++) {
3653 vdev_t *cvd = vd->vdev_child[c];
3654
3655 if (tq == NULL || vdev_uses_zvols(cvd)) {
3656 cvd->vdev_load_error = vdev_load(cvd);
3657 } else {
3658 VERIFY(taskq_dispatch(tq, vdev_load_child,
3659 cvd, TQ_SLEEP) != TASKQID_INVALID);
3660 }
3661 }
3662
3663 if (tq != NULL) {
3664 taskq_wait(tq);
3665 taskq_destroy(tq);
3666 }
3667
3668 for (int c = 0; c < vd->vdev_children; c++) {
3669 int error = vd->vdev_child[c]->vdev_load_error;
3670
3671 if (error != 0)
3672 return (error);
3673 }
3674
3675 vdev_set_deflate_ratio(vd);
3676
3677 if (vd->vdev_ops == &vdev_raidz_ops) {
3678 error = vdev_raidz_load(vd);
3679 if (error != 0)
3680 return (error);
3681 }
3682
3683 /*
3684 * On spa_load path, grab the allocation bias from our zap
3685 */
3686 if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
3687 spa_t *spa = vd->vdev_spa;
3688 char bias_str[64];
3689
3690 error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
3691 VDEV_TOP_ZAP_ALLOCATION_BIAS, 1, sizeof (bias_str),
3692 bias_str);
3693 if (error == 0) {
3694 ASSERT(vd->vdev_alloc_bias == VDEV_BIAS_NONE);
3695 vd->vdev_alloc_bias = vdev_derive_alloc_bias(bias_str);
3696 } else if (error != ENOENT) {
3697 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3698 VDEV_AUX_CORRUPT_DATA);
3699 vdev_dbgmsg(vd, "vdev_load: zap_lookup(top_zap=%llu) "
3700 "failed [error=%d]",
3701 (u_longlong_t)vd->vdev_top_zap, error);
3702 return (error);
3703 }
3704 }
3705
3706 if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
3707 spa_t *spa = vd->vdev_spa;
3708 uint64_t failfast;
3709
3710 error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
3711 vdev_prop_to_name(VDEV_PROP_FAILFAST), sizeof (failfast),
3712 1, &failfast);
3713 if (error == 0) {
3714 vd->vdev_failfast = failfast & 1;
3715 } else if (error == ENOENT) {
3716 vd->vdev_failfast = vdev_prop_default_numeric(
3717 VDEV_PROP_FAILFAST);
3718 } else {
3719 vdev_dbgmsg(vd,
3720 "vdev_load: zap_lookup(top_zap=%llu) "
3721 "failed [error=%d]",
3722 (u_longlong_t)vd->vdev_top_zap, error);
3723 }
3724 }
3725
3726 /*
3727 * Load any rebuild state from the top-level vdev zap.
3728 */
3729 if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
3730 error = vdev_rebuild_load(vd);
3731 if (error && error != ENOTSUP) {
3732 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3733 VDEV_AUX_CORRUPT_DATA);
3734 vdev_dbgmsg(vd, "vdev_load: vdev_rebuild_load "
3735 "failed [error=%d]", error);
3736 return (error);
3737 }
3738 }
3739
3740 if (vd->vdev_top_zap != 0 || vd->vdev_leaf_zap != 0) {
3741 uint64_t zapobj;
3742
3743 if (vd->vdev_top_zap != 0)
3744 zapobj = vd->vdev_top_zap;
3745 else
3746 zapobj = vd->vdev_leaf_zap;
3747
3748 error = vdev_prop_get_int(vd, VDEV_PROP_CHECKSUM_N,
3749 &vd->vdev_checksum_n);
3750 if (error && error != ENOENT)
3751 vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3752 "failed [error=%d]", (u_longlong_t)zapobj, error);
3753
3754 error = vdev_prop_get_int(vd, VDEV_PROP_CHECKSUM_T,
3755 &vd->vdev_checksum_t);
3756 if (error && error != ENOENT)
3757 vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3758 "failed [error=%d]", (u_longlong_t)zapobj, error);
3759
3760 error = vdev_prop_get_int(vd, VDEV_PROP_IO_N,
3761 &vd->vdev_io_n);
3762 if (error && error != ENOENT)
3763 vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3764 "failed [error=%d]", (u_longlong_t)zapobj, error);
3765
3766 error = vdev_prop_get_int(vd, VDEV_PROP_IO_T,
3767 &vd->vdev_io_t);
3768 if (error && error != ENOENT)
3769 vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3770 "failed [error=%d]", (u_longlong_t)zapobj, error);
3771
3772 error = vdev_prop_get_int(vd, VDEV_PROP_SLOW_IO_N,
3773 &vd->vdev_slow_io_n);
3774 if (error && error != ENOENT)
3775 vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3776 "failed [error=%d]", (u_longlong_t)zapobj, error);
3777
3778 error = vdev_prop_get_int(vd, VDEV_PROP_SLOW_IO_T,
3779 &vd->vdev_slow_io_t);
3780 if (error && error != ENOENT)
3781 vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3782 "failed [error=%d]", (u_longlong_t)zapobj, error);
3783 }
3784
3785 /*
3786 * If this is a top-level vdev, initialize its metaslabs.
3787 */
3788 if (vd == vd->vdev_top && vdev_is_concrete(vd)) {
3789 vdev_metaslab_group_create(vd);
3790
3791 if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
3792 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3793 VDEV_AUX_CORRUPT_DATA);
3794 vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, "
3795 "asize=%llu", (u_longlong_t)vd->vdev_ashift,
3796 (u_longlong_t)vd->vdev_asize);
3797 return (SET_ERROR(ENXIO));
3798 }
3799
3800 error = vdev_metaslab_init(vd, 0);
3801 if (error != 0) {
3802 vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
3803 "[error=%d]", error);
3804 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3805 VDEV_AUX_CORRUPT_DATA);
3806 return (error);
3807 }
3808
3809 uint64_t checkpoint_sm_obj;
3810 error = vdev_checkpoint_sm_object(vd, &checkpoint_sm_obj);
3811 if (error == 0 && checkpoint_sm_obj != 0) {
3812 objset_t *mos = spa_meta_objset(vd->vdev_spa);
3813 ASSERT(vd->vdev_asize != 0);
3814 ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
3815
3816 error = space_map_open(&vd->vdev_checkpoint_sm,
3817 mos, checkpoint_sm_obj, 0, vd->vdev_asize,
3818 vd->vdev_ashift);
3819 if (error != 0) {
3820 vdev_dbgmsg(vd, "vdev_load: space_map_open "
3821 "failed for checkpoint spacemap (obj %llu) "
3822 "[error=%d]",
3823 (u_longlong_t)checkpoint_sm_obj, error);
3824 return (error);
3825 }
3826 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
3827
3828 /*
3829 * Since the checkpoint_sm contains free entries
3830 * exclusively we can use space_map_allocated() to
3831 * indicate the cumulative checkpointed space that
3832 * has been freed.
3833 */
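/*
 * Worked example (illustrative numbers, not taken from any pool): if
 * 10 GiB has been freed since the checkpoint was taken,
 * space_map_allocated() returns -10 GiB here, so vs_checkpoint_space
 * below becomes +10 GiB.
 */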
3834 vd->vdev_stat.vs_checkpoint_space =
3835 -space_map_allocated(vd->vdev_checkpoint_sm);
3836 vd->vdev_spa->spa_checkpoint_info.sci_dspace +=
3837 vd->vdev_stat.vs_checkpoint_space;
3838 } else if (error != 0) {
3839 vdev_dbgmsg(vd, "vdev_load: failed to retrieve "
3840 "checkpoint space map object from vdev ZAP "
3841 "[error=%d]", error);
3842 return (error);
3843 }
3844 }
3845
3846 /*
3847 * If this is a leaf vdev, load its DTL.
3848 */
3849 if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
3850 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3851 VDEV_AUX_CORRUPT_DATA);
3852 vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
3853 "[error=%d]", error);
3854 return (error);
3855 }
3856
3857 uint64_t obsolete_sm_object;
3858 error = vdev_obsolete_sm_object(vd, &obsolete_sm_object);
3859 if (error == 0 && obsolete_sm_object != 0) {
3860 objset_t *mos = vd->vdev_spa->spa_meta_objset;
3861 ASSERT(vd->vdev_asize != 0);
3862 ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
3863
3864 if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
3865 obsolete_sm_object, 0, vd->vdev_asize, 0))) {
3866 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3867 VDEV_AUX_CORRUPT_DATA);
3868 vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
3869 "obsolete spacemap (obj %llu) [error=%d]",
3870 (u_longlong_t)obsolete_sm_object, error);
3871 return (error);
3872 }
3873 } else if (error != 0) {
3874 vdev_dbgmsg(vd, "vdev_load: failed to retrieve obsolete "
3875 "space map object from vdev ZAP [error=%d]", error);
3876 return (error);
3877 }
3878
3879 return (0);
3880 }
3881
3882 /*
3883 * The special vdev case is used for hot spares and l2cache devices. Its
3884 * sole purpose is to set the vdev state for the associated vdev. To do this,
3885 * we make sure that we can open the underlying device, then try to read the
3886 * label, and make sure that the label is sane and that it hasn't been
3887 * repurposed to another pool.
3888 */
3889 int
3890 vdev_validate_aux(vdev_t *vd)
3891 {
3892 nvlist_t *label;
3893 uint64_t guid, version;
3894 uint64_t state;
3895
3896 if (!vdev_readable(vd))
3897 return (0);
3898
3899 if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
3900 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
3901 VDEV_AUX_CORRUPT_DATA);
3902 return (-1);
3903 }
3904
3905 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
3906 !SPA_VERSION_IS_SUPPORTED(version) ||
3907 nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
3908 guid != vd->vdev_guid ||
3909 nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
3910 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
3911 VDEV_AUX_CORRUPT_DATA);
3912 nvlist_free(label);
3913 return (-1);
3914 }
3915
3916 /*
3917 * We don't actually check the pool state here. If it's in fact in
3918 * use by another pool, we update this fact on the fly when requested.
3919 */
3920 nvlist_free(label);
3921 return (0);
3922 }
3923
3924 static void
3925 vdev_destroy_ms_flush_data(vdev_t *vd, dmu_tx_t *tx)
3926 {
3927 objset_t *mos = spa_meta_objset(vd->vdev_spa);
3928
3929 if (vd->vdev_top_zap == 0)
3930 return;
3931
3932 uint64_t object = 0;
3933 int err = zap_lookup(mos, vd->vdev_top_zap,
3934 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, &object);
3935 if (err == ENOENT)
3936 return;
3937 VERIFY0(err);
3938
3939 VERIFY0(dmu_object_free(mos, object, tx));
3940 VERIFY0(zap_remove(mos, vd->vdev_top_zap,
3941 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, tx));
3942 }
3943
3944 /*
3945 * Free the objects used to store this vdev's spacemaps, and the array
3946 * that points to them.
3947 */
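/*
 * For scale (illustrative numbers): a 1 TiB top-level vdev with 1 GiB
 * metaslabs (vdev_ms_shift == 30) has asize >> ms_shift == 1024 array
 * entries, i.e. an 8 KiB on-disk array of space map object ids.
 */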
3948 void
3949 vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx)
3950 {
3951 if (vd->vdev_ms_array == 0)
3952 return;
3953
3954 objset_t *mos = vd->vdev_spa->spa_meta_objset;
3955 uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift;
3956 size_t array_bytes = array_count * sizeof (uint64_t);
3957 uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP);
3958 VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0,
3959 array_bytes, smobj_array, 0));
3960
3961 for (uint64_t i = 0; i < array_count; i++) {
3962 uint64_t smobj = smobj_array[i];
3963 if (smobj == 0)
3964 continue;
3965
3966 space_map_free_obj(mos, smobj, tx);
3967 }
3968
3969 kmem_free(smobj_array, array_bytes);
3970 VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx));
3971 vdev_destroy_ms_flush_data(vd, tx);
3972 vd->vdev_ms_array = 0;
3973 }
3974
3975 static void
3976 vdev_remove_empty_log(vdev_t *vd, uint64_t txg)
3977 {
3978 spa_t *spa = vd->vdev_spa;
3979
3980 ASSERT(vd->vdev_islog);
3981 ASSERT(vd == vd->vdev_top);
3982 ASSERT3U(txg, ==, spa_syncing_txg(spa));
3983
3984 dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
3985
3986 vdev_destroy_spacemaps(vd, tx);
3987 if (vd->vdev_top_zap != 0) {
3988 vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
3989 vd->vdev_top_zap = 0;
3990 }
3991
3992 dmu_tx_commit(tx);
3993 }
3994
3995 void
3996 vdev_sync_done(vdev_t *vd, uint64_t txg)
3997 {
3998 metaslab_t *msp;
3999 boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
4000
4001 ASSERT(vdev_is_concrete(vd));
4002
4003 while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
4004 != NULL)
4005 metaslab_sync_done(msp, txg);
4006
4007 if (reassess) {
4008 metaslab_sync_reassess(vd->vdev_mg);
4009 if (vd->vdev_log_mg != NULL)
4010 metaslab_sync_reassess(vd->vdev_log_mg);
4011 }
4012 }
4013
4014 void
4015 vdev_sync(vdev_t *vd, uint64_t txg)
4016 {
4017 spa_t *spa = vd->vdev_spa;
4018 vdev_t *lvd;
4019 metaslab_t *msp;
4020
4021 ASSERT3U(txg, ==, spa->spa_syncing_txg);
4022 dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
4023 if (range_tree_space(vd->vdev_obsolete_segments) > 0) {
4024 ASSERT(vd->vdev_removing ||
4025 vd->vdev_ops == &vdev_indirect_ops);
4026
4027 vdev_indirect_sync_obsolete(vd, tx);
4028
4029 /*
4030 * If the vdev is indirect, it can't have dirty
4031 * metaslabs or DTLs.
4032 */
4033 if (vd->vdev_ops == &vdev_indirect_ops) {
4034 ASSERT(txg_list_empty(&vd->vdev_ms_list, txg));
4035 ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg));
4036 dmu_tx_commit(tx);
4037 return;
4038 }
4039 }
4040
4041 ASSERT(vdev_is_concrete(vd));
4042
4043 if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 &&
4044 !vd->vdev_removing) {
4045 ASSERT(vd == vd->vdev_top);
4046 ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
4047 vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
4048 DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
4049 ASSERT(vd->vdev_ms_array != 0);
4050 vdev_config_dirty(vd);
4051 }
4052
4053 while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
4054 metaslab_sync(msp, txg);
4055 (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
4056 }
4057
4058 while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
4059 vdev_dtl_sync(lvd, txg);
4060
4061 /*
4062 * If this is an empty log device being removed, destroy the
4063 * metadata associated with it.
4064 */
4065 if (vd->vdev_islog && vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
4066 vdev_remove_empty_log(vd, txg);
4067
4068 (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
4069 dmu_tx_commit(tx);
4070 }
4071
4072 /*
4073 * Return the amount of space that should be (or was) allocated for the given
4074 * psize (compressed block size) in the given TXG. Note that for expanded
4075 * RAIDZ vdevs, the size allocated for older BP's may be larger. See
4076 * vdev_raidz_asize().
4077 */
4078 uint64_t
4079 vdev_psize_to_asize_txg(vdev_t *vd, uint64_t psize, uint64_t txg)
4080 {
4081 return (vd->vdev_ops->vdev_op_asize(vd, psize, txg));
4082 }
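/*
 * Worked example of the mapping described above (illustrative, assuming a
 * 3-wide raidz1 top-level vdev with ashift == 12): a 16K psize spans 4 data
 * sectors, which need two rows of two data sectors plus one parity sector
 * each, so the asize is 6 sectors, i.e. 24K.
 */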
4083
4084 uint64_t
4085 vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
4086 {
4087 return (vdev_psize_to_asize_txg(vd, psize, 0));
4088 }
4089
4090 /*
4091 * Mark the given vdev faulted. A faulted vdev behaves as if the device could
4092 * not be opened, and no I/O is attempted.
4093 */
4094 int
4095 vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
4096 {
4097 vdev_t *vd, *tvd;
4098
4099 spa_vdev_state_enter(spa, SCL_NONE);
4100
4101 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4102 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4103
4104 if (!vd->vdev_ops->vdev_op_leaf)
4105 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
4106
4107 tvd = vd->vdev_top;
4108
4109 /*
4110 * If user did a 'zpool offline -f' then make the fault persist across
4111 * reboots.
4112 */
4113 if (aux == VDEV_AUX_EXTERNAL_PERSIST) {
4114 /*
4115 * There are two kinds of forced faults: temporary and
4116 * persistent. Temporary faults go away at pool import, while
4117 * persistent faults stay set. Both types of faults can be
4118 * cleared with a zpool clear.
4119 *
4120 * We tell if a vdev is persistently faulted by looking at the
4121 * ZPOOL_CONFIG_AUX_STATE nvpair. If it's set to "external" at
4122 * import then it's a persistent fault. Otherwise, it's
4123 * temporary. We get ZPOOL_CONFIG_AUX_STATE set to "external"
4124 * by setting vd.vdev_stat.vs_aux to VDEV_AUX_EXTERNAL. This
4125 * tells vdev_config_generate() (which gets run later) to set
4126 * ZPOOL_CONFIG_AUX_STATE to "external" in the nvlist.
4127 */
4128 vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
4129 vd->vdev_tmpoffline = B_FALSE;
4130 aux = VDEV_AUX_EXTERNAL;
4131 } else {
4132 vd->vdev_tmpoffline = B_TRUE;
4133 }
4134
4135 /*
4136 * We don't directly use the aux state here, but if we do a
4137 * vdev_reopen(), we need this value to be present to remember why we
4138 * were faulted.
4139 */
4140 vd->vdev_label_aux = aux;
4141
4142 /*
4143 * Faulted state takes precedence over degraded.
4144 */
4145 vd->vdev_delayed_close = B_FALSE;
4146 vd->vdev_faulted = 1ULL;
4147 vd->vdev_degraded = 0ULL;
4148 vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
4149
4150 /*
4151 * If this device has the only valid copy of the data, then
4152 * back off and simply mark the vdev as degraded instead.
4153 */
4154 if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
4155 vd->vdev_degraded = 1ULL;
4156 vd->vdev_faulted = 0ULL;
4157
4158 /*
4159 * If we reopen the device and it's not dead, only then do we
4160 * mark it degraded.
4161 */
4162 vdev_reopen(tvd);
4163
4164 if (vdev_readable(vd))
4165 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
4166 }
4167
4168 return (spa_vdev_state_exit(spa, vd, 0));
4169 }
4170
4171 /*
4172 * Mark the given vdev degraded. A degraded vdev is purely an indication to the
4173 * user that something is wrong. The vdev continues to operate as normal as far
4174 * as I/O is concerned.
4175 */
4176 int
4177 vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
4178 {
4179 vdev_t *vd;
4180
4181 spa_vdev_state_enter(spa, SCL_NONE);
4182
4183 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4184 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4185
4186 if (!vd->vdev_ops->vdev_op_leaf)
4187 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
4188
4189 /*
4190 * If the vdev is already faulted, then don't do anything.
4191 */
4192 if (vd->vdev_faulted || vd->vdev_degraded)
4193 return (spa_vdev_state_exit(spa, NULL, 0));
4194
4195 vd->vdev_degraded = 1ULL;
4196 if (!vdev_is_dead(vd))
4197 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
4198 aux);
4199
4200 return (spa_vdev_state_exit(spa, vd, 0));
4201 }
4202
4203 int
4204 vdev_remove_wanted(spa_t *spa, uint64_t guid)
4205 {
4206 vdev_t *vd;
4207
4208 spa_vdev_state_enter(spa, SCL_NONE);
4209
4210 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4211 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4212
4213 /*
4214 * If the vdev is already removed, or is expanding (which can trigger
4215 * repartition add/remove events), then don't do anything.
4216 */
4217 if (vd->vdev_removed || vd->vdev_expanding)
4218 return (spa_vdev_state_exit(spa, NULL, 0));
4219
4220 /*
4221 * Confirm the vdev has been removed, otherwise don't do anything.
4222 */
4223 if (vd->vdev_ops->vdev_op_leaf && !zio_wait(vdev_probe(vd, NULL)))
4224 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(EEXIST)));
4225
4226 vd->vdev_remove_wanted = B_TRUE;
4227 spa_async_request(spa, SPA_ASYNC_REMOVE);
4228
4229 return (spa_vdev_state_exit(spa, vd, 0));
4230 }
4231
4232
4233 /*
4234 * Online the given vdev.
4235 *
4236 * If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached
4237 * spare device should be detached when the device finishes resilvering.
4238 * Second, the online should be treated like a 'test' online case, so no FMA
4239 * events are generated if the device fails to open.
4240 */
4241 int
4242 vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
4243 {
4244 vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
4245 boolean_t wasoffline;
4246 vdev_state_t oldstate;
4247
4248 spa_vdev_state_enter(spa, SCL_NONE);
4249
4250 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4251 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4252
4253 wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
4254 oldstate = vd->vdev_state;
4255
4256 tvd = vd->vdev_top;
4257 vd->vdev_offline = B_FALSE;
4258 vd->vdev_tmpoffline = B_FALSE;
4259 vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
4260 vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
4261
4262 /* XXX - L2ARC 1.0 does not support expansion */
4263 if (!vd->vdev_aux) {
4264 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
4265 pvd->vdev_expanding = !!((flags & ZFS_ONLINE_EXPAND) ||
4266 spa->spa_autoexpand);
4267 vd->vdev_expansion_time = gethrestime_sec();
4268 }
4269
4270 vdev_reopen(tvd);
4271 vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
4272
4273 if (!vd->vdev_aux) {
4274 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
4275 pvd->vdev_expanding = B_FALSE;
4276 }
4277
4278 if (newstate)
4279 *newstate = vd->vdev_state;
4280 if ((flags & ZFS_ONLINE_UNSPARE) &&
4281 !vdev_is_dead(vd) && vd->vdev_parent &&
4282 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
4283 vd->vdev_parent->vdev_child[0] == vd)
4284 vd->vdev_unspare = B_TRUE;
4285
4286 if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
4287
4288 /* XXX - L2ARC 1.0 does not support expansion */
4289 if (vd->vdev_aux)
4290 return (spa_vdev_state_exit(spa, vd, ENOTSUP));
4291 spa->spa_ccw_fail_time = 0;
4292 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
4293 }
4294
4295 /* Restart initializing if necessary */
4296 mutex_enter(&vd->vdev_initialize_lock);
4297 if (vdev_writeable(vd) &&
4298 vd->vdev_initialize_thread == NULL &&
4299 vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) {
4300 (void) vdev_initialize(vd);
4301 }
4302 mutex_exit(&vd->vdev_initialize_lock);
4303
4304 /*
4305 * Restart trimming if necessary. We do not restart trimming for cache
4306 * devices here. This is triggered by l2arc_rebuild_vdev()
4307 * asynchronously for the whole device or in l2arc_evict() as it evicts
4308 * space for upcoming writes.
4309 */
4310 mutex_enter(&vd->vdev_trim_lock);
4311 if (vdev_writeable(vd) && !vd->vdev_isl2cache &&
4312 vd->vdev_trim_thread == NULL &&
4313 vd->vdev_trim_state == VDEV_TRIM_ACTIVE) {
4314 (void) vdev_trim(vd, vd->vdev_trim_rate, vd->vdev_trim_partial,
4315 vd->vdev_trim_secure);
4316 }
4317 mutex_exit(&vd->vdev_trim_lock);
4318
4319 if (wasoffline ||
4320 (oldstate < VDEV_STATE_DEGRADED &&
4321 vd->vdev_state >= VDEV_STATE_DEGRADED)) {
4322 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE);
4323
4324 /*
4325 * Asynchronously detach spare vdev if resilver or
4326 * rebuild is not required
4327 */
4328 if (vd->vdev_unspare &&
4329 !dsl_scan_resilvering(spa->spa_dsl_pool) &&
4330 !dsl_scan_resilver_scheduled(spa->spa_dsl_pool) &&
4331 !vdev_rebuild_active(tvd))
4332 spa_async_request(spa, SPA_ASYNC_DETACH_SPARE);
4333 }
4334 return (spa_vdev_state_exit(spa, vd, 0));
4335 }
4336
4337 static int
4338 vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
4339 {
4340 vdev_t *vd, *tvd;
4341 int error = 0;
4342 uint64_t generation;
4343 metaslab_group_t *mg;
4344
4345 top:
4346 spa_vdev_state_enter(spa, SCL_ALLOC);
4347
4348 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4349 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4350
4351 if (!vd->vdev_ops->vdev_op_leaf)
4352 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
4353
4354 if (vd->vdev_ops == &vdev_draid_spare_ops)
4355 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
4356
4357 tvd = vd->vdev_top;
4358 mg = tvd->vdev_mg;
4359 generation = spa->spa_config_generation + 1;
4360
4361 /*
4362 * If the device isn't already offline, try to offline it.
4363 */
4364 if (!vd->vdev_offline) {
4365 /*
4366 * If this device has the only valid copy of some data,
4367 * don't allow it to be offlined. Log devices are always
4368 * expendable.
4369 */
4370 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
4371 vdev_dtl_required(vd))
4372 return (spa_vdev_state_exit(spa, NULL,
4373 SET_ERROR(EBUSY)));
4374
4375 /*
4376 * If the top-level is a slog and it has had allocations
4377 * then proceed. We check that the vdev's metaslab group
4378 * is not NULL since it's possible that we may have just
4379 * added this vdev but not yet initialized its metaslabs.
4380 */
4381 if (tvd->vdev_islog && mg != NULL) {
4382 /*
4383 * Prevent any future allocations.
4384 */
4385 ASSERT3P(tvd->vdev_log_mg, ==, NULL);
4386 metaslab_group_passivate(mg);
4387 (void) spa_vdev_state_exit(spa, vd, 0);
4388
4389 error = spa_reset_logs(spa);
4390
4391 /*
4392 * If the log device was successfully reset but has
4393 * checkpointed data, do not offline it.
4394 */
4395 if (error == 0 &&
4396 tvd->vdev_checkpoint_sm != NULL) {
4397 ASSERT3U(space_map_allocated(
4398 tvd->vdev_checkpoint_sm), !=, 0);
4399 error = ZFS_ERR_CHECKPOINT_EXISTS;
4400 }
4401
4402 spa_vdev_state_enter(spa, SCL_ALLOC);
4403
4404 /*
4405 * Check to see if the config has changed.
4406 */
4407 if (error || generation != spa->spa_config_generation) {
4408 metaslab_group_activate(mg);
4409 if (error)
4410 return (spa_vdev_state_exit(spa,
4411 vd, error));
4412 (void) spa_vdev_state_exit(spa, vd, 0);
4413 goto top;
4414 }
4415 ASSERT0(tvd->vdev_stat.vs_alloc);
4416 }
4417
4418 /*
4419 * Offline this device and reopen its top-level vdev.
4420 * If the top-level vdev is a log device then just offline
4421 * it. Otherwise, if this action results in the top-level
4422 * vdev becoming unusable, undo it and fail the request.
4423 */
4424 vd->vdev_offline = B_TRUE;
4425 vdev_reopen(tvd);
4426
4427 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
4428 vdev_is_dead(tvd)) {
4429 vd->vdev_offline = B_FALSE;
4430 vdev_reopen(tvd);
4431 return (spa_vdev_state_exit(spa, NULL,
4432 SET_ERROR(EBUSY)));
4433 }
4434
4435 /*
4436 * Add the device back into the metaslab rotor so that
4437 * once we online the device it's open for business.
4438 */
4439 if (tvd->vdev_islog && mg != NULL)
4440 metaslab_group_activate(mg);
4441 }
4442
4443 vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
4444
4445 return (spa_vdev_state_exit(spa, vd, 0));
4446 }
4447
4448 int
4449 vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
4450 {
4451 int error;
4452
4453 mutex_enter(&spa->spa_vdev_top_lock);
4454 error = vdev_offline_locked(spa, guid, flags);
4455 mutex_exit(&spa->spa_vdev_top_lock);
4456
4457 return (error);
4458 }
4459
4460 /*
4461 * Clear the error counts associated with this vdev. Unlike vdev_online() and
4462 * vdev_offline(), we assume the spa config is locked. We also clear all
4463 * children. If 'vd' is NULL, then the user wants to clear all vdevs.
4464 */
4465 void
4466 vdev_clear(spa_t *spa, vdev_t *vd)
4467 {
4468 vdev_t *rvd = spa->spa_root_vdev;
4469
4470 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
4471
4472 if (vd == NULL)
4473 vd = rvd;
4474
4475 vd->vdev_stat.vs_read_errors = 0;
4476 vd->vdev_stat.vs_write_errors = 0;
4477 vd->vdev_stat.vs_checksum_errors = 0;
4478 vd->vdev_stat.vs_slow_ios = 0;
4479
4480 for (int c = 0; c < vd->vdev_children; c++)
4481 vdev_clear(spa, vd->vdev_child[c]);
4482
4483 /*
4484 * It makes no sense to "clear" an indirect or removed vdev.
4485 */
4486 if (!vdev_is_concrete(vd) || vd->vdev_removed)
4487 return;
4488
4489 /*
4490 * If we're in the FAULTED state or have experienced failed I/O, then
4491 * clear the persistent state and attempt to reopen the device. We
4492 * also mark the vdev config dirty, so that the new faulted state is
4493 * written out to disk.
4494 */
4495 if (vd->vdev_faulted || vd->vdev_degraded ||
4496 !vdev_readable(vd) || !vdev_writeable(vd)) {
4497 /*
4498 * When reopening in response to a clear event, it may be due to
4499 * an fmadm repair request. In this case, if the device is
4500 * still broken, we still want to post the ereport again.
4501 */
4502 vd->vdev_forcefault = B_TRUE;
4503
4504 vd->vdev_faulted = vd->vdev_degraded = 0ULL;
4505 vd->vdev_cant_read = B_FALSE;
4506 vd->vdev_cant_write = B_FALSE;
4507 vd->vdev_stat.vs_aux = 0;
4508
4509 vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
4510
4511 vd->vdev_forcefault = B_FALSE;
4512
4513 if (vd != rvd && vdev_writeable(vd->vdev_top))
4514 vdev_state_dirty(vd->vdev_top);
4515
4516 /* If a resilver isn't required, check if vdevs can be culled */
4517 if (vd->vdev_aux == NULL && !vdev_is_dead(vd) &&
4518 !dsl_scan_resilvering(spa->spa_dsl_pool) &&
4519 !dsl_scan_resilver_scheduled(spa->spa_dsl_pool))
4520 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
4521
4522 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
4523 }
4524
4525 /*
4526 * When clearing a FMA-diagnosed fault, we always want to
4527 * unspare the device, as we assume that the original spare was
4528 * done in response to the FMA fault.
4529 */
4530 if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
4531 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
4532 vd->vdev_parent->vdev_child[0] == vd)
4533 vd->vdev_unspare = B_TRUE;
4534
4535 /* Clear recent error events cache (i.e. duplicate events tracking) */
4536 zfs_ereport_clear(spa, vd);
4537 }
4538
4539 boolean_t
4540 vdev_is_dead(vdev_t *vd)
4541 {
4542 /*
4543 * Holes and missing devices are always considered "dead".
4544 * This simplifies the code since we don't have to check for
4545 * these types of devices in the various code paths.
4546 * Instead we rely on the fact that we skip over dead devices
4547 * before issuing I/O to them.
4548 */
4549 return (vd->vdev_state < VDEV_STATE_DEGRADED ||
4550 vd->vdev_ops == &vdev_hole_ops ||
4551 vd->vdev_ops == &vdev_missing_ops);
4552 }
4553
4554 boolean_t
4555 vdev_readable(vdev_t *vd)
4556 {
4557 return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
4558 }
4559
4560 boolean_t
4561 vdev_writeable(vdev_t *vd)
4562 {
4563 return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
4564 vdev_is_concrete(vd));
4565 }
4566
4567 boolean_t
4568 vdev_allocatable(vdev_t *vd)
4569 {
4570 uint64_t state = vd->vdev_state;
4571
4572 /*
4573 * We currently allow allocations from vdevs which may be in the
4574 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device
4575 * fails to reopen then we'll catch it later when we're holding
4576 * the proper locks. Note that we have to get the vdev state
4577 * in a local variable because although it changes atomically,
4578 * we're asking two separate questions about it.
4579 */
4580 return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
4581 !vd->vdev_cant_write && vdev_is_concrete(vd) &&
4582 vd->vdev_mg->mg_initialized);
4583 }
4584
4585 boolean_t
4586 vdev_accessible(vdev_t *vd, zio_t *zio)
4587 {
4588 ASSERT(zio->io_vd == vd);
4589
4590 if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
4591 return (B_FALSE);
4592
4593 if (zio->io_type == ZIO_TYPE_READ)
4594 return (!vd->vdev_cant_read);
4595
4596 if (zio->io_type == ZIO_TYPE_WRITE)
4597 return (!vd->vdev_cant_write);
4598
4599 return (B_TRUE);
4600 }
4601
4602 static void
4603 vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs)
4604 {
4605 /*
4606 * Exclude the dRAID spare when aggregating to avoid double counting
4607 * the ops and bytes. These IOs are counted by the physical leaves.
4608 */
4609 if (cvd->vdev_ops == &vdev_draid_spare_ops)
4610 return;
4611
4612 for (int t = 0; t < VS_ZIO_TYPES; t++) {
4613 vs->vs_ops[t] += cvs->vs_ops[t];
4614 vs->vs_bytes[t] += cvs->vs_bytes[t];
4615 }
4616
4617 cvs->vs_scan_removing = cvd->vdev_removing;
4618 }
4619
4620 /*
4621 * Get extended stats
4622 */
4623 static void
4624 vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
4625 {
4626 (void) cvd;
4627
4628 int t, b;
4629 for (t = 0; t < ZIO_TYPES; t++) {
4630 for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++)
4631 vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b];
4632
4633 for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) {
4634 vsx->vsx_total_histo[t][b] +=
4635 cvsx->vsx_total_histo[t][b];
4636 }
4637 }
4638
4639 for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
4640 for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) {
4641 vsx->vsx_queue_histo[t][b] +=
4642 cvsx->vsx_queue_histo[t][b];
4643 }
4644 vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t];
4645 vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t];
4646
4647 for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++)
4648 vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b];
4649
4650 for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++)
4651 vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b];
4652 }
4653
4654 }
4655
4656 boolean_t
4657 vdev_is_spacemap_addressable(vdev_t *vd)
4658 {
4659 if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_SPACEMAP_V2))
4660 return (B_TRUE);
4661
4662 /*
4663 * If double-word space map entries are not enabled we assume
4664 * 47 bits of the space map entry are dedicated to the entry's
4665 * offset (see SM_OFFSET_BITS in space_map.h). We then use that
4666 * to calculate the maximum address that can be described by a
4667 * space map entry for the given device.
4668 */
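/*
 * Worked example (illustrative): with ashift == 12 and the 47 offset bits
 * noted above, shift == 59, so any vdev smaller than 1ULL << 59 bytes
 * (512 PiB) is still addressable with single-word entries.
 */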
4669 uint64_t shift = vd->vdev_ashift + SM_OFFSET_BITS;
4670
4671 if (shift >= 63) /* detect potential overflow */
4672 return (B_TRUE);
4673
4674 return (vd->vdev_asize < (1ULL << shift));
4675 }
4676
4677 /*
4678 * Get statistics for the given vdev.
4679 */
4680 static void
4681 vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
4682 {
4683 int t;
4684 /*
4685 * If we're getting stats on the root vdev, aggregate the I/O counts
4686 * over all top-level vdevs (i.e. the direct children of the root).
4687 */
4688 if (!vd->vdev_ops->vdev_op_leaf) {
4689 if (vs) {
4690 memset(vs->vs_ops, 0, sizeof (vs->vs_ops));
4691 memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes));
4692 }
4693 if (vsx)
4694 memset(vsx, 0, sizeof (*vsx));
4695
4696 for (int c = 0; c < vd->vdev_children; c++) {
4697 vdev_t *cvd = vd->vdev_child[c];
4698 vdev_stat_t *cvs = &cvd->vdev_stat;
4699 vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex;
4700
4701 vdev_get_stats_ex_impl(cvd, cvs, cvsx);
4702 if (vs)
4703 vdev_get_child_stat(cvd, vs, cvs);
4704 if (vsx)
4705 vdev_get_child_stat_ex(cvd, vsx, cvsx);
4706 }
4707 } else {
4708 /*
4709 * We're a leaf. Just copy our ZIO active queue stats in. The
4710 * other leaf stats are updated in vdev_stat_update().
4711 */
4712 if (!vsx)
4713 return;
4714
4715 memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex));
4716
4717 for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
4718 vsx->vsx_active_queue[t] = vd->vdev_queue.vq_cactive[t];
4719 vsx->vsx_pend_queue[t] = vdev_queue_class_length(vd, t);
4720 }
4721 }
4722 }
4723
4724 void
4725 vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
4726 {
4727 vdev_t *tvd = vd->vdev_top;
4728 mutex_enter(&vd->vdev_stat_lock);
4729 if (vs) {
4730 memcpy(vs, &vd->vdev_stat, sizeof (*vs));
4731 vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
4732 vs->vs_state = vd->vdev_state;
4733 vs->vs_rsize = vdev_get_min_asize(vd);
4734
4735 if (vd->vdev_ops->vdev_op_leaf) {
4736 vs->vs_pspace = vd->vdev_psize;
4737 vs->vs_rsize += VDEV_LABEL_START_SIZE +
4738 VDEV_LABEL_END_SIZE;
4739 /*
4740 * Report initializing progress. Since we don't
4741 * have the initializing locks held, this is only
4742 * an estimate (although a fairly accurate one).
4743 */
4744 vs->vs_initialize_bytes_done =
4745 vd->vdev_initialize_bytes_done;
4746 vs->vs_initialize_bytes_est =
4747 vd->vdev_initialize_bytes_est;
4748 vs->vs_initialize_state = vd->vdev_initialize_state;
4749 vs->vs_initialize_action_time =
4750 vd->vdev_initialize_action_time;
4751
4752 /*
4753 * Report manual TRIM progress. Since we don't have
4754 * the manual TRIM locks held, this is only an
4755 * estimate (although a fairly accurate one).
4756 */
4757 vs->vs_trim_notsup = !vd->vdev_has_trim;
4758 vs->vs_trim_bytes_done = vd->vdev_trim_bytes_done;
4759 vs->vs_trim_bytes_est = vd->vdev_trim_bytes_est;
4760 vs->vs_trim_state = vd->vdev_trim_state;
4761 vs->vs_trim_action_time = vd->vdev_trim_action_time;
4762
4763 /* Set when there is a deferred resilver. */
4764 vs->vs_resilver_deferred = vd->vdev_resilver_deferred;
4765 }
4766
4767 /*
4768 * Report expandable space on top-level, non-auxiliary devices
4769 * only. The expandable space is reported in terms of metaslab
4770 * sized units since that determines how much space the pool
4771 * can expand.
4772 */
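/*
 * Worked example (illustrative numbers): if a leaf was replaced with a
 * larger disk so that vdev_max_asize - vdev_asize == 3.5 GiB and the
 * top-level vdev uses 1 GiB metaslabs (ms_shift == 30), vs_esize is
 * rounded down to 3 GiB.
 */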
4773 if (vd->vdev_aux == NULL && tvd != NULL) {
4774 vs->vs_esize = P2ALIGN_TYPED(
4775 vd->vdev_max_asize - vd->vdev_asize,
4776 1ULL << tvd->vdev_ms_shift, uint64_t);
4777 }
4778
4779 vs->vs_configured_ashift = vd->vdev_top != NULL
4780 ? vd->vdev_top->vdev_ashift : vd->vdev_ashift;
4781 vs->vs_logical_ashift = vd->vdev_logical_ashift;
4782 if (vd->vdev_physical_ashift <= ASHIFT_MAX)
4783 vs->vs_physical_ashift = vd->vdev_physical_ashift;
4784 else
4785 vs->vs_physical_ashift = 0;
4786
4787 /*
4788 * Report fragmentation and rebuild progress for top-level,
4789 * non-auxiliary, concrete devices.
4790 */
4791 if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
4792 vdev_is_concrete(vd)) {
4793 /*
4794 * The vdev fragmentation rating doesn't take into
4795 * account the embedded slog metaslab (vdev_log_mg).
4796 * Since it's only one metaslab, it would have a tiny
4797 * impact on the overall fragmentation.
4798 */
4799 vs->vs_fragmentation = (vd->vdev_mg != NULL) ?
4800 vd->vdev_mg->mg_fragmentation : 0;
4801 }
4802 vs->vs_noalloc = MAX(vd->vdev_noalloc,
4803 tvd ? tvd->vdev_noalloc : 0);
4804 }
4805
4806 vdev_get_stats_ex_impl(vd, vs, vsx);
4807 mutex_exit(&vd->vdev_stat_lock);
4808 }
4809
4810 void
4811 vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
4812 {
4813 return (vdev_get_stats_ex(vd, vs, NULL));
4814 }
4815
4816 void
4817 vdev_clear_stats(vdev_t *vd)
4818 {
4819 mutex_enter(&vd->vdev_stat_lock);
4820 vd->vdev_stat.vs_space = 0;
4821 vd->vdev_stat.vs_dspace = 0;
4822 vd->vdev_stat.vs_alloc = 0;
4823 mutex_exit(&vd->vdev_stat_lock);
4824 }
4825
4826 void
4827 vdev_scan_stat_init(vdev_t *vd)
4828 {
4829 vdev_stat_t *vs = &vd->vdev_stat;
4830
4831 for (int c = 0; c < vd->vdev_children; c++)
4832 vdev_scan_stat_init(vd->vdev_child[c]);
4833
4834 mutex_enter(&vd->vdev_stat_lock);
4835 vs->vs_scan_processed = 0;
4836 mutex_exit(&vd->vdev_stat_lock);
4837 }
4838
4839 void
4840 vdev_stat_update(zio_t *zio, uint64_t psize)
4841 {
4842 spa_t *spa = zio->io_spa;
4843 vdev_t *rvd = spa->spa_root_vdev;
4844 vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
4845 vdev_t *pvd;
4846 uint64_t txg = zio->io_txg;
4847 /* Suppress ASAN false positive */
4848 #ifdef __SANITIZE_ADDRESS__
4849 vdev_stat_t *vs = vd ? &vd->vdev_stat : NULL;
4850 vdev_stat_ex_t *vsx = vd ? &vd->vdev_stat_ex : NULL;
4851 #else
4852 vdev_stat_t *vs = &vd->vdev_stat;
4853 vdev_stat_ex_t *vsx = &vd->vdev_stat_ex;
4854 #endif
4855 zio_type_t type = zio->io_type;
4856 int flags = zio->io_flags;
4857
4858 /*
4859 * If this i/o is a gang leader, it didn't do any actual work.
4860 */
4861 if (zio->io_gang_tree)
4862 return;
4863
4864 if (zio->io_error == 0) {
4865 /*
4866 * If this is a root i/o, don't count it -- we've already
4867 * counted the top-level vdevs, and vdev_get_stats() will
4868 * aggregate them when asked. This reduces contention on
4869 * the root vdev_stat_lock and implicitly handles blocks
4870 * that compress away to holes, for which there is no i/o.
4871 * (Holes never create vdev children, so all the counters
4872 * remain zero, which is what we want.)
4873 *
4874 * Note: this only applies to successful i/o (io_error == 0)
4875 * because unlike i/o counts, errors are not additive.
4876 * When reading a ditto block, for example, failure of
4877 * one top-level vdev does not imply a root-level error.
4878 */
4879 if (vd == rvd)
4880 return;
4881
4882 ASSERT(vd == zio->io_vd);
4883
4884 if (flags & ZIO_FLAG_IO_BYPASS)
4885 return;
4886
4887 mutex_enter(&vd->vdev_stat_lock);
4888
4889 if (flags & ZIO_FLAG_IO_REPAIR) {
4890 /*
4891 * Repair is the result of a resilver issued by the
4892 * scan thread (spa_sync).
4893 */
4894 if (flags & ZIO_FLAG_SCAN_THREAD) {
4895 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
4896 dsl_scan_phys_t *scn_phys = &scn->scn_phys;
4897 uint64_t *processed = &scn_phys->scn_processed;
4898
4899 if (vd->vdev_ops->vdev_op_leaf)
4900 atomic_add_64(processed, psize);
4901 vs->vs_scan_processed += psize;
4902 }
4903
4904 /*
4905 * Repair is the result of a rebuild issued by the
4906 * rebuild thread (vdev_rebuild_thread). To avoid
4907 * double counting repaired bytes the virtual dRAID
4908 * spare vdev is excluded from the processed bytes.
4909 */
4910 if (zio->io_priority == ZIO_PRIORITY_REBUILD) {
4911 vdev_t *tvd = vd->vdev_top;
4912 vdev_rebuild_t *vr = &tvd->vdev_rebuild_config;
4913 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
4914 uint64_t *rebuilt = &vrp->vrp_bytes_rebuilt;
4915
4916 if (vd->vdev_ops->vdev_op_leaf &&
4917 vd->vdev_ops != &vdev_draid_spare_ops) {
4918 atomic_add_64(rebuilt, psize);
4919 }
4920 vs->vs_rebuild_processed += psize;
4921 }
4922
4923 if (flags & ZIO_FLAG_SELF_HEAL)
4924 vs->vs_self_healed += psize;
4925 }
4926
4927 /*
4928 * The bytes/ops/histograms are recorded at the leaf level and
4929 * aggregated into the higher level vdevs in vdev_get_stats().
4930 */
4931 if (vd->vdev_ops->vdev_op_leaf &&
4932 (zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) {
4933 zio_type_t vs_type = type;
4934 zio_priority_t priority = zio->io_priority;
4935
4936 /*
4937 * TRIM ops and bytes are reported to user space as
4938 * ZIO_TYPE_FLUSH. This is done to preserve the
4939 * vdev_stat_t structure layout for user space.
4940 */
4941 if (type == ZIO_TYPE_TRIM)
4942 vs_type = ZIO_TYPE_FLUSH;
4943
4944 /*
4945 * Solely for the purposes of 'zpool iostat -lqrw'
4946 * reporting use the priority to categorize the IO.
4947 * Only the following are reported to user space:
4948 *
4949 * ZIO_PRIORITY_SYNC_READ,
4950 * ZIO_PRIORITY_SYNC_WRITE,
4951 * ZIO_PRIORITY_ASYNC_READ,
4952 * ZIO_PRIORITY_ASYNC_WRITE,
4953 * ZIO_PRIORITY_SCRUB,
4954 * ZIO_PRIORITY_TRIM,
4955 * ZIO_PRIORITY_REBUILD.
4956 */
4957 if (priority == ZIO_PRIORITY_INITIALIZING) {
4958 ASSERT3U(type, ==, ZIO_TYPE_WRITE);
4959 priority = ZIO_PRIORITY_ASYNC_WRITE;
4960 } else if (priority == ZIO_PRIORITY_REMOVAL) {
4961 priority = ((type == ZIO_TYPE_WRITE) ?
4962 ZIO_PRIORITY_ASYNC_WRITE :
4963 ZIO_PRIORITY_ASYNC_READ);
4964 }
4965
4966 vs->vs_ops[vs_type]++;
4967 vs->vs_bytes[vs_type] += psize;
4968
4969 if (flags & ZIO_FLAG_DELEGATED) {
4970 vsx->vsx_agg_histo[priority]
4971 [RQ_HISTO(zio->io_size)]++;
4972 } else {
4973 vsx->vsx_ind_histo[priority]
4974 [RQ_HISTO(zio->io_size)]++;
4975 }
4976
4977 if (zio->io_delta && zio->io_delay) {
4978 vsx->vsx_queue_histo[priority]
4979 [L_HISTO(zio->io_delta - zio->io_delay)]++;
4980 vsx->vsx_disk_histo[type]
4981 [L_HISTO(zio->io_delay)]++;
4982 vsx->vsx_total_histo[type]
4983 [L_HISTO(zio->io_delta)]++;
4984 }
4985 }
4986
4987 mutex_exit(&vd->vdev_stat_lock);
4988 return;
4989 }
4990
4991 if (flags & ZIO_FLAG_SPECULATIVE)
4992 return;
4993
4994 /*
4995 * If this is an I/O error that is going to be retried, then ignore the
4996 * error. Otherwise, the user may interpret B_FAILFAST I/O errors as
4997 * hard errors, when in reality they can happen for any number of
4998 * innocuous reasons (bus resets, MPxIO link failure, etc).
4999 */
5000 if (zio->io_error == EIO &&
5001 !(zio->io_flags & ZIO_FLAG_IO_RETRY))
5002 return;
5003
5004 /*
5005 * Intent log writes won't propagate their error to the root
5006 * I/O so don't mark these types of failures as pool-level
5007 * errors.
5008 */
5009 if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
5010 return;
5011
5012 if (type == ZIO_TYPE_WRITE && txg != 0 &&
5013 (!(flags & ZIO_FLAG_IO_REPAIR) ||
5014 (flags & ZIO_FLAG_SCAN_THREAD) ||
5015 spa->spa_claiming)) {
5016 /*
5017 * This is either a normal write (not a repair), or it's
5018 * a repair induced by the scrub thread, or it's a repair
5019 * made by zil_claim() during spa_load() in the first txg.
5020 * In the normal case, we commit the DTL change in the same
5021 * txg as the block was born. In the scrub-induced repair
5022 * case, we know that scrubs run in first-pass syncing context,
5023 * so we commit the DTL change in spa_syncing_txg(spa).
5024 * In the zil_claim() case, we commit in spa_first_txg(spa).
5025 *
5026 * We currently do not make DTL entries for failed spontaneous
5027 * self-healing writes triggered by normal (non-scrubbing)
5028 * reads, because we have no transactional context in which to
5029 * do so -- and it's not clear that it'd be desirable anyway.
5030 */
5031 if (vd->vdev_ops->vdev_op_leaf) {
5032 uint64_t commit_txg = txg;
5033 if (flags & ZIO_FLAG_SCAN_THREAD) {
5034 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
5035 ASSERT(spa_sync_pass(spa) == 1);
5036 vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
5037 commit_txg = spa_syncing_txg(spa);
5038 } else if (spa->spa_claiming) {
5039 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
5040 commit_txg = spa_first_txg(spa);
5041 }
5042 ASSERT(commit_txg >= spa_syncing_txg(spa));
5043 if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
5044 return;
5045 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
5046 vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
5047 vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
5048 }
5049 if (vd != rvd)
5050 vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
5051 }
5052 }
5053
5054 int64_t
5055 vdev_deflated_space(vdev_t *vd, int64_t space)
5056 {
5057 ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0);
5058 ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
5059
5060 return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio);
5061 }
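/*
 * Hedged example (assuming vdev_deflate_ratio was derived from the 128K
 * psize-to-asize ratio in vdev_set_deflate_ratio()): on a plain mirror,
 * where asize == psize, dspace == space; on a 3-wide raidz1, the deflated
 * space is roughly two thirds of the raw allocation, reflecting the
 * parity overhead.
 */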
5062
5063 /*
5064 * Update the in-core space usage stats for this vdev, its metaslab class,
5065 * and the root vdev.
5066 */
5067 void
5068 vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
5069 int64_t space_delta)
5070 {
5071 (void) defer_delta;
5072 int64_t dspace_delta;
5073 spa_t *spa = vd->vdev_spa;
5074 vdev_t *rvd = spa->spa_root_vdev;
5075
5076 ASSERT(vd == vd->vdev_top);
5077
5078 /*
5079 * Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
5080 * factor. We must calculate this here and not at the root vdev
5081 * because the root vdev's psize-to-asize is simply the max of its
5082 * children's, thus not accurate enough for us.
5083 */
5084 dspace_delta = vdev_deflated_space(vd, space_delta);
5085
5086 mutex_enter(&vd->vdev_stat_lock);
5087 /* ensure we won't underflow */
5088 if (alloc_delta < 0) {
5089 ASSERT3U(vd->vdev_stat.vs_alloc, >=, -alloc_delta);
5090 }
5091
5092 vd->vdev_stat.vs_alloc += alloc_delta;
5093 vd->vdev_stat.vs_space += space_delta;
5094 vd->vdev_stat.vs_dspace += dspace_delta;
5095 mutex_exit(&vd->vdev_stat_lock);
5096
5097 /* every class but log contributes to root space stats */
5098 if (vd->vdev_mg != NULL && !vd->vdev_islog) {
5099 ASSERT(!vd->vdev_isl2cache);
5100 mutex_enter(&rvd->vdev_stat_lock);
5101 rvd->vdev_stat.vs_alloc += alloc_delta;
5102 rvd->vdev_stat.vs_space += space_delta;
5103 rvd->vdev_stat.vs_dspace += dspace_delta;
5104 mutex_exit(&rvd->vdev_stat_lock);
5105 }
5106 /* Note: metaslab_class_space_update moved to metaslab_space_update */
5107 }
5108
5109 /*
5110 * Mark a top-level vdev's config as dirty, placing it on the dirty list
5111 * so that it will be written out next time the vdev configuration is synced.
5112 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
5113 */
5114 void
5115 vdev_config_dirty(vdev_t *vd)
5116 {
5117 spa_t *spa = vd->vdev_spa;
5118 vdev_t *rvd = spa->spa_root_vdev;
5119 int c;
5120
5121 ASSERT(spa_writeable(spa));
5122
5123 /*
5124 * If this is an aux vdev (as with l2cache and spare devices), then we
5125 * update the vdev config manually and set the sync flag.
5126 */
5127 if (vd->vdev_aux != NULL) {
5128 spa_aux_vdev_t *sav = vd->vdev_aux;
5129 nvlist_t **aux;
5130 uint_t naux;
5131
5132 for (c = 0; c < sav->sav_count; c++) {
5133 if (sav->sav_vdevs[c] == vd)
5134 break;
5135 }
5136
5137 if (c == sav->sav_count) {
5138 /*
5139 * We're being removed. There's nothing more to do.
5140 */
5141 ASSERT(sav->sav_sync == B_TRUE);
5142 return;
5143 }
5144
5145 sav->sav_sync = B_TRUE;
5146
5147 if (nvlist_lookup_nvlist_array(sav->sav_config,
5148 ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
5149 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
5150 ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
5151 }
5152
5153 ASSERT(c < naux);
5154
5155 /*
5156 * Setting the nvlist in the middle of the array is a little
5157 * sketchy, but it will work.
5158 */
5159 nvlist_free(aux[c]);
5160 aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
5161
5162 return;
5163 }
5164
5165 /*
5166 * The dirty list is protected by the SCL_CONFIG lock. The caller
5167 * must either hold SCL_CONFIG as writer, or must be the sync thread
5168 * (which holds SCL_CONFIG as reader). There's only one sync thread,
5169 * so this is sufficient to ensure mutual exclusion.
5170 */
5171 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
5172 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5173 spa_config_held(spa, SCL_CONFIG, RW_READER)));
5174
5175 if (vd == rvd) {
5176 for (c = 0; c < rvd->vdev_children; c++)
5177 vdev_config_dirty(rvd->vdev_child[c]);
5178 } else {
5179 ASSERT(vd == vd->vdev_top);
5180
5181 if (!list_link_active(&vd->vdev_config_dirty_node) &&
5182 vdev_is_concrete(vd)) {
5183 list_insert_head(&spa->spa_config_dirty_list, vd);
5184 }
5185 }
5186 }
5187
5188 void
5189 vdev_config_clean(vdev_t *vd)
5190 {
5191 spa_t *spa = vd->vdev_spa;
5192
5193 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
5194 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5195 spa_config_held(spa, SCL_CONFIG, RW_READER)));
5196
5197 ASSERT(list_link_active(&vd->vdev_config_dirty_node));
5198 list_remove(&spa->spa_config_dirty_list, vd);
5199 }
5200
5201 /*
5202 * Mark a top-level vdev's state as dirty, so that the next pass of
5203 * spa_sync() can convert this into vdev_config_dirty(). We distinguish
5204 * the state changes from larger config changes because they require
5205 * much less locking, and are often needed for administrative actions.
5206 */
5207 void
5208 vdev_state_dirty(vdev_t *vd)
5209 {
5210 spa_t *spa = vd->vdev_spa;
5211
5212 ASSERT(spa_writeable(spa));
5213 ASSERT(vd == vd->vdev_top);
5214
5215 /*
5216 * The state list is protected by the SCL_STATE lock. The caller
5217 * must either hold SCL_STATE as writer, or must be the sync thread
5218 * (which holds SCL_STATE as reader). There's only one sync thread,
5219 * so this is sufficient to ensure mutual exclusion.
5220 */
5221 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
5222 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5223 spa_config_held(spa, SCL_STATE, RW_READER)));
5224
5225 if (!list_link_active(&vd->vdev_state_dirty_node) &&
5226 vdev_is_concrete(vd))
5227 list_insert_head(&spa->spa_state_dirty_list, vd);
5228 }
5229
5230 void
5231 vdev_state_clean(vdev_t *vd)
5232 {
5233 spa_t *spa = vd->vdev_spa;
5234
5235 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
5236 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5237 spa_config_held(spa, SCL_STATE, RW_READER)));
5238
5239 ASSERT(list_link_active(&vd->vdev_state_dirty_node));
5240 list_remove(&spa->spa_state_dirty_list, vd);
5241 }
5242
5243 /*
5244 * Propagate vdev state up from children to parent.
5245 */
5246 void
5247 vdev_propagate_state(vdev_t *vd)
5248 {
5249 spa_t *spa = vd->vdev_spa;
5250 vdev_t *rvd = spa->spa_root_vdev;
5251 int degraded = 0, faulted = 0;
5252 int corrupted = 0;
5253 vdev_t *child;
5254
5255 if (vd->vdev_children > 0) {
5256 for (int c = 0; c < vd->vdev_children; c++) {
5257 child = vd->vdev_child[c];
5258
5259 /*
5260 * Don't factor holes or indirect vdevs into the
5261 * decision.
5262 */
5263 if (!vdev_is_concrete(child))
5264 continue;
5265
5266 if (!vdev_readable(child) ||
5267 (!vdev_writeable(child) && spa_writeable(spa))) {
5268 /*
5269 * Root special: if there is a top-level log
5270 * device, treat the root vdev as if it were
5271 * degraded.
5272 */
5273 if (child->vdev_islog && vd == rvd)
5274 degraded++;
5275 else
5276 faulted++;
5277 } else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
5278 degraded++;
5279 }
5280
5281 if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
5282 corrupted++;
5283 }
5284
5285 vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
5286
5287 /*
5288 * Root special: if there is a top-level vdev that cannot be
5289 * opened due to corrupted metadata, then propagate the root
5290 * vdev's aux state as 'corrupt' rather than 'insufficient
5291 * replicas'.
5292 */
5293 if (corrupted && vd == rvd &&
5294 rvd->vdev_state == VDEV_STATE_CANT_OPEN)
5295 vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
5296 VDEV_AUX_CORRUPT_DATA);
5297 }
5298
5299 if (vd->vdev_parent)
5300 vdev_propagate_state(vd->vdev_parent);
5301 }
5302
5303 /*
5304 * Set a vdev's state. If this is during an open, we don't update the parent
5305 * state, because we're in the process of opening children depth-first.
5306 * Otherwise, we propagate the change to the parent.
5307 *
5308 * If this routine places a device in a faulted state, an appropriate ereport is
5309 * generated.
5310 */
5311 void
5312 vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
5313 {
5314 uint64_t save_state;
5315 spa_t *spa = vd->vdev_spa;
5316
5317 if (state == vd->vdev_state) {
5318 /*
5319 * Since the vdev_offline() code path has already put the device
5320 * into an offline state, we can miss a statechange event to
5321 * OFFLINE. Check the previous state to catch this condition.
5322 */
5323 if (vd->vdev_ops->vdev_op_leaf &&
5324 (state == VDEV_STATE_OFFLINE) &&
5325 (vd->vdev_prevstate >= VDEV_STATE_FAULTED)) {
5326 /* post an offline state change */
5327 zfs_post_state_change(spa, vd, vd->vdev_prevstate);
5328 }
5329 vd->vdev_stat.vs_aux = aux;
5330 return;
5331 }
5332
5333 save_state = vd->vdev_state;
5334
5335 vd->vdev_state = state;
5336 vd->vdev_stat.vs_aux = aux;
5337
5338 /*
5339 * If we are setting the vdev state to anything but an open state, then
5340 * always close the underlying device unless the device has requested
5341 * a delayed close (i.e. we're about to remove or fault the device).
5342 * Otherwise, we keep accessible but invalid devices open forever.
5343 * We don't call vdev_close() itself, because that implies some extra
5344 * checks (offline, etc) that we don't want here. This is limited to
5345 * leaf devices, because otherwise closing the device will affect other
5346 * children.
5347 */
5348 if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
5349 vd->vdev_ops->vdev_op_leaf)
5350 vd->vdev_ops->vdev_op_close(vd);
5351
5352 if (vd->vdev_removed &&
5353 state == VDEV_STATE_CANT_OPEN &&
5354 (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
5355 /*
5356 * If the previous state is set to VDEV_STATE_REMOVED, then this
5357 * device was previously marked removed and someone attempted to
5358 * reopen it. If this failed due to a nonexistent device, then
5359 * keep the device in the REMOVED state. We do the same if this
5360 * is one of our special test online cases, which only attempt
5361 * to online the device and shouldn't generate an FMA
5362 * fault.
5363 */
5364 vd->vdev_state = VDEV_STATE_REMOVED;
5365 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
5366 } else if (state == VDEV_STATE_REMOVED) {
5367 vd->vdev_removed = B_TRUE;
5368 } else if (state == VDEV_STATE_CANT_OPEN) {
5369 /*
5370 * If we fail to open a vdev during an import or recovery, we
5371 * mark it as "not available", which signifies that it was
5372 * never there to begin with. Failure to open such a device
5373 * is not considered an error.
5374 */
5375 if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
5376 spa_load_state(spa) == SPA_LOAD_RECOVER) &&
5377 vd->vdev_ops->vdev_op_leaf)
5378 vd->vdev_not_present = 1;
5379
5380 /*
5381 * Post the appropriate ereport. If the 'prevstate' field is
5382 * set to something other than VDEV_STATE_UNKNOWN, it indicates
5383 * that this is part of a vdev_reopen(). In this case, we don't
5384 * want to post the ereport if the device was already in the
5385 * CANT_OPEN state beforehand.
5386 *
5387 * If the 'checkremove' flag is set, then this is an attempt to
5388 * online the device in response to an insertion event. If we
5389 * hit this case, then we have detected an insertion event for a
5390 * faulted or offline device that wasn't in the removed state.
5391 * In this scenario, we don't post an ereport because we are
5392 * about to replace the device, or attempt an online with
5393 * vdev_forcefault, which will generate the fault for us.
5394 */
5395 if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
5396 !vd->vdev_not_present && !vd->vdev_checkremove &&
5397 vd != spa->spa_root_vdev) {
5398 const char *class;
5399
5400 switch (aux) {
5401 case VDEV_AUX_OPEN_FAILED:
5402 class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
5403 break;
5404 case VDEV_AUX_CORRUPT_DATA:
5405 class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
5406 break;
5407 case VDEV_AUX_NO_REPLICAS:
5408 class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
5409 break;
5410 case VDEV_AUX_BAD_GUID_SUM:
5411 class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
5412 break;
5413 case VDEV_AUX_TOO_SMALL:
5414 class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
5415 break;
5416 case VDEV_AUX_BAD_LABEL:
5417 class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
5418 break;
5419 case VDEV_AUX_BAD_ASHIFT:
5420 class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT;
5421 break;
5422 default:
5423 class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
5424 }
5425
5426 (void) zfs_ereport_post(class, spa, vd, NULL, NULL,
5427 save_state);
5428 }
5429
5430 /* Erase any notion of persistent removed state */
5431 vd->vdev_removed = B_FALSE;
5432 } else {
5433 vd->vdev_removed = B_FALSE;
5434 }
5435
5436 /*
5437 * Notify ZED of any significant state-change on a leaf vdev.
5438 *
5439 */
5440 if (vd->vdev_ops->vdev_op_leaf) {
5441 /* preserve original state from a vdev_reopen() */
5442 if ((vd->vdev_prevstate != VDEV_STATE_UNKNOWN) &&
5443 (vd->vdev_prevstate != vd->vdev_state) &&
5444 (save_state <= VDEV_STATE_CLOSED))
5445 save_state = vd->vdev_prevstate;
5446
5447 /* filter out state change due to initial vdev_open */
5448 if (save_state > VDEV_STATE_CLOSED)
5449 zfs_post_state_change(spa, vd, save_state);
5450 }
5451
5452 if (!isopen && vd->vdev_parent)
5453 vdev_propagate_state(vd->vdev_parent);
5454 }
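/*
 * Illustrative sketch (not part of the build): faulting a leaf vdev outside
 * of an open path, which closes the underlying device and propagates the new
 * state to its parents.  VDEV_AUX_ERR_EXCEEDED is just one example aux value.
 *
 *	vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, VDEV_AUX_ERR_EXCEEDED);
 */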
5455
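/*
 * Returns B_TRUE if every child of this interior vdev is in the OFFLINE
 * state, B_FALSE otherwise.
 */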
5456 boolean_t
5457 vdev_children_are_offline(vdev_t *vd)
5458 {
5459 ASSERT(!vd->vdev_ops->vdev_op_leaf);
5460
5461 for (uint64_t i = 0; i < vd->vdev_children; i++) {
5462 if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE)
5463 return (B_FALSE);
5464 }
5465
5466 return (B_TRUE);
5467 }
5468
5469 /*
5470 * Check the vdev configuration to ensure that it's capable of supporting
5471 * a root pool. We do not support partial configuration.
5472 */
5473 boolean_t
5474 vdev_is_bootable(vdev_t *vd)
5475 {
5476 if (!vd->vdev_ops->vdev_op_leaf) {
5477 const char *vdev_type = vd->vdev_ops->vdev_op_type;
5478
5479 if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0)
5480 return (B_FALSE);
5481 }
5482
5483 for (int c = 0; c < vd->vdev_children; c++) {
5484 if (!vdev_is_bootable(vd->vdev_child[c]))
5485 return (B_FALSE);
5486 }
5487 return (B_TRUE);
5488 }
5489
5490 boolean_t
5491 vdev_is_concrete(vdev_t *vd)
5492 {
5493 vdev_ops_t *ops = vd->vdev_ops;
5494 if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops ||
5495 ops == &vdev_missing_ops || ops == &vdev_root_ops) {
5496 return (B_FALSE);
5497 } else {
5498 return (B_TRUE);
5499 }
5500 }
5501
5502 /*
5503 * Determine if a log device has valid content. If the vdev was
5504 * removed or faulted in the MOS config then we know that
5505 * the content on the log device has already been written to the pool.
5506 */
5507 boolean_t
5508 vdev_log_state_valid(vdev_t *vd)
5509 {
5510 if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
5511 !vd->vdev_removed)
5512 return (B_TRUE);
5513
5514 for (int c = 0; c < vd->vdev_children; c++)
5515 if (vdev_log_state_valid(vd->vdev_child[c]))
5516 return (B_TRUE);
5517
5518 return (B_FALSE);
5519 }
5520
5521 /*
5522 * Expand a vdev if possible.
5523 */
5524 void
5525 vdev_expand(vdev_t *vd, uint64_t txg)
5526 {
5527 ASSERT(vd->vdev_top == vd);
5528 ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5529 ASSERT(vdev_is_concrete(vd));
5530
5531 vdev_set_deflate_ratio(vd);
5532
5533 if ((vd->vdev_spa->spa_raidz_expand == NULL ||
5534 vd->vdev_spa->spa_raidz_expand->vre_vdev_id != vd->vdev_id) &&
5535 (vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
5536 vdev_is_concrete(vd)) {
5537 vdev_metaslab_group_create(vd);
5538 VERIFY(vdev_metaslab_init(vd, txg) == 0);
5539 vdev_config_dirty(vd);
5540 }
5541 }
5542
5543 /*
5544 * Split a vdev.
5545 */
5546 void
5547 vdev_split(vdev_t *vd)
5548 {
5549 vdev_t *cvd, *pvd = vd->vdev_parent;
5550
5551 VERIFY3U(pvd->vdev_children, >, 1);
5552
5553 vdev_remove_child(pvd, vd);
5554 vdev_compact_children(pvd);
5555
5556 ASSERT3P(pvd->vdev_child, !=, NULL);
5557
5558 cvd = pvd->vdev_child[0];
5559 if (pvd->vdev_children == 1) {
5560 vdev_remove_parent(cvd);
5561 cvd->vdev_splitting = B_TRUE;
5562 }
5563 vdev_propagate_state(cvd);
5564 }
5565
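/*
 * Recursively walk the vdev tree and, for each leaf with active I/O, check
 * whether the I/O at the head of its active queue has been outstanding for
 * longer than spa_deadman_synctime; if so, invoke the deadman logic on it.
 */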
5566 void
5567 vdev_deadman(vdev_t *vd, const char *tag)
5568 {
5569 for (int c = 0; c < vd->vdev_children; c++) {
5570 vdev_t *cvd = vd->vdev_child[c];
5571
5572 vdev_deadman(cvd, tag);
5573 }
5574
5575 if (vd->vdev_ops->vdev_op_leaf) {
5576 vdev_queue_t *vq = &vd->vdev_queue;
5577
5578 mutex_enter(&vq->vq_lock);
5579 if (vq->vq_active > 0) {
5580 spa_t *spa = vd->vdev_spa;
5581 zio_t *fio;
5582 uint64_t delta;
5583
5584 zfs_dbgmsg("slow vdev: %s has %u active IOs",
5585 vd->vdev_path, vq->vq_active);
5586
5587 /*
5588 * Look at the head of the active queue; if that I/O has
5589 * been outstanding for longer than spa_deadman_synctime,
5590 * invoke the deadman logic.
5591 */
5592 fio = list_head(&vq->vq_active_list);
5593 delta = gethrtime() - fio->io_timestamp;
5594 if (delta > spa_deadman_synctime(spa))
5595 zio_deadman(fio, tag);
5596 }
5597 mutex_exit(&vq->vq_lock);
5598 }
5599 }
5600
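/*
 * Mark a leaf vdev as having its resilver deferred, and note on the pool
 * that at least one resilver is now deferred.
 */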
5601 void
5602 vdev_defer_resilver(vdev_t *vd)
5603 {
5604 ASSERT(vd->vdev_ops->vdev_op_leaf);
5605
5606 vd->vdev_resilver_deferred = B_TRUE;
5607 vd->vdev_spa->spa_resilver_deferred = B_TRUE;
5608 }
5609
5610 /*
5611 * Clears the resilver deferred flag on all leaf devs under vd. Returns
5612 * B_TRUE if we have devices that need to be resilvered and are available to
5613 * accept resilver I/Os.
5614 */
5615 boolean_t
5616 vdev_clear_resilver_deferred(vdev_t *vd, dmu_tx_t *tx)
5617 {
5618 boolean_t resilver_needed = B_FALSE;
5619 spa_t *spa = vd->vdev_spa;
5620
5621 for (int c = 0; c < vd->vdev_children; c++) {
5622 vdev_t *cvd = vd->vdev_child[c];
5623 resilver_needed |= vdev_clear_resilver_deferred(cvd, tx);
5624 }
5625
5626 if (vd == spa->spa_root_vdev &&
5627 spa_feature_is_active(spa, SPA_FEATURE_RESILVER_DEFER)) {
5628 spa_feature_decr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
5629 vdev_config_dirty(vd);
5630 spa->spa_resilver_deferred = B_FALSE;
5631 return (resilver_needed);
5632 }
5633
5634 if (!vdev_is_concrete(vd) || vd->vdev_aux ||
5635 !vd->vdev_ops->vdev_op_leaf)
5636 return (resilver_needed);
5637
5638 vd->vdev_resilver_deferred = B_FALSE;
5639
5640 return (!vdev_is_dead(vd) && !vd->vdev_offline &&
5641 vdev_resilver_needed(vd, NULL, NULL));
5642 }
5643
5644 boolean_t
5645 vdev_xlate_is_empty(range_seg64_t *rs)
5646 {
5647 return (rs->rs_start == rs->rs_end);
5648 }
5649
5650 /*
5651 * Translate a logical range to the first contiguous physical range for the
5652 * specified vdev_t. This function is initially called with a leaf vdev and
5653 * will walk each parent vdev until it reaches a top-level vdev. Once the
5654 * top-level is reached the physical range is initialized and the recursive
5655 * function begins to unwind. As it unwinds it calls the parent's vdev
5656 * specific translation function to do the real conversion.
5657 */
5658 void
5659 vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
5660 range_seg64_t *physical_rs, range_seg64_t *remain_rs)
5661 {
5662 /*
5663 * Walk up the vdev tree
5664 */
5665 if (vd != vd->vdev_top) {
5666 vdev_xlate(vd->vdev_parent, logical_rs, physical_rs,
5667 remain_rs);
5668 } else {
5669 /*
5670 * We've reached the top-level vdev, initialize the physical
5671 * range to the logical range and set an empty remaining
5672 * range then start to unwind.
5673 */
5674 physical_rs->rs_start = logical_rs->rs_start;
5675 physical_rs->rs_end = logical_rs->rs_end;
5676
5677 remain_rs->rs_start = logical_rs->rs_start;
5678 remain_rs->rs_end = logical_rs->rs_start;
5679
5680 return;
5681 }
5682
5683 vdev_t *pvd = vd->vdev_parent;
5684 ASSERT3P(pvd, !=, NULL);
5685 ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);
5686
5687 /*
5688 * As this recursive function unwinds, translate the logical
5689 * range into its physical and any remaining components by calling
5690 * the vdev specific translate function.
5691 */
5692 range_seg64_t intermediate = { 0 };
5693 pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate, remain_rs);
5694
5695 physical_rs->rs_start = intermediate.rs_start;
5696 physical_rs->rs_end = intermediate.rs_end;
5697 }
5698
5699 void
5700 vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs,
5701 vdev_xlate_func_t *func, void *arg)
5702 {
5703 range_seg64_t iter_rs = *logical_rs;
5704 range_seg64_t physical_rs;
5705 range_seg64_t remain_rs;
5706
5707 while (!vdev_xlate_is_empty(&iter_rs)) {
5708
5709 vdev_xlate(vd, &iter_rs, &physical_rs, &remain_rs);
5710
5711 /*
5712 * With raidz and dRAID, it's possible that the logical range
5713 * does not live on this leaf vdev. Only call the provided
5714 * function when the translated physical range is non-empty.
5715 */
5716 if (!vdev_xlate_is_empty(&physical_rs))
5717 func(arg, &physical_rs);
5718
5719 iter_rs = remain_rs;
5720 }
5721 }
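/*
 * Illustrative sketch (not part of the build): a hypothetical callback that
 * sums the physical size of a logical range on a leaf vdev.  'leaf_vd',
 * 'offset', and 'size' are assumptions of the example.
 *
 *	static void
 *	xlate_sum_cb(void *arg, range_seg64_t *physical_rs)
 *	{
 *		uint64_t *sum = arg;
 *		*sum += physical_rs->rs_end - physical_rs->rs_start;
 *	}
 *
 *	uint64_t sum = 0;
 *	range_seg64_t logical = {
 *		.rs_start = offset,
 *		.rs_end = offset + size,
 *	};
 *	vdev_xlate_walk(leaf_vd, &logical, xlate_sum_cb, &sum);
 */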
5722
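/*
 * Format a human-readable name for a vdev into 'buf': the pool name for the
 * root vdev, "<type>-<id>" for other interior vdevs without a path, and the
 * device path otherwise.  A leaf with no path leaves 'buf' untouched.
 */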
5723 static char *
5724 vdev_name(vdev_t *vd, char *buf, int buflen)
5725 {
5726 if (vd->vdev_path == NULL) {
5727 if (strcmp(vd->vdev_ops->vdev_op_type, "root") == 0) {
5728 strlcpy(buf, vd->vdev_spa->spa_name, buflen);
5729 } else if (!vd->vdev_ops->vdev_op_leaf) {
5730 snprintf(buf, buflen, "%s-%llu",
5731 vd->vdev_ops->vdev_op_type,
5732 (u_longlong_t)vd->vdev_id);
5733 }
5734 } else {
5735 strlcpy(buf, vd->vdev_path, buflen);
5736 }
5737 return (buf);
5738 }
5739
5740 /*
5741 * Look at the vdev tree and determine whether any devices are currently being
5742 * replaced.
5743 */
5744 boolean_t
5745 vdev_replace_in_progress(vdev_t *vdev)
5746 {
5747 ASSERT(spa_config_held(vdev->vdev_spa, SCL_ALL, RW_READER) != 0);
5748
5749 if (vdev->vdev_ops == &vdev_replacing_ops)
5750 return (B_TRUE);
5751
5752 /*
5753 * A 'spare' vdev indicates that we have a replace in progress, unless
5754 * it has exactly two children, and the second, the hot spare, has
5755 * finished being resilvered.
5756 */
5757 if (vdev->vdev_ops == &vdev_spare_ops && (vdev->vdev_children > 2 ||
5758 !vdev_dtl_empty(vdev->vdev_child[1], DTL_MISSING)))
5759 return (B_TRUE);
5760
5761 for (int i = 0; i < vdev->vdev_children; i++) {
5762 if (vdev_replace_in_progress(vdev->vdev_child[i]))
5763 return (B_TRUE);
5764 }
5765
5766 return (B_FALSE);
5767 }
5768
5769 /*
5770 * Add a (source=src, propname=propval) list to an nvlist.
5771 */
5772 static void
5773 vdev_prop_add_list(nvlist_t *nvl, const char *propname, const char *strval,
5774 uint64_t intval, zprop_source_t src)
5775 {
5776 nvlist_t *propval;
5777
5778 propval = fnvlist_alloc();
5779 fnvlist_add_uint64(propval, ZPROP_SOURCE, src);
5780
5781 if (strval != NULL)
5782 fnvlist_add_string(propval, ZPROP_VALUE, strval);
5783 else
5784 fnvlist_add_uint64(propval, ZPROP_VALUE, intval);
5785
5786 fnvlist_add_nvlist(nvl, propname, propval);
5787 nvlist_free(propval);
5788 }
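/*
 * Illustrative sketch (not part of the build): the entry added to 'nvl' has
 * the following shape, where ZPROP_SOURCE and ZPROP_VALUE are the standard
 * nvlist keys for a property's source and value:
 *
 *	<propname> -> {
 *		ZPROP_SOURCE: <src>			(uint64)
 *		ZPROP_VALUE:  <strval or intval>	(string or uint64)
 *	}
 */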
5789
5790 static void
5791 vdev_props_set_sync(void *arg, dmu_tx_t *tx)
5792 {
5793 vdev_t *vd;
5794 nvlist_t *nvp = arg;
5795 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
5796 objset_t *mos = spa->spa_meta_objset;
5797 nvpair_t *elem = NULL;
5798 uint64_t vdev_guid;
5799 uint64_t objid;
5800 nvlist_t *nvprops;
5801
5802 vdev_guid = fnvlist_lookup_uint64(nvp, ZPOOL_VDEV_PROPS_SET_VDEV);
5803 nvprops = fnvlist_lookup_nvlist(nvp, ZPOOL_VDEV_PROPS_SET_PROPS);
5804 vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE);
5805
5806 /* this vdev could get removed while waiting for this sync task */
5807 if (vd == NULL)
5808 return;
5809
5810 /*
5811 * Set vdev property values in the vdev props mos object.
5812 */
5813 if (vd->vdev_root_zap != 0) {
5814 objid = vd->vdev_root_zap;
5815 } else if (vd->vdev_top_zap != 0) {
5816 objid = vd->vdev_top_zap;
5817 } else if (vd->vdev_leaf_zap != 0) {
5818 objid = vd->vdev_leaf_zap;
5819 } else {
5820 panic("unexpected vdev type");
5821 }
5822
5823 mutex_enter(&spa->spa_props_lock);
5824
5825 while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
5826 uint64_t intval;
5827 const char *strval;
5828 vdev_prop_t prop;
5829 const char *propname = nvpair_name(elem);
5830 zprop_type_t proptype;
5831
5832 switch (prop = vdev_name_to_prop(propname)) {
5833 case VDEV_PROP_USERPROP:
5834 if (vdev_prop_user(propname)) {
5835 strval = fnvpair_value_string(elem);
5836 if (strlen(strval) == 0) {
5837 /* remove the property if value == "" */
5838 (void) zap_remove(mos, objid, propname,
5839 tx);
5840 } else {
5841 VERIFY0(zap_update(mos, objid, propname,
5842 1, strlen(strval) + 1, strval, tx));
5843 }
5844 spa_history_log_internal(spa, "vdev set", tx,
5845 "vdev_guid=%llu: %s=%s",
5846 (u_longlong_t)vdev_guid, nvpair_name(elem),
5847 strval);
5848 }
5849 break;
5850 default:
5851 /* normalize the property name */
5852 propname = vdev_prop_to_name(prop);
5853 proptype = vdev_prop_get_type(prop);
5854
5855 if (nvpair_type(elem) == DATA_TYPE_STRING) {
5856 ASSERT(proptype == PROP_TYPE_STRING);
5857 strval = fnvpair_value_string(elem);
5858 VERIFY0(zap_update(mos, objid, propname,
5859 1, strlen(strval) + 1, strval, tx));
5860 spa_history_log_internal(spa, "vdev set", tx,
5861 "vdev_guid=%llu: %s=%s",
5862 (u_longlong_t)vdev_guid, nvpair_name(elem),
5863 strval);
5864 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
5865 intval = fnvpair_value_uint64(elem);
5866
5867 if (proptype == PROP_TYPE_INDEX) {
5868 const char *unused;
5869 VERIFY0(vdev_prop_index_to_string(
5870 prop, intval, &unused));
5871 }
5872 VERIFY0(zap_update(mos, objid, propname,
5873 sizeof (uint64_t), 1, &intval, tx));
5874 spa_history_log_internal(spa, "vdev set", tx,
5875 "vdev_guid=%llu: %s=%lld",
5876 (u_longlong_t)vdev_guid,
5877 nvpair_name(elem), (longlong_t)intval);
5878 } else {
5879 panic("invalid vdev property type %u",
5880 nvpair_type(elem));
5881 }
5882 }
5883
5884 }
5885
5886 mutex_exit(&spa->spa_props_lock);
5887 }
5888
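/*
 * Set vdev properties.  The expected 'innvl' layout matches the lookups
 * below: the target vdev guid under ZPOOL_VDEV_PROPS_SET_VDEV and an nvlist
 * of properties under ZPOOL_VDEV_PROPS_SET_PROPS.
 *
 * Illustrative sketch (not part of the build; "checksum_n" is only an
 * example property name):
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	fnvlist_add_uint64(props, "checksum_n", 10);
 *
 *	nvlist_t *innvl = fnvlist_alloc();
 *	fnvlist_add_uint64(innvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid);
 *	fnvlist_add_nvlist(innvl, ZPOOL_VDEV_PROPS_SET_PROPS, props);
 */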
5889 int
5890 vdev_prop_set(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
5891 {
5892 spa_t *spa = vd->vdev_spa;
5893 nvpair_t *elem = NULL;
5894 uint64_t vdev_guid;
5895 nvlist_t *nvprops;
5896 int error = 0;
5897
5898 ASSERT(vd != NULL);
5899
5900 /* Check that vdev has a zap we can use */
5901 if (vd->vdev_root_zap == 0 &&
5902 vd->vdev_top_zap == 0 &&
5903 vd->vdev_leaf_zap == 0)
5904 return (SET_ERROR(EINVAL));
5905
5906 if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_SET_VDEV,
5907 &vdev_guid) != 0)
5908 return (SET_ERROR(EINVAL));
5909
5910 if (nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_SET_PROPS,
5911 &nvprops) != 0)
5912 return (SET_ERROR(EINVAL));
5913
5914 if ((vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE)) == NULL)
5915 return (SET_ERROR(EINVAL));
5916
5917 while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
5918 const char *propname = nvpair_name(elem);
5919 vdev_prop_t prop = vdev_name_to_prop(propname);
5920 uint64_t intval = 0;
5921 const char *strval = NULL;
5922
5923 if (prop == VDEV_PROP_USERPROP && !vdev_prop_user(propname)) {
5924 error = EINVAL;
5925 goto end;
5926 }
5927
5928 if (vdev_prop_readonly(prop)) {
5929 error = EROFS;
5930 goto end;
5931 }
5932
5933 /* Special Processing */
5934 switch (prop) {
5935 case VDEV_PROP_PATH:
5936 if (vd->vdev_path == NULL) {
5937 error = EROFS;
5938 break;
5939 }
5940 if (nvpair_value_string(elem, &strval) != 0) {
5941 error = EINVAL;
5942 break;
5943 }
5944 /* New path must start with /dev/ */
5945 if (strncmp(strval, "/dev/", 5)) {
5946 error = EINVAL;
5947 break;
5948 }
5949 error = spa_vdev_setpath(spa, vdev_guid, strval);
5950 break;
5951 case VDEV_PROP_ALLOCATING:
5952 if (nvpair_value_uint64(elem, &intval) != 0) {
5953 error = EINVAL;
5954 break;
5955 }
5956 if (intval != vd->vdev_noalloc)
5957 break;
5958 if (intval == 0)
5959 error = spa_vdev_noalloc(spa, vdev_guid);
5960 else
5961 error = spa_vdev_alloc(spa, vdev_guid);
5962 break;
5963 case VDEV_PROP_FAILFAST:
5964 if (nvpair_value_uint64(elem, &intval) != 0) {
5965 error = EINVAL;
5966 break;
5967 }
5968 vd->vdev_failfast = intval & 1;
5969 break;
5970 case VDEV_PROP_CHECKSUM_N:
5971 if (nvpair_value_uint64(elem, &intval) != 0) {
5972 error = EINVAL;
5973 break;
5974 }
5975 vd->vdev_checksum_n = intval;
5976 break;
5977 case VDEV_PROP_CHECKSUM_T:
5978 if (nvpair_value_uint64(elem, &intval) != 0) {
5979 error = EINVAL;
5980 break;
5981 }
5982 vd->vdev_checksum_t = intval;
5983 break;
5984 case VDEV_PROP_IO_N:
5985 if (nvpair_value_uint64(elem, &intval) != 0) {
5986 error = EINVAL;
5987 break;
5988 }
5989 vd->vdev_io_n = intval;
5990 break;
5991 case VDEV_PROP_IO_T:
5992 if (nvpair_value_uint64(elem, &intval) != 0) {
5993 error = EINVAL;
5994 break;
5995 }
5996 vd->vdev_io_t = intval;
5997 break;
5998 case VDEV_PROP_SLOW_IO_N:
5999 if (nvpair_value_uint64(elem, &intval) != 0) {
6000 error = EINVAL;
6001 break;
6002 }
6003 vd->vdev_slow_io_n = intval;
6004 break;
6005 case VDEV_PROP_SLOW_IO_T:
6006 if (nvpair_value_uint64(elem, &intval) != 0) {
6007 error = EINVAL;
6008 break;
6009 }
6010 vd->vdev_slow_io_t = intval;
6011 break;
6012 default:
6013 /* Most processing is done in vdev_props_set_sync */
6014 break;
6015 }
6016 end:
6017 if (error != 0) {
6018 intval = error;
6019 vdev_prop_add_list(outnvl, propname, strval, intval, 0);
6020 return (error);
6021 }
6022 }
6023
6024 return (dsl_sync_task(spa->spa_name, NULL, vdev_props_set_sync,
6025 innvl, 6, ZFS_SPACE_CHECK_EXTRA_RESERVED));
6026 }
6027
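/*
 * Get vdev properties.  If ZPOOL_VDEV_PROPS_GET_PROPS is present in 'innvl',
 * only the requested properties are returned; otherwise every string entry
 * in the vdev's MOS ZAP is returned (integer-sized entries are skipped, since
 * user properties are never integers).
 */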
6028 int
6029 vdev_prop_get(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
6030 {
6031 spa_t *spa = vd->vdev_spa;
6032 objset_t *mos = spa->spa_meta_objset;
6033 int err = 0;
6034 uint64_t objid;
6035 uint64_t vdev_guid;
6036 nvpair_t *elem = NULL;
6037 nvlist_t *nvprops = NULL;
6038 uint64_t intval = 0;
6039 char *strval = NULL;
6040 const char *propname = NULL;
6041 vdev_prop_t prop;
6042
6043 ASSERT(vd != NULL);
6044 ASSERT(mos != NULL);
6045
6046 if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_GET_VDEV,
6047 &vdev_guid) != 0)
6048 return (SET_ERROR(EINVAL));
6049
6050 nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_GET_PROPS, &nvprops);
6051
6052 if (vd->vdev_root_zap != 0) {
6053 objid = vd->vdev_root_zap;
6054 } else if (vd->vdev_top_zap != 0) {
6055 objid = vd->vdev_top_zap;
6056 } else if (vd->vdev_leaf_zap != 0) {
6057 objid = vd->vdev_leaf_zap;
6058 } else {
6059 return (SET_ERROR(EINVAL));
6060 }
6061 ASSERT(objid != 0);
6062
6063 mutex_enter(&spa->spa_props_lock);
6064
6065 if (nvprops != NULL) {
6066 char namebuf[64] = { 0 };
6067
6068 while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
6069 intval = 0;
6070 strval = NULL;
6071 propname = nvpair_name(elem);
6072 prop = vdev_name_to_prop(propname);
6073 zprop_source_t src = ZPROP_SRC_DEFAULT;
6074 uint64_t integer_size, num_integers;
6075
6076 switch (prop) {
6077 /* Special Read-only Properties */
6078 case VDEV_PROP_NAME:
6079 strval = vdev_name(vd, namebuf,
6080 sizeof (namebuf));
6081 if (strval == NULL)
6082 continue;
6083 vdev_prop_add_list(outnvl, propname, strval, 0,
6084 ZPROP_SRC_NONE);
6085 continue;
6086 case VDEV_PROP_CAPACITY:
6087 /* percent used */
6088 intval = (vd->vdev_stat.vs_dspace == 0) ? 0 :
6089 (vd->vdev_stat.vs_alloc * 100 /
6090 vd->vdev_stat.vs_dspace);
6091 vdev_prop_add_list(outnvl, propname, NULL,
6092 intval, ZPROP_SRC_NONE);
6093 continue;
6094 case VDEV_PROP_STATE:
6095 vdev_prop_add_list(outnvl, propname, NULL,
6096 vd->vdev_state, ZPROP_SRC_NONE);
6097 continue;
6098 case VDEV_PROP_GUID:
6099 vdev_prop_add_list(outnvl, propname, NULL,
6100 vd->vdev_guid, ZPROP_SRC_NONE);
6101 continue;
6102 case VDEV_PROP_ASIZE:
6103 vdev_prop_add_list(outnvl, propname, NULL,
6104 vd->vdev_asize, ZPROP_SRC_NONE);
6105 continue;
6106 case VDEV_PROP_PSIZE:
6107 vdev_prop_add_list(outnvl, propname, NULL,
6108 vd->vdev_psize, ZPROP_SRC_NONE);
6109 continue;
6110 case VDEV_PROP_ASHIFT:
6111 vdev_prop_add_list(outnvl, propname, NULL,
6112 vd->vdev_ashift, ZPROP_SRC_NONE);
6113 continue;
6114 case VDEV_PROP_SIZE:
6115 vdev_prop_add_list(outnvl, propname, NULL,
6116 vd->vdev_stat.vs_dspace, ZPROP_SRC_NONE);
6117 continue;
6118 case VDEV_PROP_FREE:
6119 vdev_prop_add_list(outnvl, propname, NULL,
6120 vd->vdev_stat.vs_dspace -
6121 vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
6122 continue;
6123 case VDEV_PROP_ALLOCATED:
6124 vdev_prop_add_list(outnvl, propname, NULL,
6125 vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
6126 continue;
6127 case VDEV_PROP_EXPANDSZ:
6128 vdev_prop_add_list(outnvl, propname, NULL,
6129 vd->vdev_stat.vs_esize, ZPROP_SRC_NONE);
6130 continue;
6131 case VDEV_PROP_FRAGMENTATION:
6132 vdev_prop_add_list(outnvl, propname, NULL,
6133 vd->vdev_stat.vs_fragmentation,
6134 ZPROP_SRC_NONE);
6135 continue;
6136 case VDEV_PROP_PARITY:
6137 vdev_prop_add_list(outnvl, propname, NULL,
6138 vdev_get_nparity(vd), ZPROP_SRC_NONE);
6139 continue;
6140 case VDEV_PROP_PATH:
6141 if (vd->vdev_path == NULL)
6142 continue;
6143 vdev_prop_add_list(outnvl, propname,
6144 vd->vdev_path, 0, ZPROP_SRC_NONE);
6145 continue;
6146 case VDEV_PROP_DEVID:
6147 if (vd->vdev_devid == NULL)
6148 continue;
6149 vdev_prop_add_list(outnvl, propname,
6150 vd->vdev_devid, 0, ZPROP_SRC_NONE);
6151 continue;
6152 case VDEV_PROP_PHYS_PATH:
6153 if (vd->vdev_physpath == NULL)
6154 continue;
6155 vdev_prop_add_list(outnvl, propname,
6156 vd->vdev_physpath, 0, ZPROP_SRC_NONE);
6157 continue;
6158 case VDEV_PROP_ENC_PATH:
6159 if (vd->vdev_enc_sysfs_path == NULL)
6160 continue;
6161 vdev_prop_add_list(outnvl, propname,
6162 vd->vdev_enc_sysfs_path, 0, ZPROP_SRC_NONE);
6163 continue;
6164 case VDEV_PROP_FRU:
6165 if (vd->vdev_fru == NULL)
6166 continue;
6167 vdev_prop_add_list(outnvl, propname,
6168 vd->vdev_fru, 0, ZPROP_SRC_NONE);
6169 continue;
6170 case VDEV_PROP_PARENT:
6171 if (vd->vdev_parent != NULL) {
6172 strval = vdev_name(vd->vdev_parent,
6173 namebuf, sizeof (namebuf));
6174 vdev_prop_add_list(outnvl, propname,
6175 strval, 0, ZPROP_SRC_NONE);
6176 }
6177 continue;
6178 case VDEV_PROP_CHILDREN:
6179 if (vd->vdev_children > 0)
6180 strval = kmem_zalloc(ZAP_MAXVALUELEN,
6181 KM_SLEEP);
6182 for (uint64_t i = 0; i < vd->vdev_children;
6183 i++) {
6184 const char *vname;
6185
6186 vname = vdev_name(vd->vdev_child[i],
6187 namebuf, sizeof (namebuf));
6188 if (vname == NULL)
6189 vname = "(unknown)";
6190 if (strlen(strval) > 0)
6191 strlcat(strval, ",",
6192 ZAP_MAXVALUELEN);
6193 strlcat(strval, vname, ZAP_MAXVALUELEN);
6194 }
6195 if (strval != NULL) {
6196 vdev_prop_add_list(outnvl, propname,
6197 strval, 0, ZPROP_SRC_NONE);
6198 kmem_free(strval, ZAP_MAXVALUELEN);
6199 }
6200 continue;
6201 case VDEV_PROP_NUMCHILDREN:
6202 vdev_prop_add_list(outnvl, propname, NULL,
6203 vd->vdev_children, ZPROP_SRC_NONE);
6204 continue;
6205 case VDEV_PROP_READ_ERRORS:
6206 vdev_prop_add_list(outnvl, propname, NULL,
6207 vd->vdev_stat.vs_read_errors,
6208 ZPROP_SRC_NONE);
6209 continue;
6210 case VDEV_PROP_WRITE_ERRORS:
6211 vdev_prop_add_list(outnvl, propname, NULL,
6212 vd->vdev_stat.vs_write_errors,
6213 ZPROP_SRC_NONE);
6214 continue;
6215 case VDEV_PROP_CHECKSUM_ERRORS:
6216 vdev_prop_add_list(outnvl, propname, NULL,
6217 vd->vdev_stat.vs_checksum_errors,
6218 ZPROP_SRC_NONE);
6219 continue;
6220 case VDEV_PROP_INITIALIZE_ERRORS:
6221 vdev_prop_add_list(outnvl, propname, NULL,
6222 vd->vdev_stat.vs_initialize_errors,
6223 ZPROP_SRC_NONE);
6224 continue;
6225 case VDEV_PROP_OPS_NULL:
6226 vdev_prop_add_list(outnvl, propname, NULL,
6227 vd->vdev_stat.vs_ops[ZIO_TYPE_NULL],
6228 ZPROP_SRC_NONE);
6229 continue;
6230 case VDEV_PROP_OPS_READ:
6231 vdev_prop_add_list(outnvl, propname, NULL,
6232 vd->vdev_stat.vs_ops[ZIO_TYPE_READ],
6233 ZPROP_SRC_NONE);
6234 continue;
6235 case VDEV_PROP_OPS_WRITE:
6236 vdev_prop_add_list(outnvl, propname, NULL,
6237 vd->vdev_stat.vs_ops[ZIO_TYPE_WRITE],
6238 ZPROP_SRC_NONE);
6239 continue;
6240 case VDEV_PROP_OPS_FREE:
6241 vdev_prop_add_list(outnvl, propname, NULL,
6242 vd->vdev_stat.vs_ops[ZIO_TYPE_FREE],
6243 ZPROP_SRC_NONE);
6244 continue;
6245 case VDEV_PROP_OPS_CLAIM:
6246 vdev_prop_add_list(outnvl, propname, NULL,
6247 vd->vdev_stat.vs_ops[ZIO_TYPE_CLAIM],
6248 ZPROP_SRC_NONE);
6249 continue;
6250 case VDEV_PROP_OPS_TRIM:
6251 /*
6252 * TRIM ops and bytes are reported to user
6253 * space as ZIO_TYPE_FLUSH. This is done to
6254 * preserve the vdev_stat_t structure layout
6255 * for user space.
6256 */
6257 vdev_prop_add_list(outnvl, propname, NULL,
6258 vd->vdev_stat.vs_ops[ZIO_TYPE_FLUSH],
6259 ZPROP_SRC_NONE);
6260 continue;
6261 case VDEV_PROP_BYTES_NULL:
6262 vdev_prop_add_list(outnvl, propname, NULL,
6263 vd->vdev_stat.vs_bytes[ZIO_TYPE_NULL],
6264 ZPROP_SRC_NONE);
6265 continue;
6266 case VDEV_PROP_BYTES_READ:
6267 vdev_prop_add_list(outnvl, propname, NULL,
6268 vd->vdev_stat.vs_bytes[ZIO_TYPE_READ],
6269 ZPROP_SRC_NONE);
6270 continue;
6271 case VDEV_PROP_BYTES_WRITE:
6272 vdev_prop_add_list(outnvl, propname, NULL,
6273 vd->vdev_stat.vs_bytes[ZIO_TYPE_WRITE],
6274 ZPROP_SRC_NONE);
6275 continue;
6276 case VDEV_PROP_BYTES_FREE:
6277 vdev_prop_add_list(outnvl, propname, NULL,
6278 vd->vdev_stat.vs_bytes[ZIO_TYPE_FREE],
6279 ZPROP_SRC_NONE);
6280 continue;
6281 case VDEV_PROP_BYTES_CLAIM:
6282 vdev_prop_add_list(outnvl, propname, NULL,
6283 vd->vdev_stat.vs_bytes[ZIO_TYPE_CLAIM],
6284 ZPROP_SRC_NONE);
6285 continue;
6286 case VDEV_PROP_BYTES_TRIM:
6287 /*
6288 * TRIM ops and bytes are reported to user
6289 * space as ZIO_TYPE_FLUSH. This is done to
6290 * preserve the vdev_stat_t structure layout
6291 * for user space.
6292 */
6293 vdev_prop_add_list(outnvl, propname, NULL,
6294 vd->vdev_stat.vs_bytes[ZIO_TYPE_FLUSH],
6295 ZPROP_SRC_NONE);
6296 continue;
6297 case VDEV_PROP_REMOVING:
6298 vdev_prop_add_list(outnvl, propname, NULL,
6299 vd->vdev_removing, ZPROP_SRC_NONE);
6300 continue;
6301 case VDEV_PROP_RAIDZ_EXPANDING:
6302 /* Only expose this for raidz */
6303 if (vd->vdev_ops == &vdev_raidz_ops) {
6304 vdev_prop_add_list(outnvl, propname,
6305 NULL, vd->vdev_rz_expanding,
6306 ZPROP_SRC_NONE);
6307 }
6308 continue;
6309 /* Numeric Properties */
6310 case VDEV_PROP_ALLOCATING:
6311 /* Leaf vdevs cannot have this property */
6312 if (vd->vdev_mg == NULL &&
6313 vd->vdev_top != NULL) {
6314 src = ZPROP_SRC_NONE;
6315 intval = ZPROP_BOOLEAN_NA;
6316 } else {
6317 err = vdev_prop_get_int(vd, prop,
6318 &intval);
6319 if (err && err != ENOENT)
6320 break;
6321
6322 if (intval ==
6323 vdev_prop_default_numeric(prop))
6324 src = ZPROP_SRC_DEFAULT;
6325 else
6326 src = ZPROP_SRC_LOCAL;
6327 }
6328
6329 vdev_prop_add_list(outnvl, propname, NULL,
6330 intval, src);
6331 break;
6332 case VDEV_PROP_FAILFAST:
6333 src = ZPROP_SRC_LOCAL;
6334 strval = NULL;
6335
6336 err = zap_lookup(mos, objid, nvpair_name(elem),
6337 sizeof (uint64_t), 1, &intval);
6338 if (err == ENOENT) {
6339 intval = vdev_prop_default_numeric(
6340 prop);
6341 err = 0;
6342 } else if (err) {
6343 break;
6344 }
6345 if (intval == vdev_prop_default_numeric(prop))
6346 src = ZPROP_SRC_DEFAULT;
6347
6348 vdev_prop_add_list(outnvl, propname, strval,
6349 intval, src);
6350 break;
6351 case VDEV_PROP_CHECKSUM_N:
6352 case VDEV_PROP_CHECKSUM_T:
6353 case VDEV_PROP_IO_N:
6354 case VDEV_PROP_IO_T:
6355 case VDEV_PROP_SLOW_IO_N:
6356 case VDEV_PROP_SLOW_IO_T:
6357 err = vdev_prop_get_int(vd, prop, &intval);
6358 if (err && err != ENOENT)
6359 break;
6360
6361 if (intval == vdev_prop_default_numeric(prop))
6362 src = ZPROP_SRC_DEFAULT;
6363 else
6364 src = ZPROP_SRC_LOCAL;
6365
6366 vdev_prop_add_list(outnvl, propname, NULL,
6367 intval, src);
6368 break;
6369 /* Text Properties */
6370 case VDEV_PROP_COMMENT:
6371 /* Exists in the ZAP below */
6372 /* FALLTHRU */
6373 case VDEV_PROP_USERPROP:
6374 /* User Properties */
6375 src = ZPROP_SRC_LOCAL;
6376
6377 err = zap_length(mos, objid, nvpair_name(elem),
6378 &integer_size, &num_integers);
6379 if (err)
6380 break;
6381
6382 switch (integer_size) {
6383 case 8:
6384 /* User properties cannot be integers */
6385 err = EINVAL;
6386 break;
6387 case 1:
6388 /* string property */
6389 strval = kmem_alloc(num_integers,
6390 KM_SLEEP);
6391 err = zap_lookup(mos, objid,
6392 nvpair_name(elem), 1,
6393 num_integers, strval);
6394 if (err) {
6395 kmem_free(strval,
6396 num_integers);
6397 break;
6398 }
6399 vdev_prop_add_list(outnvl, propname,
6400 strval, 0, src);
6401 kmem_free(strval, num_integers);
6402 break;
6403 }
6404 break;
6405 default:
6406 err = ENOENT;
6407 break;
6408 }
6409 if (err)
6410 break;
6411 }
6412 } else {
6413 /*
6414 * Get all properties from the MOS vdev property object.
6415 */
6416 zap_cursor_t zc;
6417 zap_attribute_t za;
6418 for (zap_cursor_init(&zc, mos, objid);
6419 (err = zap_cursor_retrieve(&zc, &za)) == 0;
6420 zap_cursor_advance(&zc)) {
6421 intval = 0;
6422 strval = NULL;
6423 zprop_source_t src = ZPROP_SRC_DEFAULT;
6424 propname = za.za_name;
6425
6426 switch (za.za_integer_length) {
6427 case 8:
6428 /* We do not allow integer user properties */
6429 /* This is likely an internal value */
6430 break;
6431 case 1:
6432 /* string property */
6433 strval = kmem_alloc(za.za_num_integers,
6434 KM_SLEEP);
6435 err = zap_lookup(mos, objid, za.za_name, 1,
6436 za.za_num_integers, strval);
6437 if (err) {
6438 kmem_free(strval, za.za_num_integers);
6439 break;
6440 }
6441 vdev_prop_add_list(outnvl, propname, strval, 0,
6442 src);
6443 kmem_free(strval, za.za_num_integers);
6444 break;
6445
6446 default:
6447 break;
6448 }
6449 }
6450 zap_cursor_fini(&zc);
6451 }
6452
6453 mutex_exit(&spa->spa_props_lock);
6454 if (err && err != ENOENT) {
6455 return (err);
6456 }
6457
6458 return (0);
6459 }
6460
6461 EXPORT_SYMBOL(vdev_fault);
6462 EXPORT_SYMBOL(vdev_degrade);
6463 EXPORT_SYMBOL(vdev_online);
6464 EXPORT_SYMBOL(vdev_offline);
6465 EXPORT_SYMBOL(vdev_clear);
6466
6467 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_count, UINT, ZMOD_RW,
6468 "Target number of metaslabs per top-level vdev");
6469
6470 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_shift, UINT, ZMOD_RW,
6471 "Default lower limit for metaslab size");
6472
6473 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, max_ms_shift, UINT, ZMOD_RW,
6474 "Default upper limit for metaslab size");
6475
6476 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, min_ms_count, UINT, ZMOD_RW,
6477 "Minimum number of metaslabs per top-level vdev");
6478
6479 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, ms_count_limit, UINT, ZMOD_RW,
6480 "Practical upper limit of total metaslabs per top-level vdev");
6481
6482 ZFS_MODULE_PARAM(zfs, zfs_, slow_io_events_per_second, UINT, ZMOD_RW,
6483 "Rate limit slow IO (delay) events to this many per second");
6484
6485 ZFS_MODULE_PARAM(zfs, zfs_, deadman_events_per_second, UINT, ZMOD_RW,
6486 "Rate limit hung IO (deadman) events to this many per second");
6487
6488 /* BEGIN CSTYLED */
6489 ZFS_MODULE_PARAM(zfs, zfs_, checksum_events_per_second, UINT, ZMOD_RW,
6490 "Rate limit checksum events to this many checksum errors per second "
6491 "(do not set below ZED threshold).");
6492 /* END CSTYLED */
6493
6494 ZFS_MODULE_PARAM(zfs, zfs_, scan_ignore_errors, INT, ZMOD_RW,
6495 "Ignore errors during resilver/scrub");
6496
6497 ZFS_MODULE_PARAM(zfs_vdev, vdev_, validate_skip, INT, ZMOD_RW,
6498 "Bypass vdev_validate()");
6499
6500 ZFS_MODULE_PARAM(zfs, zfs_, nocacheflush, INT, ZMOD_RW,
6501 "Disable cache flushes");
6502
6503 ZFS_MODULE_PARAM(zfs, zfs_, embedded_slog_min_ms, UINT, ZMOD_RW,
6504 "Minimum number of metaslabs required to dedicate one for log blocks");
6505
6506 /* BEGIN CSTYLED */
6507 ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, min_auto_ashift,
6508 param_set_min_auto_ashift, param_get_uint, ZMOD_RW,
6509 "Minimum ashift used when creating new top-level vdevs");
6510
6511 ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, max_auto_ashift,
6512 param_set_max_auto_ashift, param_get_uint, ZMOD_RW,
6513 "Maximum ashift used when optimizing for logical -> physical sector "
6514 "size on new top-level vdevs");
6515 /* END CSTYLED */
6516