/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>

/*
 * Virtual device management.
 */

static vdev_ops_t *vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
	&vdev_disk_ops,
	&vdev_file_ops,
	&vdev_missing_ops,
	&vdev_hole_ops,
	NULL
};

/* maximum scrub/resilver I/O queue per leaf vdev */
int zfs_scrub_limit = 10;

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, **opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
	uint64_t csize;

	for (int c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}

/*
 * Get the minimum allocatable size.  We define the allocatable size as
 * the vdev's asize rounded to the nearest metaslab.  This allows us to
 * replace or attach devices which don't have the same physical size but
 * can still satisfy the same number of allocations.
 */
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
	vdev_t *pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL (inactive spare or cache) or is the root,
	 * just return our own asize.
	 */
	if (pvd == NULL)
		return (vd->vdev_asize);

	/*
	 * The top-level vdev just returns the allocatable size rounded
	 * to the nearest metaslab.
	 */
	if (vd == vd->vdev_top)
		return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));

	/*
	 * The allocatable space for a raidz vdev is N * sizeof(smallest child),
	 * so each child must provide at least 1/Nth of its asize.
	 */
	if (pvd->vdev_ops == &vdev_raidz_ops)
		return (pvd->vdev_min_asize / pvd->vdev_children);

	return (pvd->vdev_min_asize);
}

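/*
 * Editor's illustration (not part of the original file): a minimal
 * user-space sketch of the P2ROUNDUP()/P2ALIGN() arithmetic used above,
 * assuming a hypothetical leaf with ashift == 9 (512-byte sectors) and
 * a top-level vdev with ms_shift == 30 (1 GB metaslabs).  P2ROUNDUP()
 * rounds a size up to the next alignment boundary; P2ALIGN() rounds
 * it down.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))	/* round up */
#define	P2ALIGN(x, align)	((x) & -(align))	/* round down */

int
main(void)
{
	uint64_t psize = 5000;			/* an odd-sized block */
	uint64_t asize = 107911053312ULL;	/* a 100.5 GB device */

	/* 5000 bytes round up to ten 512-byte sectors: prints 5120. */
	printf("%llu\n", (unsigned long long)P2ROUNDUP(psize, 1ULL << 9));

	/* 100.5 GB rounds down to 100 whole 1 GB metaslabs. */
	printf("%llu\n", (unsigned long long)P2ALIGN(asize, 1ULL << 30));
	return (0);
}
#endif
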
void
vdev_set_min_asize(vdev_t *vd)
{
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_set_min_asize(vd->vdev_child[c]);
}

vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (vdev < rvd->vdev_children) {
		ASSERT(rvd->vdev_child[vdev] != NULL);
		return (rvd->vdev_child[vdev]);
	}

	return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
	vdev_t *mvd;

	if (vd->vdev_guid == guid)
		return (vd);

	for (int c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
	size_t oldsize, newsize;
	uint64_t id = cvd->vdev_id;
	vdev_t **newchild;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
	ASSERT(cvd->vdev_parent == NULL);

	cvd->vdev_parent = pvd;

	if (pvd == NULL)
		return;

	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

	oldsize = pvd->vdev_children * sizeof (vdev_t *);
	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
	newsize = pvd->vdev_children * sizeof (vdev_t *);

	newchild = kmem_zalloc(newsize, KM_SLEEP);
	if (pvd->vdev_child != NULL) {
		bcopy(pvd->vdev_child, newchild, oldsize);
		kmem_free(pvd->vdev_child, oldsize);
	}

	pvd->vdev_child = newchild;
	pvd->vdev_child[id] = cvd;

	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum += cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight += zfs_scrub_limit;
}

void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
	int c;
	uint_t id = cvd->vdev_id;

	ASSERT(cvd->vdev_parent == pvd);

	if (pvd == NULL)
		return;

	ASSERT(id < pvd->vdev_children);
	ASSERT(pvd->vdev_child[id] == cvd);

	pvd->vdev_child[id] = NULL;
	cvd->vdev_parent = NULL;

	for (c = 0; c < pvd->vdev_children; c++)
		if (pvd->vdev_child[c])
			break;

	if (c == pvd->vdev_children) {
		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
		pvd->vdev_child = NULL;
		pvd->vdev_children = 0;
	}

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight -= zfs_scrub_limit;
}

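/*
 * Editor's illustration (not part of the original file): the guid-sum
 * bookkeeping above maintains the invariant that every vdev's
 * vdev_guid_sum equals its own guid plus the guid sums of all its
 * children.  A hypothetical user-space sketch of the same update rule:
 */
#if 0
#include <stdint.h>

struct toy_vdev {
	uint64_t guid;
	uint64_t guid_sum;		/* guid + sum of children's sums */
	struct toy_vdev *parent;
};

/* On attach, every ancestor absorbs the child's entire subtree sum. */
static void
toy_add_child(struct toy_vdev *pvd, struct toy_vdev *cvd)
{
	for (; pvd != NULL; pvd = pvd->parent)
		pvd->guid_sum += cvd->guid_sum;
}

/* On detach, the same amount is subtracted, restoring the invariant. */
static void
toy_remove_child(struct toy_vdev *pvd, struct toy_vdev *cvd)
{
	for (; pvd != NULL; pvd = pvd->parent)
		pvd->guid_sum -= cvd->guid_sum;
}
#endif
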
/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
	vdev_t **newchild, *cvd;
	int oldc = pvd->vdev_children;
	int newc;

	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	for (int c = newc = 0; c < oldc; c++)
		if (pvd->vdev_child[c])
			newc++;

	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);

	for (int c = newc = 0; c < oldc; c++) {
		if ((cvd = pvd->vdev_child[c]) != NULL) {
			newchild[newc] = cvd;
			cvd->vdev_id = newc++;
		}
	}

	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
	pvd->vdev_child = newchild;
	pvd->vdev_children = newc;
}

/*
 * Allocate and minimally initialize a vdev_t.
 */
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
	vdev_t *vd;

	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);

	if (spa->spa_root_vdev == NULL) {
		ASSERT(ops == &vdev_root_ops);
		spa->spa_root_vdev = vd;
	}

	if (guid == 0 && ops != &vdev_hole_ops) {
		if (spa->spa_root_vdev == vd) {
			/*
			 * The root vdev's guid will also be the pool guid,
			 * which must be unique among all pools.
			 */
			while (guid == 0 || spa_guid_exists(guid, 0))
				guid = spa_get_random(-1ULL);
		} else {
			/*
			 * Any other vdev's guid must be unique within the pool.
			 */
			while (guid == 0 ||
			    spa_guid_exists(spa_guid(spa), guid))
				guid = spa_get_random(-1ULL);
		}
		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
	}

	vd->vdev_spa = spa;
	vd->vdev_id = id;
	vd->vdev_guid = guid;
	vd->vdev_guid_sum = guid;
	vd->vdev_ops = ops;
	vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_ishole = (ops == &vdev_hole_ops);

	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0,
		    &vd->vdev_dtl_lock);
	}
	txg_list_create(&vd->vdev_ms_list,
	    offsetof(struct metaslab, ms_txg_node));
	txg_list_create(&vd->vdev_dtl_list,
	    offsetof(struct vdev, vdev_dtl_node));
	vd->vdev_stat.vs_timestamp = gethrtime();
	vdev_queue_init(vd);
	vdev_cache_init(vd);

	return (vd);
}

/*
 * Allocate a new vdev.  The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
	vdev_ops_t *ops;
	char *type;
	uint64_t guid = 0, islog, nparity;
	vdev_t *vd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EINVAL);

	if ((ops = vdev_getops(type)) == NULL)
		return (EINVAL);

	/*
	 * If this is a load, get the vdev guid from the nvlist.
	 * Otherwise, vdev_alloc_common() will generate one for us.
	 */
	if (alloctype == VDEV_ALLOC_LOAD) {
		uint64_t label_id;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
		    label_id != id)
			return (EINVAL);

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_SPARE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	}

	/*
	 * The first allocated vdev must be of type 'root'.
	 */
	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
		return (EINVAL);

	/*
	 * Determine whether we're a log vdev.
	 */
	islog = 0;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
		return (ENOTSUP);

	if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
		return (ENOTSUP);

	/*
	 * Set the nparity property for RAID-Z vdevs.
	 */
	nparity = -1ULL;
	if (ops == &vdev_raidz_ops) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
		    &nparity) == 0) {
			if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
				return (EINVAL);
			/*
			 * Previous versions could only support 1 or 2 parity
			 * devices.
			 */
			if (nparity > 1 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ2)
				return (ENOTSUP);
			if (nparity > 2 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ3)
				return (ENOTSUP);
		} else {
			/*
			 * We require the parity to be specified for SPAs that
			 * support multiple parity levels.
			 */
			if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
				return (EINVAL);
			/*
			 * Otherwise, we default to 1 parity device for RAID-Z.
			 */
			nparity = 1;
		}
	} else {
		nparity = 0;
	}
	ASSERT(nparity != -1ULL);

	vd = vdev_alloc_common(spa, id, guid, ops);

	vd->vdev_islog = islog;
	vd->vdev_nparity = nparity;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
		vd->vdev_path = spa_strdup(vd->vdev_path);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
		vd->vdev_devid = spa_strdup(vd->vdev_devid);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
	    &vd->vdev_physpath) == 0)
		vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
		vd->vdev_fru = spa_strdup(vd->vdev_fru);

	/*
	 * Set the whole_disk property.  If it's not specified, leave the value
	 * as -1.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
	    &vd->vdev_wholedisk) != 0)
		vd->vdev_wholedisk = -1ULL;

	/*
	 * Look for the 'not present' flag.  This will only be set if the device
	 * was not present at the time of import.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &vd->vdev_not_present);

	/*
	 * Get the alignment requirement.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

	/*
	 * Retrieve the vdev creation time.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
	    &vd->vdev_crtxg);

	/*
	 * If we're a top-level vdev, try to load the allocation parameters.
	 */
	if (parent && !parent->vdev_parent && alloctype == VDEV_ALLOC_LOAD) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    &vd->vdev_ms_array);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    &vd->vdev_ms_shift);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    &vd->vdev_asize);
	}

	if (parent && !parent->vdev_parent) {
		ASSERT(alloctype == VDEV_ALLOC_LOAD ||
		    alloctype == VDEV_ALLOC_ADD);
		vd->vdev_mg = metaslab_group_create(islog ?
		    spa_log_class(spa) : spa_normal_class(spa), vd);
	}

	/*
	 * If we're a leaf vdev, try to load the DTL object and other state.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
	    alloctype == VDEV_ALLOC_ROOTPOOL)) {
		if (alloctype == VDEV_ALLOC_LOAD) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
			    &vd->vdev_dtl_smo.smo_object);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
			    &vd->vdev_unspare);
		}

		if (alloctype == VDEV_ALLOC_ROOTPOOL) {
			uint64_t spare = 0;

			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare) == 0 && spare)
				spa_spare_add(vd);
		}

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
		    &vd->vdev_offline);

		/*
		 * When importing a pool, we want to ignore the persistent fault
		 * state, as the diagnosis made on another system may not be
		 * valid in the current context.  Local vdevs will
		 * remain in the faulted state.
		 */
		if (spa->spa_load_state == SPA_LOAD_OPEN) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
			    &vd->vdev_faulted);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
			    &vd->vdev_degraded);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
			    &vd->vdev_removed);

			if (vd->vdev_faulted || vd->vdev_degraded) {
				char *aux;

				vd->vdev_label_aux =
				    VDEV_AUX_ERR_EXCEEDED;
				if (nvlist_lookup_string(nv,
				    ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
				    strcmp(aux, "external") == 0)
					vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
			}
		}
	}

	/*
	 * Add ourselves to the parent's list of children.
	 */
	vdev_add_child(parent, vd);

	*vdp = vd;

	return (0);
}

void
vdev_free(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	/*
	 * vdev_free() implies closing the vdev first.  This is simpler than
	 * trying to ensure complicated semantics for all callers.
	 */
	vdev_close(vd);

	ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
	ASSERT(!list_link_active(&vd->vdev_state_dirty_node));

	/*
	 * Free all children.
	 */
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_free(vd->vdev_child[c]);

	ASSERT(vd->vdev_child == NULL);
	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

	/*
	 * Discard allocation state.
	 */
	if (vd->vdev_mg != NULL) {
		vdev_metaslab_fini(vd);
		metaslab_group_destroy(vd->vdev_mg);
	}

	ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);

	/*
	 * Remove this vdev from its parent's child list.
	 */
	vdev_remove_child(vd->vdev_parent, vd);

	ASSERT(vd->vdev_parent == NULL);

	/*
	 * Clean up vdev structure.
	 */
	vdev_queue_fini(vd);
	vdev_cache_fini(vd);

	if (vd->vdev_path)
		spa_strfree(vd->vdev_path);
	if (vd->vdev_devid)
		spa_strfree(vd->vdev_devid);
	if (vd->vdev_physpath)
		spa_strfree(vd->vdev_physpath);
	if (vd->vdev_fru)
		spa_strfree(vd->vdev_fru);

	if (vd->vdev_isspare)
		spa_spare_remove(vd);
	if (vd->vdev_isl2cache)
		spa_l2cache_remove(vd);

	txg_list_destroy(&vd->vdev_ms_list);
	txg_list_destroy(&vd->vdev_dtl_list);

	mutex_enter(&vd->vdev_dtl_lock);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_unload(&vd->vdev_dtl[t]);
		space_map_destroy(&vd->vdev_dtl[t]);
	}
	mutex_exit(&vd->vdev_dtl_lock);

	mutex_destroy(&vd->vdev_dtl_lock);
	mutex_destroy(&vd->vdev_stat_lock);
	mutex_destroy(&vd->vdev_probe_lock);

	if (vd == spa->spa_root_vdev)
		spa->spa_root_vdev = NULL;

	kmem_free(vd, sizeof (vdev_t));
}

/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;

	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_ms = svd->vdev_ms;

	svd->vdev_mg = NULL;
	svd->vdev_ms = NULL;

	if (tvd->vdev_mg != NULL)
		tvd->vdev_mg->mg_vd = tvd;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;
	svd->vdev_stat.vs_dspace = 0;

	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
	}

	if (list_link_active(&svd->vdev_config_dirty_node)) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	if (list_link_active(&svd->vdev_state_dirty_node)) {
		vdev_state_clean(svd);
		vdev_state_dirty(tvd);
	}

	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
	svd->vdev_deflate_ratio = 0;

	tvd->vdev_islog = svd->vdev_islog;
	svd->vdev_islog = 0;
}

static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
	if (vd == NULL)
		return;

	vd->vdev_top = tvd;

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev.
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
	spa_t *spa = cvd->vdev_spa;
	vdev_t *pvd = cvd->vdev_parent;
	vdev_t *mvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

	mvd->vdev_asize = cvd->vdev_asize;
	mvd->vdev_min_asize = cvd->vdev_min_asize;
	mvd->vdev_ashift = cvd->vdev_ashift;
	mvd->vdev_state = cvd->vdev_state;
	mvd->vdev_crtxg = cvd->vdev_crtxg;

	vdev_remove_child(pvd, cvd);
	vdev_add_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_children;
	vdev_add_child(mvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (mvd == mvd->vdev_top)
		vdev_top_transfer(cvd, mvd);

	return (mvd);
}

/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
	vdev_t *mvd = cvd->vdev_parent;
	vdev_t *pvd = mvd->vdev_parent;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	ASSERT(mvd->vdev_children == 1);
	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
	    mvd->vdev_ops == &vdev_replacing_ops ||
	    mvd->vdev_ops == &vdev_spare_ops);
	cvd->vdev_ashift = mvd->vdev_ashift;

	vdev_remove_child(mvd, cvd);
	vdev_remove_child(pvd, mvd);

	/*
	 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
	 * Otherwise, we could have detached an offline device, and when we
	 * go to import the pool we'll think we have two top-level vdevs,
	 * instead of a different version of the same top-level vdev.
	 */
	if (mvd->vdev_top == mvd) {
		uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
		cvd->vdev_guid += guid_delta;
		cvd->vdev_guid_sum += guid_delta;
	}
	cvd->vdev_id = mvd->vdev_id;
	vdev_add_child(pvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (cvd == cvd->vdev_top)
		vdev_top_transfer(mvd, cvd);

	ASSERT(mvd->vdev_children == 0);
	vdev_free(mvd);
}

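/*
 * Editor's illustration (not part of the original file): the guid
 * preservation above only applies the *delta* between mvd and cvd, so
 * both the child's guid and its guid sum shift by the same amount and
 * the guid-sum invariant survives up to the root.  A hypothetical
 * user-space sketch with made-up guids:
 */
#if 0
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	/* Hypothetical guids for the interior vdev and its last child. */
	uint64_t mvd_guid = 700, cvd_guid = 300, cvd_guid_sum = 300;

	/* Unsigned subtraction wraps, but the later addition re-wraps. */
	uint64_t guid_delta = mvd_guid - cvd_guid;

	cvd_guid += guid_delta;		/* child adopts the old top guid */
	cvd_guid_sum += guid_delta;	/* sum shifts by the same amount */

	assert(cvd_guid == 700 && cvd_guid_sum == 700);
	return (0);
}
#endif
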
int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	uint64_t m;
	uint64_t oldc = vd->vdev_ms_count;
	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
	metaslab_t **mspp;
	int error;

	ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	/*
	 * This vdev is not being allocated from yet or is a hole.
	 */
	if (vd->vdev_ms_shift == 0)
		return (0);

	ASSERT(!vd->vdev_ishole);

	/*
	 * Compute the raidz-deflation ratio.  Note, we hard-code
	 * in 128k (1 << 17) because it is the current "typical" blocksize.
	 * Even if SPA_MAXBLOCKSIZE changes, this algorithm must never change,
	 * or we will inconsistently account for existing bp's.
	 */
	vd->vdev_deflate_ratio = (1 << 17) /
	    (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);

	ASSERT(oldc <= newc);

	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);

	if (oldc != 0) {
		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
		kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
	}

	vd->vdev_ms = mspp;
	vd->vdev_ms_count = newc;

	for (m = oldc; m < newc; m++) {
		space_map_obj_t smo = { 0, 0, 0 };
		if (txg == 0) {
			uint64_t object = 0;
			error = dmu_read(mos, vd->vdev_ms_array,
			    m * sizeof (uint64_t), sizeof (uint64_t), &object,
			    DMU_READ_PREFETCH);
			if (error)
				return (error);
			if (object != 0) {
				dmu_buf_t *db;
				error = dmu_bonus_hold(mos, object, FTAG, &db);
				if (error)
					return (error);
				ASSERT3U(db->db_size, >=, sizeof (smo));
				bcopy(db->db_data, &smo, sizeof (smo));
				ASSERT3U(smo.smo_object, ==, object);
				dmu_buf_rele(db, FTAG);
			}
		}
		vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo,
		    m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg);
	}

	if (txg == 0)
		spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);

	if (oldc == 0)
		metaslab_group_activate(vd->vdev_mg);

	if (txg == 0)
		spa_config_exit(spa, SCL_ALLOC, FTAG);

	return (0);
}

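/*
 * Editor's illustration (not part of the original file): worked numbers
 * for the deflate-ratio computation above, assuming a hypothetical
 * 5-wide raidz1 with ashift == 9, where a 128K block (256 data sectors
 * across 4 data columns) allocates 64 parity sectors as well, i.e.
 * 320 sectors == 163840 bytes:
 *
 *	vdev_deflate_ratio = (1 << 17) / (163840 >> SPA_MINBLOCKSHIFT)
 *	                   = 131072 / 320
 *	                   = 409		(SPA_MINBLOCKSHIFT == 9)
 */
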
void
vdev_metaslab_fini(vdev_t *vd)
{
	uint64_t m;
	uint64_t count = vd->vdev_ms_count;

	if (vd->vdev_ms != NULL) {
		metaslab_group_passivate(vd->vdev_mg);
		for (m = 0; m < count; m++)
			if (vd->vdev_ms[m] != NULL)
				metaslab_fini(vd->vdev_ms[m]);
		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
		vd->vdev_ms = NULL;
	}
}

typedef struct vdev_probe_stats {
	boolean_t	vps_readable;
	boolean_t	vps_writeable;
	int		vps_flags;
} vdev_probe_stats_t;

static void
vdev_probe_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	vdev_probe_stats_t *vps = zio->io_private;

	ASSERT(vd->vdev_probe_zio != NULL);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_error == 0)
			vps->vps_readable = 1;
		if (zio->io_error == 0 && spa_writeable(spa)) {
			zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
			    zio->io_offset, zio->io_size, zio->io_data,
			    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
			    ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
		} else {
			zio_buf_free(zio->io_data, zio->io_size);
		}
	} else if (zio->io_type == ZIO_TYPE_WRITE) {
		if (zio->io_error == 0)
			vps->vps_writeable = 1;
		zio_buf_free(zio->io_data, zio->io_size);
	} else if (zio->io_type == ZIO_TYPE_NULL) {
		zio_t *pio;

		vd->vdev_cant_read |= !vps->vps_readable;
		vd->vdev_cant_write |= !vps->vps_writeable;

		if (vdev_readable(vd) &&
		    (vdev_writeable(vd) || !spa_writeable(spa))) {
			zio->io_error = 0;
		} else {
			ASSERT(zio->io_error != 0);
			zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
			    spa, vd, NULL, 0, 0);
			zio->io_error = ENXIO;
		}

		mutex_enter(&vd->vdev_probe_lock);
		ASSERT(vd->vdev_probe_zio == zio);
		vd->vdev_probe_zio = NULL;
		mutex_exit(&vd->vdev_probe_lock);

		while ((pio = zio_walk_parents(zio)) != NULL)
			if (!vdev_accessible(vd, pio))
				pio->io_error = ENXIO;

		kmem_free(vps, sizeof (*vps));
	}
}

/*
 * Determine whether this device is accessible by reading and writing
 * to several known locations: the pad regions of each vdev label
 * but the first (which we leave alone in case it contains a VTOC).
 */
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
	spa_t *spa = vd->vdev_spa;
	vdev_probe_stats_t *vps = NULL;
	zio_t *pio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	/*
	 * Don't probe the probe.
	 */
	if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
		return (NULL);

	/*
	 * To prevent 'probe storms' when a device fails, we create
	 * just one probe i/o at a time.  All zios that want to probe
	 * this vdev will become parents of the probe io.
	 */
	mutex_enter(&vd->vdev_probe_lock);

	if ((pio = vd->vdev_probe_zio) == NULL) {
		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);

		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
		    ZIO_FLAG_TRYHARD;

		if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
			/*
			 * vdev_cant_read and vdev_cant_write can only
			 * transition from TRUE to FALSE when we have the
			 * SCL_ZIO lock as writer; otherwise they can only
			 * transition from FALSE to TRUE.  This ensures that
			 * any zio looking at these values can assume that
			 * failures persist for the life of the I/O.  That's
			 * important because when a device has intermittent
			 * connectivity problems, we want to ensure that
			 * they're ascribed to the device (ENXIO) and not
			 * the zio (EIO).
			 *
			 * Since we hold SCL_ZIO as writer here, clear both
			 * values so the probe can reevaluate from first
			 * principles.
			 */
			vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
			vd->vdev_cant_read = B_FALSE;
			vd->vdev_cant_write = B_FALSE;
		}

		vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
		    vdev_probe_done, vps,
		    vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);

		if (zio != NULL) {
			vd->vdev_probe_wanted = B_TRUE;
			spa_async_request(spa, SPA_ASYNC_PROBE);
		}
	}

	if (zio != NULL)
		zio_add_child(zio, pio);

	mutex_exit(&vd->vdev_probe_lock);

	if (vps == NULL) {
		ASSERT(zio != NULL);
		return (NULL);
	}

	for (int l = 1; l < VDEV_LABELS; l++) {
		zio_nowait(zio_read_phys(pio, vd,
		    vdev_label_offset(vd->vdev_psize, l,
		    offsetof(vdev_label_t, vl_pad2)),
		    VDEV_PAD_SIZE, zio_buf_alloc(VDEV_PAD_SIZE),
		    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
		    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
	}

	if (zio == NULL)
		return (pio);

	zio_nowait(pio);
	return (NULL);
}

static void
vdev_open_child(void *arg)
{
	vdev_t *vd = arg;

	vd->vdev_open_thread = curthread;
	vd->vdev_open_error = vdev_open(vd);
	vd->vdev_open_thread = NULL;
}

boolean_t
vdev_uses_zvols(vdev_t *vd)
{
	if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
	    strlen(ZVOL_DIR)) == 0)
		return (B_TRUE);
	for (int c = 0; c < vd->vdev_children; c++)
		if (vdev_uses_zvols(vd->vdev_child[c]))
			return (B_TRUE);
	return (B_FALSE);
}

void
vdev_open_children(vdev_t *vd)
{
	taskq_t *tq;
	int children = vd->vdev_children;

	/*
	 * In order to handle pools on top of zvols, do the opens
	 * in a single thread so that the same thread holds the
	 * spa_namespace_lock.
	 */
	if (vdev_uses_zvols(vd)) {
		for (int c = 0; c < children; c++)
			vd->vdev_child[c]->vdev_open_error =
			    vdev_open(vd->vdev_child[c]);
		return;
	}
	tq = taskq_create("vdev_open", children, minclsyspri,
	    children, children, TASKQ_PREPOPULATE);

	for (int c = 0; c < children; c++)
		VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
		    TQ_SLEEP) != NULL);

	taskq_destroy(tq);
}

/*
 * Prepare a virtual device for access.
 */
int
vdev_open(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int error;
	uint64_t osize = 0;
	uint64_t asize, psize;
	uint64_t ashift = 0;

	ASSERT(vd->vdev_open_thread == curthread ||
	    spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
	ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
	    vd->vdev_state == VDEV_STATE_CANT_OPEN ||
	    vd->vdev_state == VDEV_STATE_OFFLINE);

	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	vd->vdev_cant_read = B_FALSE;
	vd->vdev_cant_write = B_FALSE;
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	/*
	 * If this vdev is not removed, check its fault status.  If it's
	 * faulted, bail out of the open.
	 */
	if (!vd->vdev_removed && vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
		    vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    vd->vdev_label_aux);
		return (ENXIO);
	} else if (vd->vdev_offline) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
		return (ENXIO);
	}

	error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);

	/*
	 * Reset the vdev_reopening flag so that we actually close
	 * the vdev on error.
	 */
	vd->vdev_reopening = B_FALSE;
	if (zio_injection_enabled && error == 0)
		error = zio_handle_device_injection(vd, NULL, ENXIO);

	if (error) {
		if (vd->vdev_removed &&
		    vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
			vd->vdev_removed = B_FALSE;

		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    vd->vdev_stat.vs_aux);
		return (error);
	}

	vd->vdev_removed = B_FALSE;

	/*
	 * Recheck the faulted flag now that we have confirmed that
	 * the vdev is accessible.  If we're faulted, bail.
	 */
	if (vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
		    vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    vd->vdev_label_aux);
		return (ENXIO);
	}

	if (vd->vdev_degraded) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
		    VDEV_AUX_ERR_EXCEEDED);
	} else {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
	}

	/*
	 * For hole or missing vdevs we just return success.
	 */
	if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
		return (0);

	for (int c = 0; c < vd->vdev_children; c++) {
		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
			    VDEV_AUX_NONE);
			break;
		}
	}

	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));

	if (vd->vdev_children == 0) {
		if (osize < SPA_MINDEVSIZE) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = osize;
		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
	} else {
		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = 0;
		asize = osize;
	}

	vd->vdev_psize = psize;

	/*
	 * Make sure the allocatable size hasn't shrunk.
	 */
	if (asize < vd->vdev_min_asize) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LABEL);
		return (EINVAL);
	}

	if (vd->vdev_asize == 0) {
		/*
		 * This is the first-ever open, so use the computed values.
		 * For testing purposes, a higher ashift can be requested.
		 */
		vd->vdev_asize = asize;
		vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
	} else {
		/*
		 * Make sure the alignment requirement hasn't increased.
		 */
		if (ashift > vd->vdev_top->vdev_ashift) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (EINVAL);
		}
	}

	/*
	 * If all children are healthy and the asize has increased,
	 * then we've experienced dynamic LUN growth.  If automatic
	 * expansion is enabled then use the additional space.
	 */
	if (vd->vdev_state == VDEV_STATE_HEALTHY && asize > vd->vdev_asize &&
	    (vd->vdev_expanding || spa->spa_autoexpand))
		vd->vdev_asize = asize;

	vdev_set_min_asize(vd);

	/*
	 * Ensure we can issue some IO before declaring the
	 * vdev open for business.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_IO_FAILURE);
		return (error);
	}

	/*
	 * If a leaf vdev has a DTL, and seems healthy, then kick off a
	 * resilver.  But don't do this if we are doing a reopen for a scrub,
	 * since this would just restart the scrub we are already doing.
	 */
	if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
	    vdev_resilver_needed(vd, NULL, NULL))
		spa_async_request(spa, SPA_ASYNC_RESILVER);

	return (0);
}

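/*
 * Editor's illustration (not part of the original file): worked numbers
 * for the leaf asize computation above, assuming the on-disk constants
 * of this vintage (sizeof (vdev_label_t) == 256K, a 3.5M boot-block
 * reserve after the two front labels, and two labels at the tail):
 *
 *	asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)
 *	      = osize - ((2 * 256K + 3.5M) + 2 * 256K)
 *	      = osize - 4.5M
 *
 * so a hypothetical 100 GB disk contributes 100 GB - 4.5 MB of
 * allocatable space before metaslab rounding.
 */
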
/*
 * Called once the vdevs are all opened, this routine validates the label
 * contents.  This needs to be done before vdev_load() so that we don't
 * inadvertently do repair I/Os to the wrong device.
 *
 * This function will only return failure if one of the vdevs indicates that it
 * has since been destroyed or exported.  This is only possible if
 * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
 * will be updated but the function will return 0.
 */
int
vdev_validate(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	nvlist_t *label;
	uint64_t guid, top_guid;
	uint64_t state;

	for (int c = 0; c < vd->vdev_children; c++)
		if (vdev_validate(vd->vdev_child[c]) != 0)
			return (EBADF);

	/*
	 * If the device has already failed, or was marked offline, don't do
	 * any further validation.  Otherwise, label I/O will fail and we will
	 * overwrite the previous state.
	 */
	if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {

		if ((label = vdev_label_read_config(vd)) == NULL) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
		    &guid) != 0 || guid != spa_guid(spa)) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		/*
		 * If this vdev just became a top-level vdev because its
		 * sibling was detached, it will have adopted the parent's
		 * vdev guid -- but the label may or may not be on disk yet.
		 * Fortunately, either version of the label will have the
		 * same top guid, so if we're a top-level vdev, we can
		 * safely compare to that instead.
		 */
		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
		    &guid) != 0 ||
		    nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID,
		    &top_guid) != 0 ||
		    (vd->vdev_guid != guid &&
		    (vd->vdev_guid != top_guid || vd != vd->vdev_top))) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		nvlist_free(label);

		/*
		 * If spa->spa_load_verbatim is true, no need to check the
		 * state of the pool.
		 */
		if (!spa->spa_load_verbatim &&
		    spa->spa_load_state == SPA_LOAD_OPEN &&
		    state != POOL_STATE_ACTIVE)
			return (EBADF);

		/*
		 * If we were able to open and validate a vdev that was
		 * previously marked permanently unavailable, clear that state
		 * now.
		 */
		if (vd->vdev_not_present)
			vd->vdev_not_present = 0;
	}

	return (0);
}

/*
 * Close a virtual device.
 */
void
vdev_close(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *pvd = vd->vdev_parent;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	if (pvd != NULL && pvd->vdev_reopening)
		vd->vdev_reopening = pvd->vdev_reopening;

	vd->vdev_ops->vdev_op_close(vd);

	vdev_cache_purge(vd);

	/*
	 * We record the previous state before we close it, so that if we are
	 * doing a reopen(), we don't generate FMA ereports if we notice that
	 * it's still faulted.
	 */
	vd->vdev_prevstate = vd->vdev_state;

	if (vd->vdev_offline)
		vd->vdev_state = VDEV_STATE_OFFLINE;
	else
		vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}

/*
 * Reopen all interior vdevs and any unopened leaves.  We don't actually
 * reopen leaf vdevs which had previously been opened as they might deadlock
 * on the spa_config_lock.  Instead we only obtain the leaf's physical size.
 * If the leaf has never been opened then open it, as usual.
 */
void
vdev_reopen(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	vd->vdev_reopening = B_TRUE;
	vdev_close(vd);
	(void) vdev_open(vd);

	/*
	 * Call vdev_validate() here to make sure we have the same device.
	 * Otherwise, a device with an invalid label could be successfully
	 * opened in response to vdev_reopen().
	 */
	if (vd->vdev_aux) {
		(void) vdev_validate_aux(vd);
		if (vdev_readable(vd) && vdev_writeable(vd) &&
		    vd->vdev_aux == &spa->spa_l2cache &&
		    !l2arc_vdev_present(vd))
			l2arc_add_vdev(spa, vd);
	} else {
		(void) vdev_validate(vd);
	}

	/*
	 * Reassess parent vdev's health.
	 */
	vdev_propagate_state(vd);
}

int
vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
{
	int error;

	/*
	 * Normally, partial opens (e.g. of a mirror) are allowed.
	 * For a create, however, we want to fail the request if
	 * there are any components we can't open.
	 */
	error = vdev_open(vd);

	if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
		vdev_close(vd);
		return (error ? error : ENXIO);
	}

	/*
	 * Recursively initialize all labels.
	 */
	if ((error = vdev_label_init(vd, txg, isreplacing ?
	    VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
		vdev_close(vd);
		return (error);
	}

	return (0);
}

void
vdev_metaslab_set_size(vdev_t *vd)
{
	/*
	 * Aim for roughly 200 metaslabs per vdev.
	 */
	vd->vdev_ms_shift = highbit(vd->vdev_asize / 200);
	vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);
}

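/*
 * Editor's illustration (not part of the original file): worked numbers
 * for the "roughly 200 metaslabs" target above, assuming a hypothetical
 * 100 GB top-level vdev.  highbit() returns the position of the highest
 * set bit (highbit(1) == 1), so:
 *
 *	asize / 200 = 107374182400 / 200 = 536870912 = 2^29
 *	ms_shift    = highbit(2^29) = 30	(1 GB metaslabs)
 *	count       = asize >> ms_shift = 100 metaslabs
 *
 * Because of the power-of-two rounding the actual count lands between
 * 100 and 200; the MAX() with SPA_MAXBLOCKSHIFT (17) keeps a metaslab
 * from ever being smaller than the largest block (128K).
 */
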
void
vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
{
	ASSERT(vd == vd->vdev_top);
	ASSERT(!vd->vdev_ishole);
	ASSERT(ISP2(flags));

	if (flags & VDD_METASLAB)
		(void) txg_list_add(&vd->vdev_ms_list, arg, txg);

	if (flags & VDD_DTL)
		(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);

	(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
}

/*
 * DTLs.
 *
 * A vdev's DTL (dirty time log) is the set of transaction groups for which
 * the vdev has less than perfect replication.  There are four kinds of DTL:
 *
 * DTL_MISSING: txgs for which the vdev has no valid copies of the data
 *
 * DTL_PARTIAL: txgs for which data is available, but not fully replicated
 *
 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
 *	scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
 *	txgs that was scrubbed.
 *
 * DTL_OUTAGE: txgs which cannot currently be read, whether due to
 *	persistent errors or just some device being offline.
 *	Unlike the other three, the DTL_OUTAGE map is not generally
 *	maintained; it's only computed when needed, typically to
 *	determine whether a device can be detached.
 *
 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
 * either has the data or it doesn't.
 *
 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
 * if any child is less than fully replicated, then so is its parent.
 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
 * comprising only those txgs which appear in more than 'maxfaults' children;
 * those are the txgs we don't have enough replication to read.  For example,
 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
 * thus, its DTL_MISSING consists of the set of txgs that appear in more than
 * two child DTL_MISSING maps.
 *
 * It should be clear from the above that to compute the DTLs and outage maps
 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
 * Therefore, that is all we keep on disk.  When loading the pool, or after
 * a configuration change, we generate all other DTLs from first principles.
 */

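/*
 * Editor's illustration (not part of the original file): consider a
 * hypothetical two-way mirror where child A was offline for txgs
 * [100, 200] and child B for txgs [150, 250].  Each leaf's DTL_MISSING
 * is its own outage.  The mirror's DTL_PARTIAL is the union [100, 250]
 * (some child is missing data there), while its DTL_MISSING -- with
 * minref equal to the child count, 2 -- is the intersection [150, 200]:
 * only txgs missing from *both* children are unreadable.
 */
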
void
vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
	space_map_t *sm = &vd->vdev_dtl[t];

	ASSERT(t < DTL_TYPES);
	ASSERT(vd != vd->vdev_spa->spa_root_vdev);

	mutex_enter(sm->sm_lock);
	if (!space_map_contains(sm, txg, size))
		space_map_add(sm, txg, size);
	mutex_exit(sm->sm_lock);
}

boolean_t
vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
	space_map_t *sm = &vd->vdev_dtl[t];
	boolean_t dirty = B_FALSE;

	ASSERT(t < DTL_TYPES);
	ASSERT(vd != vd->vdev_spa->spa_root_vdev);

	mutex_enter(sm->sm_lock);
	if (sm->sm_space != 0)
		dirty = space_map_contains(sm, txg, size);
	mutex_exit(sm->sm_lock);

	return (dirty);
}

boolean_t
vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
{
	space_map_t *sm = &vd->vdev_dtl[t];
	boolean_t empty;

	mutex_enter(sm->sm_lock);
	empty = (sm->sm_space == 0);
	mutex_exit(sm->sm_lock);

	return (empty);
}

/*
 * Reassess DTLs after a config change or scrub completion.
 */
void
vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
{
	spa_t *spa = vd->vdev_spa;
	avl_tree_t reftree;
	int minref;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_dtl_reassess(vd->vdev_child[c], txg,
		    scrub_txg, scrub_done);

	if (vd == spa->spa_root_vdev || vd->vdev_ishole || vd->vdev_aux)
		return;

	if (vd->vdev_ops->vdev_op_leaf) {
		mutex_enter(&vd->vdev_dtl_lock);
		if (scrub_txg != 0 &&
		    (spa->spa_scrub_started || spa->spa_scrub_errors == 0)) {
			/* XXX should check scrub_done? */
			/*
			 * We completed a scrub up to scrub_txg.  If we
			 * did it without rebooting, then the scrub dtl
			 * will be valid, so excise the old region and
			 * fold in the scrub dtl.  Otherwise, leave the
			 * dtl as-is if there was an error.
			 *
			 * There's a little trick here: to excise the beginning
			 * of the DTL_MISSING map, we put it into a reference
			 * tree and then add a segment with refcnt -1 that
			 * covers the range [0, scrub_txg).  This means
			 * that each txg in that range has refcnt -1 or 0.
			 * We then add DTL_SCRUB with a refcnt of 2, so that
			 * entries in the range [0, scrub_txg) will have a
			 * positive refcnt -- either 1 or 2.  We then convert
			 * the reference tree into the new DTL_MISSING map.
			 */
			space_map_ref_create(&reftree);
			space_map_ref_add_map(&reftree,
			    &vd->vdev_dtl[DTL_MISSING], 1);
			space_map_ref_add_seg(&reftree, 0, scrub_txg, -1);
			space_map_ref_add_map(&reftree,
			    &vd->vdev_dtl[DTL_SCRUB], 2);
			space_map_ref_generate_map(&reftree,
			    &vd->vdev_dtl[DTL_MISSING], 1);
			space_map_ref_destroy(&reftree);
		}
		space_map_vacate(&vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
		space_map_walk(&vd->vdev_dtl[DTL_MISSING],
		    space_map_add, &vd->vdev_dtl[DTL_PARTIAL]);
		if (scrub_done)
			space_map_vacate(&vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
		space_map_vacate(&vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
		if (!vdev_readable(vd))
			space_map_add(&vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
		else
			space_map_walk(&vd->vdev_dtl[DTL_MISSING],
			    space_map_add, &vd->vdev_dtl[DTL_OUTAGE]);
		mutex_exit(&vd->vdev_dtl_lock);

		if (txg != 0)
			vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
		return;
	}

	mutex_enter(&vd->vdev_dtl_lock);
	for (int t = 0; t < DTL_TYPES; t++) {
		/* account for child's outage in parent's missing map */
		int s = (t == DTL_MISSING) ? DTL_OUTAGE : t;
		if (t == DTL_SCRUB)
			continue;			/* leaf vdevs only */
		if (t == DTL_PARTIAL)
			minref = 1;			/* i.e. non-zero */
		else if (vd->vdev_nparity != 0)
			minref = vd->vdev_nparity + 1;	/* RAID-Z */
		else
			minref = vd->vdev_children;	/* any kind of mirror */
		space_map_ref_create(&reftree);
		for (int c = 0; c < vd->vdev_children; c++) {
			vdev_t *cvd = vd->vdev_child[c];
			mutex_enter(&cvd->vdev_dtl_lock);
			space_map_ref_add_map(&reftree, &cvd->vdev_dtl[s], 1);
			mutex_exit(&cvd->vdev_dtl_lock);
		}
		space_map_ref_generate_map(&reftree, &vd->vdev_dtl[t], minref);
		space_map_ref_destroy(&reftree);
	}
	mutex_exit(&vd->vdev_dtl_lock);
}

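/*
 * Editor's illustration (not part of the original file): worked
 * refcounts for the excision above, assuming a hypothetical leaf with
 * DTL_MISSING covering txgs [90, 120), DTL_SCRUB covering [95, 100),
 * and scrub_txg == 110:
 *
 *	txg in [90, 95):   +1 (MISSING) - 1 (seg)             = 0  excised
 *	txg in [95, 100):  +1 (MISSING) - 1 (seg) + 2 (SCRUB) = 2  kept
 *	txg in [100, 110): +1 (MISSING) - 1 (seg)             = 0  excised
 *	txg in [110, 120): +1 (MISSING)                       = 1  kept
 *
 * Generating the map with minref == 1 thus keeps exactly what is still
 * missing: the region the scrub could not repair and the txgs beyond
 * scrub_txg that the scrub never covered.
 */
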
static int
vdev_dtl_load(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	space_map_obj_t *smo = &vd->vdev_dtl_smo;
	objset_t *mos = spa->spa_meta_objset;
	dmu_buf_t *db;
	int error;

	ASSERT(vd->vdev_children == 0);

	if (smo->smo_object == 0)
		return (0);

	ASSERT(!vd->vdev_ishole);

	if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0)
		return (error);

	ASSERT3U(db->db_size, >=, sizeof (*smo));
	bcopy(db->db_data, smo, sizeof (*smo));
	dmu_buf_rele(db, FTAG);

	mutex_enter(&vd->vdev_dtl_lock);
	error = space_map_load(&vd->vdev_dtl[DTL_MISSING],
	    NULL, SM_ALLOC, smo, mos);
	mutex_exit(&vd->vdev_dtl_lock);

	return (error);
}

void
vdev_dtl_sync(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	space_map_obj_t *smo = &vd->vdev_dtl_smo;
	space_map_t *sm = &vd->vdev_dtl[DTL_MISSING];
	objset_t *mos = spa->spa_meta_objset;
	space_map_t smsync;
	kmutex_t smlock;
	dmu_buf_t *db;
	dmu_tx_t *tx;

	ASSERT(!vd->vdev_ishole);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

	if (vd->vdev_detached) {
		if (smo->smo_object != 0) {
			int err = dmu_object_free(mos, smo->smo_object, tx);
			ASSERT3U(err, ==, 0);
			smo->smo_object = 0;
		}
		dmu_tx_commit(tx);
		return;
	}

	if (smo->smo_object == 0) {
		ASSERT(smo->smo_objsize == 0);
		ASSERT(smo->smo_alloc == 0);
		smo->smo_object = dmu_object_alloc(mos,
		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
		ASSERT(smo->smo_object != 0);
		vdev_config_dirty(vd->vdev_top);
	}

	mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL);

	space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift,
	    &smlock);

	mutex_enter(&smlock);

	mutex_enter(&vd->vdev_dtl_lock);
	space_map_walk(sm, space_map_add, &smsync);
	mutex_exit(&vd->vdev_dtl_lock);

	space_map_truncate(smo, mos, tx);
	space_map_sync(&smsync, SM_ALLOC, smo, mos, tx);

	space_map_destroy(&smsync);

	mutex_exit(&smlock);
	mutex_destroy(&smlock);

	VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	ASSERT3U(db->db_size, >=, sizeof (*smo));
	bcopy(smo, db->db_data, sizeof (*smo));
	dmu_buf_rele(db, FTAG);

	dmu_tx_commit(tx);
}

/*
 * Determine whether the specified vdev can be offlined/detached/removed
 * without losing data.
 */
boolean_t
vdev_dtl_required(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *tvd = vd->vdev_top;
	uint8_t cant_read = vd->vdev_cant_read;
	boolean_t required;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	if (vd == spa->spa_root_vdev || vd == tvd)
		return (B_TRUE);

	/*
	 * Temporarily mark the device as unreadable, and then determine
	 * whether this results in any DTL outages in the top-level vdev.
	 * If not, we can safely offline/detach/remove the device.
	 */
	vd->vdev_cant_read = B_TRUE;
	vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
	required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
	vd->vdev_cant_read = cant_read;
	vdev_dtl_reassess(tvd, 0, 0, B_FALSE);

	return (required);
}

/*
 * Determine if resilver is needed, and if so the txg range.
 */
boolean_t
vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
{
	boolean_t needed = B_FALSE;
	uint64_t thismin = UINT64_MAX;
	uint64_t thismax = 0;

	if (vd->vdev_children == 0) {
		mutex_enter(&vd->vdev_dtl_lock);
		if (vd->vdev_dtl[DTL_MISSING].sm_space != 0 &&
		    vdev_writeable(vd)) {
			space_seg_t *ss;

			ss = avl_first(&vd->vdev_dtl[DTL_MISSING].sm_root);
			thismin = ss->ss_start - 1;
			ss = avl_last(&vd->vdev_dtl[DTL_MISSING].sm_root);
			thismax = ss->ss_end;
			needed = B_TRUE;
		}
		mutex_exit(&vd->vdev_dtl_lock);
	} else {
		for (int c = 0; c < vd->vdev_children; c++) {
			vdev_t *cvd = vd->vdev_child[c];
			uint64_t cmin, cmax;

			if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
				thismin = MIN(thismin, cmin);
				thismax = MAX(thismax, cmax);
				needed = B_TRUE;
			}
		}
	}

	if (needed && minp) {
		*minp = thismin;
		*maxp = thismax;
	}
	return (needed);
}

void
vdev_load(vdev_t *vd)
{
	/*
	 * Recursively load all children.
	 */
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_load(vd->vdev_child[c]);

	/*
	 * If this is a top-level vdev, initialize its metaslabs.
	 */
	if (vd == vd->vdev_top && !vd->vdev_ishole &&
	    (vd->vdev_ashift == 0 || vd->vdev_asize == 0 ||
	    vdev_metaslab_init(vd, 0) != 0))
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);

	/*
	 * If this is a leaf vdev, load its DTL.
	 */
	if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
}

/*
 * The special vdev case is used for hot spares and l2cache devices.  Its
 * sole purpose is to set the vdev state for the associated vdev.  To do this,
 * we make sure that we can open the underlying device, then try to read the
 * label, and make sure that the label is sane and that it hasn't been
 * repurposed to another pool.
 */
int
vdev_validate_aux(vdev_t *vd)
{
	nvlist_t *label;
	uint64_t guid, version;
	uint64_t state;

	if (!vdev_readable(vd))
		return (0);

	if ((label = vdev_label_read_config(vd)) == NULL) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		return (-1);
	}

	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
	    version > SPA_VERSION ||
	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
	    guid != vd->vdev_guid ||
	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		nvlist_free(label);
		return (-1);
	}

	/*
	 * We don't actually check the pool state here.  If it's in fact in
	 * use by another pool, we update this fact on the fly when requested.
	 */
	nvlist_free(label);
	return (0);
}

void
vdev_remove(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	dmu_tx_t *tx;

	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

	if (vd->vdev_dtl_smo.smo_object) {
		ASSERT3U(vd->vdev_dtl_smo.smo_alloc, ==, 0);
		(void) dmu_object_free(mos, vd->vdev_dtl_smo.smo_object, tx);
		vd->vdev_dtl_smo.smo_object = 0;
	}

	if (vd->vdev_ms != NULL) {
		for (int m = 0; m < vd->vdev_ms_count; m++) {
			metaslab_t *msp = vd->vdev_ms[m];

			if (msp == NULL || msp->ms_smo.smo_object == 0)
				continue;

			ASSERT3U(msp->ms_smo.smo_alloc, ==, 0);
			(void) dmu_object_free(mos, msp->ms_smo.smo_object, tx);
			msp->ms_smo.smo_object = 0;
		}
	}

	if (vd->vdev_ms_array) {
		(void) dmu_object_free(mos, vd->vdev_ms_array, tx);
		vd->vdev_ms_array = 0;
		vd->vdev_ms_shift = 0;
	}
	dmu_tx_commit(tx);
}

void
vdev_sync_done(vdev_t *vd, uint64_t txg)
{
	metaslab_t *msp;

	ASSERT(!vd->vdev_ishole);

	while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))) !=
	    NULL)
		metaslab_sync_done(msp, txg);
}

void
vdev_sync(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *lvd;
	metaslab_t *msp;
	dmu_tx_t *tx;

	ASSERT(!vd->vdev_ishole);

	if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) {
		ASSERT(vd == vd->vdev_top);
		tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
		vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
		    DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
		ASSERT(vd->vdev_ms_array != 0);
		vdev_config_dirty(vd);
		dmu_tx_commit(tx);
	}

	if (vd->vdev_removing)
		vdev_remove(vd, txg);

	while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
		metaslab_sync(msp, txg);
		(void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
	}

	while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
		vdev_dtl_sync(lvd, txg);

	(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
}

uint64_t
vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
{
	return (vd->vdev_ops->vdev_op_asize(vd, psize));
}

/*
 * Mark the given vdev faulted.  A faulted vdev behaves as if the device could
 * not be opened, and no I/O is attempted.
 */
int
vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
	vdev_t *vd;

	spa_vdev_state_enter(spa, SCL_NONE);

	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
		return (spa_vdev_state_exit(spa, NULL, ENODEV));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));

	/*
	 * We don't directly use the aux state here, but if we do a
	 * vdev_reopen(), we need this value to be present to remember why we
	 * were faulted.
	 */
	vd->vdev_label_aux = aux;

	/*
	 * Faulted state takes precedence over degraded.
	 */
	vd->vdev_faulted = 1ULL;
	vd->vdev_degraded = 0ULL;
	vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);

	/*
	 * If marking the vdev as faulted causes the top-level vdev to become
	 * unavailable, then back off and simply mark the vdev as degraded
	 * instead.
	 */
	if (vdev_is_dead(vd->vdev_top) && !vd->vdev_islog &&
	    vd->vdev_aux == NULL) {
		vd->vdev_degraded = 1ULL;
		vd->vdev_faulted = 0ULL;

		/*
		 * If we reopen the device and it's not dead, only then do we
		 * mark it degraded.
		 */
		vdev_reopen(vd);

		if (vdev_readable(vd))
			vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
	}

	return (spa_vdev_state_exit(spa, vd, 0));
}

/*
 * Mark the given vdev degraded.  A degraded vdev is purely an indication to the
 * user that something is wrong.  The vdev continues to operate as normal as far
 * as I/O is concerned.
 */
int
vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
	vdev_t *vd;

	spa_vdev_state_enter(spa, SCL_NONE);

	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
		return (spa_vdev_state_exit(spa, NULL, ENODEV));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));

	/*
	 * If the vdev is already faulted, then don't do anything.
	 */
	if (vd->vdev_faulted || vd->vdev_degraded)
		return (spa_vdev_state_exit(spa, NULL, 0));

	vd->vdev_degraded = 1ULL;
	if (!vdev_is_dead(vd))
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
		    aux);

	return (spa_vdev_state_exit(spa, vd, 0));
}

/*
 * Online the given vdev.  If 'unspare' is set, it implies two things.  First,
 * any attached spare device should be detached when the device finishes
 * resilvering.  Second, the online should be treated like a 'test' online case,
 * so no FMA events are generated if the device fails to open.
 */
2079 */ 2080 int 2081 vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate) 2082 { 2083 vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev; 2084 2085 spa_vdev_state_enter(spa, SCL_NONE); 2086 2087 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2088 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 2089 2090 if (!vd->vdev_ops->vdev_op_leaf) 2091 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 2092 2093 tvd = vd->vdev_top; 2094 vd->vdev_offline = B_FALSE; 2095 vd->vdev_tmpoffline = B_FALSE; 2096 vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE); 2097 vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT); 2098 2099 /* XXX - L2ARC 1.0 does not support expansion */ 2100 if (!vd->vdev_aux) { 2101 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 2102 pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND); 2103 } 2104 2105 vdev_reopen(tvd); 2106 vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE; 2107 2108 if (!vd->vdev_aux) { 2109 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 2110 pvd->vdev_expanding = B_FALSE; 2111 } 2112 2113 if (newstate) 2114 *newstate = vd->vdev_state; 2115 if ((flags & ZFS_ONLINE_UNSPARE) && 2116 !vdev_is_dead(vd) && vd->vdev_parent && 2117 vd->vdev_parent->vdev_ops == &vdev_spare_ops && 2118 vd->vdev_parent->vdev_child[0] == vd) 2119 vd->vdev_unspare = B_TRUE; 2120 2121 if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) { 2122 2123 /* XXX - L2ARC 1.0 does not support expansion */ 2124 if (vd->vdev_aux) 2125 return (spa_vdev_state_exit(spa, vd, ENOTSUP)); 2126 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 2127 } 2128 return (spa_vdev_state_exit(spa, vd, 0)); 2129 } 2130 2131 int 2132 vdev_offline_log(spa_t *spa) 2133 { 2134 int error = 0; 2135 2136 if ((error = dmu_objset_find(spa_name(spa), zil_vdev_offline, 2137 NULL, DS_FIND_CHILDREN)) == 0) { 2138 2139 /* 2140 * We successfully offlined the log device, sync out the 2141 * current txg so that the "stubby" block can be removed 2142 * by zil_sync(). 2143 */ 2144 txg_wait_synced(spa->spa_dsl_pool, 0); 2145 } 2146 return (error); 2147 } 2148 2149 static int 2150 vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags) 2151 { 2152 vdev_t *vd, *tvd; 2153 int error = 0; 2154 uint64_t generation; 2155 metaslab_group_t *mg; 2156 2157 top: 2158 spa_vdev_state_enter(spa, SCL_ALLOC); 2159 2160 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2161 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 2162 2163 if (!vd->vdev_ops->vdev_op_leaf) 2164 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 2165 2166 tvd = vd->vdev_top; 2167 mg = tvd->vdev_mg; 2168 generation = spa->spa_config_generation + 1; 2169 2170 /* 2171 * If the device isn't already offline, try to offline it. 2172 */ 2173 if (!vd->vdev_offline) { 2174 /* 2175 * If this device has the only valid copy of some data, 2176 * don't allow it to be offlined. Log devices are always 2177 * expendable. 2178 */ 2179 if (!tvd->vdev_islog && vd->vdev_aux == NULL && 2180 vdev_dtl_required(vd)) 2181 return (spa_vdev_state_exit(spa, NULL, EBUSY)); 2182 2183 /* 2184 * If the top-level is a slog and it has had allocations 2185 * then proceed. We check that the vdev's metaslab group 2186 * is not NULL since it's possible that we may have just 2187 * added this vdev but not yet initialized its metaslabs. 2188 */ 2189 if (tvd->vdev_islog && mg != NULL) { 2190 /* 2191 * Prevent any future allocations. 
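             * (Editorial note: passivating the metaslab group removes
             * this top-level vdev from the allocator's rotor so no new
             * allocations can land on it; metaslab_group_activate()
             * below undoes this.)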
2192              */
2193             metaslab_group_passivate(mg);
2194             (void) spa_vdev_state_exit(spa, vd, 0);
2195 
2196             error = vdev_offline_log(spa);
2197 
2198             spa_vdev_state_enter(spa, SCL_ALLOC);
2199 
2200             /*
2201              * Check to see if the config has changed.
2202              */
2203             if (error || generation != spa->spa_config_generation) {
2204                 metaslab_group_activate(mg);
2205                 if (error)
2206                     return (spa_vdev_state_exit(spa,
2207                         vd, error));
2208                 (void) spa_vdev_state_exit(spa, vd, 0);
2209                 goto top;
2210             }
2211             ASSERT3U(tvd->vdev_stat.vs_alloc, ==, 0);
2212         }
2213 
2214         /*
2215          * Offline this device and reopen its top-level vdev.
2216          * If the top-level vdev is a log device then just offline
2217          * it.  Otherwise, if this action results in the top-level
2218          * vdev becoming unusable, undo it and fail the request.
2219          */
2220         vd->vdev_offline = B_TRUE;
2221         vdev_reopen(tvd);
2222 
2223         if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
2224             vdev_is_dead(tvd)) {
2225             vd->vdev_offline = B_FALSE;
2226             vdev_reopen(tvd);
2227             return (spa_vdev_state_exit(spa, NULL, EBUSY));
2228         }
2229 
2230         /*
2231          * Add the device back into the metaslab rotor so that
2232          * once we online the device it's open for business.
2233          */
2234         if (tvd->vdev_islog && mg != NULL)
2235             metaslab_group_activate(mg);
2236     }
2237 
2238     vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
2239 
2240     return (spa_vdev_state_exit(spa, vd, 0));
2241 }
2242 
2243 int
2244 vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
2245 {
2246     int error;
2247 
2248     mutex_enter(&spa->spa_vdev_top_lock);
2249     error = vdev_offline_locked(spa, guid, flags);
2250     mutex_exit(&spa->spa_vdev_top_lock);
2251 
2252     return (error);
2253 }
2254 
2255 /*
2256  * Clear the error counts associated with this vdev.  Unlike vdev_online() and
2257  * vdev_offline(), we assume the spa config is locked.  We also clear all
2258  * children.  If 'vd' is NULL, then the user wants to clear all vdevs.
2259  */
2260 void
2261 vdev_clear(spa_t *spa, vdev_t *vd)
2262 {
2263     vdev_t *rvd = spa->spa_root_vdev;
2264 
2265     ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2266 
2267     if (vd == NULL)
2268         vd = rvd;
2269 
2270     vd->vdev_stat.vs_read_errors = 0;
2271     vd->vdev_stat.vs_write_errors = 0;
2272     vd->vdev_stat.vs_checksum_errors = 0;
2273 
2274     for (int c = 0; c < vd->vdev_children; c++)
2275         vdev_clear(spa, vd->vdev_child[c]);
2276 
2277     /*
2278      * If we're in the FAULTED state or have experienced failed I/O, then
2279      * clear the persistent state and attempt to reopen the device.  We
2280      * also mark the vdev config dirty, so that the new faulted state is
2281      * written out to disk.
2282      */
2283     if (vd->vdev_faulted || vd->vdev_degraded ||
2284         !vdev_readable(vd) || !vdev_writeable(vd)) {
2285 
2286         /*
2287          * When reopening in response to a clear event, it may be due to
2288          * a fmadm repair request.  In this case, if the device is
2289          * still broken, we still want to post the ereport again.
2290          */
2291         vd->vdev_forcefault = B_TRUE;
2292 
2293         vd->vdev_faulted = vd->vdev_degraded = 0;
2294         vd->vdev_cant_read = B_FALSE;
2295         vd->vdev_cant_write = B_FALSE;
2296 
2297         vdev_reopen(vd);
2298 
2299         vd->vdev_forcefault = B_FALSE;
2300 
2301         if (vd != rvd)
2302             vdev_state_dirty(vd->vdev_top);
2303 
2304         if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
2305             spa_async_request(spa, SPA_ASYNC_RESILVER);
2306 
2307         spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR);
2308     }
2309 
2310     /*
2311      * When clearing a FMA-diagnosed fault, we always want to
2312      * unspare the device, as we assume the device was originally
2313      * spared in response to the FMA fault.
2314      */
2315     if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
2316         vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
2317         vd->vdev_parent->vdev_child[0] == vd)
2318         vd->vdev_unspare = B_TRUE;
2319 }
2320 
2321 boolean_t
2322 vdev_is_dead(vdev_t *vd)
2323 {
2324     /*
2325      * Holes and missing devices are always considered "dead".
2326      * This simplifies the code since we don't have to check for
2327      * these types of devices in the various code paths.
2328      * Instead we rely on the fact that we skip over dead devices
2329      * before issuing I/O to them.
2330      */
2331     return (vd->vdev_state < VDEV_STATE_DEGRADED || vd->vdev_ishole ||
2332         vd->vdev_ops == &vdev_missing_ops);
2333 }
2334 
2335 boolean_t
2336 vdev_readable(vdev_t *vd)
2337 {
2338     return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
2339 }
2340 
2341 boolean_t
2342 vdev_writeable(vdev_t *vd)
2343 {
2344     return (!vdev_is_dead(vd) && !vd->vdev_cant_write);
2345 }
2346 
2347 boolean_t
2348 vdev_allocatable(vdev_t *vd)
2349 {
2350     uint64_t state = vd->vdev_state;
2351 
2352     /*
2353      * We currently allow allocations from vdevs which may be in the
2354      * process of reopening (i.e. VDEV_STATE_CLOSED).  If the device
2355      * fails to reopen then we'll catch it later when we're holding
2356      * the proper locks.  Note that we have to get the vdev state
2357      * in a local variable because although it changes atomically,
2358      * we're asking two separate questions about it.
2359      */
2360     return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
2361         !vd->vdev_cant_write && !vd->vdev_ishole && !vd->vdev_removing);
2362 }
2363 
2364 boolean_t
2365 vdev_accessible(vdev_t *vd, zio_t *zio)
2366 {
2367     ASSERT(zio->io_vd == vd);
2368 
2369     if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
2370         return (B_FALSE);
2371 
2372     if (zio->io_type == ZIO_TYPE_READ)
2373         return (!vd->vdev_cant_read);
2374 
2375     if (zio->io_type == ZIO_TYPE_WRITE)
2376         return (!vd->vdev_cant_write);
2377 
2378     return (B_TRUE);
2379 }
2380 
2381 /*
2382  * Get statistics for the given vdev.
2383  */
2384 void
2385 vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
2386 {
2387     vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
2388 
2389     mutex_enter(&vd->vdev_stat_lock);
2390     bcopy(&vd->vdev_stat, vs, sizeof (*vs));
2391     vs->vs_scrub_errors = vd->vdev_spa->spa_scrub_errors;
2392     vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
2393     vs->vs_state = vd->vdev_state;
2394     vs->vs_rsize = vdev_get_min_asize(vd);
2395     if (vd->vdev_ops->vdev_op_leaf)
2396         vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
2397     mutex_exit(&vd->vdev_stat_lock);
2398 
2399     /*
2400      * If we're getting stats on the root vdev, aggregate the I/O counts
2401      * over all top-level vdevs (i.e. the direct children of the root).
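     *
     * (Editorial example: with two top-level mirrors, the root's
     * vs_ops[ZIO_TYPE_READ] reported here is the sum of both mirrors'
     * read counts, even though vdev_stat_update() below never counts
     * successful i/o against the root itself.)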
2402      */
2403     if (vd == rvd) {
2404         for (int c = 0; c < rvd->vdev_children; c++) {
2405             vdev_t *cvd = rvd->vdev_child[c];
2406             vdev_stat_t *cvs = &cvd->vdev_stat;
2407 
2408             mutex_enter(&vd->vdev_stat_lock);
2409             for (int t = 0; t < ZIO_TYPES; t++) {
2410                 vs->vs_ops[t] += cvs->vs_ops[t];
2411                 vs->vs_bytes[t] += cvs->vs_bytes[t];
2412             }
2413             vs->vs_scrub_examined += cvs->vs_scrub_examined;
2414             mutex_exit(&vd->vdev_stat_lock);
2415         }
2416     }
2417 }
2418 
2419 void
2420 vdev_clear_stats(vdev_t *vd)
2421 {
2422     mutex_enter(&vd->vdev_stat_lock);
2423     vd->vdev_stat.vs_space = 0;
2424     vd->vdev_stat.vs_dspace = 0;
2425     vd->vdev_stat.vs_alloc = 0;
2426     mutex_exit(&vd->vdev_stat_lock);
2427 }
2428 
2429 void
2430 vdev_stat_update(zio_t *zio, uint64_t psize)
2431 {
2432     spa_t *spa = zio->io_spa;
2433     vdev_t *rvd = spa->spa_root_vdev;
2434     vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
2435     vdev_t *pvd;
2436     uint64_t txg = zio->io_txg;
2437     vdev_stat_t *vs = &vd->vdev_stat;
2438     zio_type_t type = zio->io_type;
2439     int flags = zio->io_flags;
2440 
2441     /*
2442      * If this i/o is a gang leader, it didn't do any actual work.
2443      */
2444     if (zio->io_gang_tree)
2445         return;
2446 
2447     if (zio->io_error == 0) {
2448         /*
2449          * If this is a root i/o, don't count it -- we've already
2450          * counted the top-level vdevs, and vdev_get_stats() will
2451          * aggregate them when asked.  This reduces contention on
2452          * the root vdev_stat_lock and implicitly handles blocks
2453          * that compress away to holes, for which there is no i/o.
2454          * (Holes never create vdev children, so all the counters
2455          * remain zero, which is what we want.)
2456          *
2457          * Note: this only applies to successful i/o (io_error == 0)
2458          * because unlike i/o counts, errors are not additive.
2459          * When reading a ditto block, for example, failure of
2460          * one top-level vdev does not imply a root-level error.
2461          */
2462         if (vd == rvd)
2463             return;
2464 
2465         ASSERT(vd == zio->io_vd);
2466 
2467         if (flags & ZIO_FLAG_IO_BYPASS)
2468             return;
2469 
2470         mutex_enter(&vd->vdev_stat_lock);
2471 
2472         if (flags & ZIO_FLAG_IO_REPAIR) {
2473             if (flags & ZIO_FLAG_SCRUB_THREAD)
2474                 vs->vs_scrub_repaired += psize;
2475             if (flags & ZIO_FLAG_SELF_HEAL)
2476                 vs->vs_self_healed += psize;
2477         }
2478 
2479         vs->vs_ops[type]++;
2480         vs->vs_bytes[type] += psize;
2481 
2482         mutex_exit(&vd->vdev_stat_lock);
2483         return;
2484     }
2485 
2486     if (flags & ZIO_FLAG_SPECULATIVE)
2487         return;
2488 
2489     /*
2490      * If this is an I/O error that is going to be retried, then ignore the
2491      * error.  Otherwise, the user may interpret B_FAILFAST I/O errors as
2492      * hard errors, when in reality they can happen for any number of
2493      * innocuous reasons (bus resets, MPxIO link failure, etc.).
2494      */
2495     if (zio->io_error == EIO &&
2496         !(zio->io_flags & ZIO_FLAG_IO_RETRY))
2497         return;
2498 
2499     /*
2500      * Intent log writes won't propagate their error to the root
2501      * I/O so don't mark these types of failures as pool-level
2502      * errors.
2503      */
2504     if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
2505         return;
2506 
2507     mutex_enter(&vd->vdev_stat_lock);
2508     if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) {
2509         if (zio->io_error == ECKSUM)
2510             vs->vs_checksum_errors++;
2511         else
2512             vs->vs_read_errors++;
2513     }
2514     if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd))
2515         vs->vs_write_errors++;
2516     mutex_exit(&vd->vdev_stat_lock);
2517 
2518     if (type == ZIO_TYPE_WRITE && txg != 0 &&
2519         (!(flags & ZIO_FLAG_IO_REPAIR) ||
2520         (flags & ZIO_FLAG_SCRUB_THREAD) ||
2521         spa->spa_claiming)) {
2522         /*
2523          * This is either a normal write (not a repair), or it's
2524          * a repair induced by the scrub thread, or it's a repair
2525          * made by zil_claim() during spa_load() in the first txg.
2526          * In the normal case, we commit the DTL change in the same
2527          * txg as the block was born.  In the scrub-induced repair
2528          * case, we know that scrubs run in first-pass syncing context,
2529          * so we commit the DTL change in spa_syncing_txg(spa).
2530          * In the zil_claim() case, we commit in spa_first_txg(spa).
2531          *
2532          * We currently do not make DTL entries for failed spontaneous
2533          * self-healing writes triggered by normal (non-scrubbing)
2534          * reads, because we have no transactional context in which to
2535          * do so -- and it's not clear that it'd be desirable anyway.
2536          */
2537         if (vd->vdev_ops->vdev_op_leaf) {
2538             uint64_t commit_txg = txg;
2539             if (flags & ZIO_FLAG_SCRUB_THREAD) {
2540                 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
2541                 ASSERT(spa_sync_pass(spa) == 1);
2542                 vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
2543                 commit_txg = spa_syncing_txg(spa);
2544             } else if (spa->spa_claiming) {
2545                 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
2546                 commit_txg = spa_first_txg(spa);
2547             }
2548             ASSERT(commit_txg >= spa_syncing_txg(spa));
2549             if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
2550                 return;
2551             for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
2552                 vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
2553             vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
2554         }
2555         if (vd != rvd)
2556             vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
2557     }
2558 }
2559 
2560 void
2561 vdev_scrub_stat_update(vdev_t *vd, pool_scrub_type_t type, boolean_t complete)
2562 {
2563     vdev_stat_t *vs = &vd->vdev_stat;
2564 
2565     for (int c = 0; c < vd->vdev_children; c++)
2566         vdev_scrub_stat_update(vd->vdev_child[c], type, complete);
2567 
2568     mutex_enter(&vd->vdev_stat_lock);
2569 
2570     if (type == POOL_SCRUB_NONE) {
2571         /*
2572          * Update completion and end time.  Leave everything else alone
2573          * so we can report what happened during the previous scrub.
2574          */
2575         vs->vs_scrub_complete = complete;
2576         vs->vs_scrub_end = gethrestime_sec();
2577     } else {
2578         vs->vs_scrub_type = type;
2579         vs->vs_scrub_complete = 0;
2580         vs->vs_scrub_examined = 0;
2581         vs->vs_scrub_repaired = 0;
2582         vs->vs_scrub_start = gethrestime_sec();
2583         vs->vs_scrub_end = 0;
2584     }
2585 
2586     mutex_exit(&vd->vdev_stat_lock);
2587 }
2588 
2589 /*
2590  * Update the in-core space usage stats for this vdev, its metaslab class,
2591  * and the root vdev.
2592  */
2593 void
2594 vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
2595     int64_t space_delta)
2596 {
2597     int64_t dspace_delta = space_delta;
2598     spa_t *spa = vd->vdev_spa;
2599     vdev_t *rvd = spa->spa_root_vdev;
2600     metaslab_group_t *mg = vd->vdev_mg;
2601     metaslab_class_t *mc = mg ? mg->mg_class : NULL;
2602 
2603     ASSERT(vd == vd->vdev_top);
2604 
2605     /*
2606      * Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
2607      * factor.  We must calculate this here and not at the root vdev
2608      * because the root vdev's psize-to-asize is simply the max of its
2609      * children's, thus not accurate enough for us.
2610      */
2611     ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
2612     ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
2613     dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
2614         vd->vdev_deflate_ratio;
2615 
2616     mutex_enter(&vd->vdev_stat_lock);
2617     vd->vdev_stat.vs_alloc += alloc_delta;
2618     vd->vdev_stat.vs_space += space_delta;
2619     vd->vdev_stat.vs_dspace += dspace_delta;
2620     mutex_exit(&vd->vdev_stat_lock);
2621 
2622     if (mc == spa_normal_class(spa)) {
2623         mutex_enter(&rvd->vdev_stat_lock);
2624         rvd->vdev_stat.vs_alloc += alloc_delta;
2625         rvd->vdev_stat.vs_space += space_delta;
2626         rvd->vdev_stat.vs_dspace += dspace_delta;
2627         mutex_exit(&rvd->vdev_stat_lock);
2628     }
2629 
2630     if (mc != NULL) {
2631         ASSERT(rvd == vd->vdev_parent);
2632         ASSERT(vd->vdev_ms_count != 0);
2633 
2634         metaslab_class_space_update(mc,
2635             alloc_delta, defer_delta, space_delta, dspace_delta);
2636     }
2637 }
2638 
2639 /*
2640  * Mark a top-level vdev's config as dirty, placing it on the dirty list
2641  * so that it will be written out next time the vdev configuration is synced.
2642  * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
2643  */
2644 void
2645 vdev_config_dirty(vdev_t *vd)
2646 {
2647     spa_t *spa = vd->vdev_spa;
2648     vdev_t *rvd = spa->spa_root_vdev;
2649     int c;
2650 
2651     /*
2652      * If this is an aux vdev (as with l2cache and spare devices), then we
2653      * update the vdev config manually and set the sync flag.
2654      */
2655     if (vd->vdev_aux != NULL) {
2656         spa_aux_vdev_t *sav = vd->vdev_aux;
2657         nvlist_t **aux;
2658         uint_t naux;
2659 
2660         for (c = 0; c < sav->sav_count; c++) {
2661             if (sav->sav_vdevs[c] == vd)
2662                 break;
2663         }
2664 
2665         if (c == sav->sav_count) {
2666             /*
2667              * We're being removed.  There's nothing more to do.
2668              */
2669             ASSERT(sav->sav_sync == B_TRUE);
2670             return;
2671         }
2672 
2673         sav->sav_sync = B_TRUE;
2674 
2675         if (nvlist_lookup_nvlist_array(sav->sav_config,
2676             ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
2677             VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
2678                 ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
2679         }
2680 
2681         ASSERT(c < naux);
2682 
2683         /*
2684          * Setting the nvlist in the middle of the array is a little
2685          * sketchy, but it will work.
2686          */
2687         nvlist_free(aux[c]);
2688         aux[c] = vdev_config_generate(spa, vd, B_TRUE, B_FALSE, B_TRUE);
2689 
2690         return;
2691     }
2692 
2693     /*
2694      * The dirty list is protected by the SCL_CONFIG lock.  The caller
2695      * must either hold SCL_CONFIG as writer, or must be the sync thread
2696      * (which holds SCL_CONFIG as reader).  There's only one sync thread,
2697      * so this is sufficient to ensure mutual exclusion.
2698 */ 2699 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 2700 (dsl_pool_sync_context(spa_get_dsl(spa)) && 2701 spa_config_held(spa, SCL_CONFIG, RW_READER))); 2702 2703 if (vd == rvd) { 2704 for (c = 0; c < rvd->vdev_children; c++) 2705 vdev_config_dirty(rvd->vdev_child[c]); 2706 } else { 2707 ASSERT(vd == vd->vdev_top); 2708 2709 if (!list_link_active(&vd->vdev_config_dirty_node) && 2710 !vd->vdev_ishole) 2711 list_insert_head(&spa->spa_config_dirty_list, vd); 2712 } 2713 } 2714 2715 void 2716 vdev_config_clean(vdev_t *vd) 2717 { 2718 spa_t *spa = vd->vdev_spa; 2719 2720 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 2721 (dsl_pool_sync_context(spa_get_dsl(spa)) && 2722 spa_config_held(spa, SCL_CONFIG, RW_READER))); 2723 2724 ASSERT(list_link_active(&vd->vdev_config_dirty_node)); 2725 list_remove(&spa->spa_config_dirty_list, vd); 2726 } 2727 2728 /* 2729 * Mark a top-level vdev's state as dirty, so that the next pass of 2730 * spa_sync() can convert this into vdev_config_dirty(). We distinguish 2731 * the state changes from larger config changes because they require 2732 * much less locking, and are often needed for administrative actions. 2733 */ 2734 void 2735 vdev_state_dirty(vdev_t *vd) 2736 { 2737 spa_t *spa = vd->vdev_spa; 2738 2739 ASSERT(vd == vd->vdev_top); 2740 2741 /* 2742 * The state list is protected by the SCL_STATE lock. The caller 2743 * must either hold SCL_STATE as writer, or must be the sync thread 2744 * (which holds SCL_STATE as reader). There's only one sync thread, 2745 * so this is sufficient to ensure mutual exclusion. 2746 */ 2747 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 2748 (dsl_pool_sync_context(spa_get_dsl(spa)) && 2749 spa_config_held(spa, SCL_STATE, RW_READER))); 2750 2751 if (!list_link_active(&vd->vdev_state_dirty_node) && !vd->vdev_ishole) 2752 list_insert_head(&spa->spa_state_dirty_list, vd); 2753 } 2754 2755 void 2756 vdev_state_clean(vdev_t *vd) 2757 { 2758 spa_t *spa = vd->vdev_spa; 2759 2760 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 2761 (dsl_pool_sync_context(spa_get_dsl(spa)) && 2762 spa_config_held(spa, SCL_STATE, RW_READER))); 2763 2764 ASSERT(list_link_active(&vd->vdev_state_dirty_node)); 2765 list_remove(&spa->spa_state_dirty_list, vd); 2766 } 2767 2768 /* 2769 * Propagate vdev state up from children to parent. 2770 */ 2771 void 2772 vdev_propagate_state(vdev_t *vd) 2773 { 2774 spa_t *spa = vd->vdev_spa; 2775 vdev_t *rvd = spa->spa_root_vdev; 2776 int degraded = 0, faulted = 0; 2777 int corrupted = 0; 2778 vdev_t *child; 2779 2780 if (vd->vdev_children > 0) { 2781 for (int c = 0; c < vd->vdev_children; c++) { 2782 child = vd->vdev_child[c]; 2783 2784 /* 2785 * Don't factor holes into the decision. 2786 */ 2787 if (child->vdev_ishole) 2788 continue; 2789 2790 if (!vdev_readable(child) || 2791 (!vdev_writeable(child) && spa_writeable(spa))) { 2792 /* 2793 * Root special: if there is a top-level log 2794 * device, treat the root vdev as if it were 2795 * degraded. 
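                 * (Editorial note: a pool can survive the loss of
                 * all its log devices, so a failed top-level log
                 * merely degrades the root rather than faulting it;
                 * cf. "Log devices are always expendable" in
                 * vdev_offline_locked() above.)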
2796                  */
2797                 if (child->vdev_islog && vd == rvd)
2798                     degraded++;
2799                 else
2800                     faulted++;
2801             } else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
2802                 degraded++;
2803             }
2804 
2805             if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
2806                 corrupted++;
2807         }
2808 
2809         vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
2810 
2811         /*
2812          * Root special: if there is a top-level vdev that cannot be
2813          * opened due to corrupted metadata, then propagate the root
2814          * vdev's aux state as 'corrupt' rather than 'insufficient
2815          * replicas'.
2816          */
2817         if (corrupted && vd == rvd &&
2818             rvd->vdev_state == VDEV_STATE_CANT_OPEN)
2819             vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
2820                 VDEV_AUX_CORRUPT_DATA);
2821     }
2822 
2823     if (vd->vdev_parent)
2824         vdev_propagate_state(vd->vdev_parent);
2825 }
2826 
2827 /*
2828  * Set a vdev's state.  If this is during an open, we don't update the parent
2829  * state, because we're in the process of opening children depth-first.
2830  * Otherwise, we propagate the change to the parent.
2831  *
2832  * If this routine places a device in a faulted state, an appropriate ereport is
2833  * generated.
2834  */
2835 void
2836 vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
2837 {
2838     uint64_t save_state;
2839     spa_t *spa = vd->vdev_spa;
2840 
2841     if (state == vd->vdev_state) {
2842         vd->vdev_stat.vs_aux = aux;
2843         return;
2844     }
2845 
2846     save_state = vd->vdev_state;
2847 
2848     vd->vdev_state = state;
2849     vd->vdev_stat.vs_aux = aux;
2850 
2851     /*
2852      * If we are setting the vdev state to anything but an open state, then
2853      * always close the underlying device.  Otherwise, we keep accessible
2854      * but invalid devices open forever.  We don't call vdev_close() itself,
2855      * because that implies some extra checks (offline, etc.) that we don't
2856      * want here.  This is limited to leaf devices, because otherwise
2857      * closing the device will affect other children.
2858      */
2859     if (vdev_is_dead(vd) && vd->vdev_ops->vdev_op_leaf)
2860         vd->vdev_ops->vdev_op_close(vd);
2861 
2862     /*
2863      * If we have brought this vdev back into service, we need
2864      * to notify fmd so that it can gracefully repair any outstanding
2865      * cases due to a missing device.  We do this in all cases, even those
2866      * that probably don't correlate to a repaired fault.  This is sure to
2867      * catch all cases, and we let the zfs-retire agent sort it out.  If
2868      * this is a transient state it's OK, as the retire agent will
2869      * double-check the state of the vdev before repairing it.
2870      */
2871     if (state == VDEV_STATE_HEALTHY && vd->vdev_ops->vdev_op_leaf &&
2872         vd->vdev_prevstate != state)
2873         zfs_post_state_change(spa, vd);
2874 
2875     if (vd->vdev_removed &&
2876         state == VDEV_STATE_CANT_OPEN &&
2877         (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
2878         /*
2879          * If the previous state is set to VDEV_STATE_REMOVED, then this
2880          * device was previously marked removed and someone attempted to
2881          * reopen it.  If this failed due to a nonexistent device, then
2882          * keep the device in the REMOVED state.  We also leave the
2883          * state as REMOVED for our special test online cases, which
2884          * only attempt to online the device and shouldn't generate an
2885          * FMA fault.
2886 */ 2887 vd->vdev_state = VDEV_STATE_REMOVED; 2888 vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 2889 } else if (state == VDEV_STATE_REMOVED) { 2890 vd->vdev_removed = B_TRUE; 2891 } else if (state == VDEV_STATE_CANT_OPEN) { 2892 /* 2893 * If we fail to open a vdev during an import, we mark it as 2894 * "not available", which signifies that it was never there to 2895 * begin with. Failure to open such a device is not considered 2896 * an error. 2897 */ 2898 if (spa->spa_load_state == SPA_LOAD_IMPORT && 2899 vd->vdev_ops->vdev_op_leaf) 2900 vd->vdev_not_present = 1; 2901 2902 /* 2903 * Post the appropriate ereport. If the 'prevstate' field is 2904 * set to something other than VDEV_STATE_UNKNOWN, it indicates 2905 * that this is part of a vdev_reopen(). In this case, we don't 2906 * want to post the ereport if the device was already in the 2907 * CANT_OPEN state beforehand. 2908 * 2909 * If the 'checkremove' flag is set, then this is an attempt to 2910 * online the device in response to an insertion event. If we 2911 * hit this case, then we have detected an insertion event for a 2912 * faulted or offline device that wasn't in the removed state. 2913 * In this scenario, we don't post an ereport because we are 2914 * about to replace the device, or attempt an online with 2915 * vdev_forcefault, which will generate the fault for us. 2916 */ 2917 if ((vd->vdev_prevstate != state || vd->vdev_forcefault) && 2918 !vd->vdev_not_present && !vd->vdev_checkremove && 2919 vd != spa->spa_root_vdev) { 2920 const char *class; 2921 2922 switch (aux) { 2923 case VDEV_AUX_OPEN_FAILED: 2924 class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED; 2925 break; 2926 case VDEV_AUX_CORRUPT_DATA: 2927 class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA; 2928 break; 2929 case VDEV_AUX_NO_REPLICAS: 2930 class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS; 2931 break; 2932 case VDEV_AUX_BAD_GUID_SUM: 2933 class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM; 2934 break; 2935 case VDEV_AUX_TOO_SMALL: 2936 class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL; 2937 break; 2938 case VDEV_AUX_BAD_LABEL: 2939 class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL; 2940 break; 2941 case VDEV_AUX_IO_FAILURE: 2942 class = FM_EREPORT_ZFS_IO_FAILURE; 2943 break; 2944 default: 2945 class = FM_EREPORT_ZFS_DEVICE_UNKNOWN; 2946 } 2947 2948 zfs_ereport_post(class, spa, vd, NULL, save_state, 0); 2949 } 2950 2951 /* Erase any notion of persistent removed state */ 2952 vd->vdev_removed = B_FALSE; 2953 } else { 2954 vd->vdev_removed = B_FALSE; 2955 } 2956 2957 if (!isopen && vd->vdev_parent) 2958 vdev_propagate_state(vd->vdev_parent); 2959 } 2960 2961 /* 2962 * Check the vdev configuration to ensure that it's capable of supporting 2963 * a root pool. Currently, we do not support RAID-Z or partial configuration. 2964 * In addition, only a single top-level vdev is allowed and none of the leaves 2965 * can be wholedisks. 
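 *
 * Editorial example: under these rules a config like
 *
 *     mirror c0t0d0s0 c0t1d0s0
 *
 * is bootable (one top-level vdev, slice leaves), while
 *
 *     raidz c0t0d0s0 c1t0d0s0 c2t0d0s0
 *
 * is not (RAID-Z top-level), nor is any config with multiple
 * top-level vdevs or whole-disk leaves.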
2966 */ 2967 boolean_t 2968 vdev_is_bootable(vdev_t *vd) 2969 { 2970 if (!vd->vdev_ops->vdev_op_leaf) { 2971 char *vdev_type = vd->vdev_ops->vdev_op_type; 2972 2973 if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 && 2974 vd->vdev_children > 1) { 2975 return (B_FALSE); 2976 } else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 || 2977 strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) { 2978 return (B_FALSE); 2979 } 2980 } else if (vd->vdev_wholedisk == 1) { 2981 return (B_FALSE); 2982 } 2983 2984 for (int c = 0; c < vd->vdev_children; c++) { 2985 if (!vdev_is_bootable(vd->vdev_child[c])) 2986 return (B_FALSE); 2987 } 2988 return (B_TRUE); 2989 } 2990 2991 /* 2992 * Load the state from the original vdev tree (ovd) which 2993 * we've retrieved from the MOS config object. If the original 2994 * vdev was offline then we transfer that state to the device 2995 * in the current vdev tree (nvd). 2996 */ 2997 void 2998 vdev_load_log_state(vdev_t *nvd, vdev_t *ovd) 2999 { 3000 spa_t *spa = nvd->vdev_spa; 3001 3002 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 3003 ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid); 3004 3005 for (int c = 0; c < nvd->vdev_children; c++) 3006 vdev_load_log_state(nvd->vdev_child[c], ovd->vdev_child[c]); 3007 3008 if (nvd->vdev_ops->vdev_op_leaf && ovd->vdev_offline) { 3009 /* 3010 * It would be nice to call vdev_offline() 3011 * directly but the pool isn't fully loaded and 3012 * the txg threads have not been started yet. 3013 */ 3014 nvd->vdev_offline = ovd->vdev_offline; 3015 vdev_reopen(nvd->vdev_top); 3016 } 3017 } 3018 3019 /* 3020 * Expand a vdev if possible. 3021 */ 3022 void 3023 vdev_expand(vdev_t *vd, uint64_t txg) 3024 { 3025 ASSERT(vd->vdev_top == vd); 3026 ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); 3027 3028 if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count) { 3029 VERIFY(vdev_metaslab_init(vd, txg) == 0); 3030 vdev_config_dirty(vd); 3031 } 3032 } 3033
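
/*
 * Editorial sketch (not part of the original file): a hypothetical
 * caller of vdev_expand() above, e.g. administrative code running
 * after a reopen has grown a top-level vdev's asize.  Per the ASSERTs
 * in vdev_expand(), SCL_ALL must be held as writer and the vdev must
 * be top-level; 'id' and 'txg' are assumed inputs:
 *
 *     vdev_t *tvd = vdev_lookup_top(spa, id);
 *     if (tvd != NULL)
 *         vdev_expand(tvd, txg);
 *
 * vdev_expand() only acts when the grown asize spans more metaslabs
 * than are currently initialized; it then fills in the new metaslabs
 * via vdev_metaslab_init() and dirties the config so the larger size
 * is written out with the next config sync.
 */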