/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Toomas Soome <tsoome@me.com>
 */

/*
 * SPA: Storage Pool Allocator
 *
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing
 * a pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>

#ifdef _KERNEL
#include <sys/bootprops.h>
#include <sys/callb.h>
#include <sys/cpupart.h>
#include <sys/pool.h>
#include <sys/sysdc.h>
#include <sys/zone.h>
#endif	/* _KERNEL */

#include "zfs_prop.h"
#include "zfs_comutil.h"

/*
 * The interval, in seconds, at which failed configuration cache file writes
 * should be retried.
 */
static int zfs_ccw_retry_interval = 300;

typedef enum zti_modes {
	ZTI_MODE_FIXED,		/* value is # of threads (min 1) */
	ZTI_MODE_BATCH,		/* cpu-intensive; value is ignored */
	ZTI_MODE_NULL,		/* don't create a taskq */
	ZTI_NMODES
} zti_modes_t;

#define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
#define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
#define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }

#define	ZTI_N(n)	ZTI_P(n, 1)
#define	ZTI_ONE		ZTI_N(1)

typedef struct zio_taskq_info {
	zti_modes_t zti_mode;
	uint_t zti_value;
	uint_t zti_count;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
	"issue", "issue_high", "intr", "intr_high"
};

/*
 * This table defines the taskq settings for each ZFS I/O type. When
 * initializing a pool, we use this table to create an appropriately sized
 * taskq. Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros. Other operations process a large amount of data; the ZTI_BATCH
 * macro causes us to create a taskq oriented for throughput. Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads per
 * taskq and the number of taskqs; when dispatching an event in this case, the
 * particular taskq is chosen at random.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
	{ ZTI_N(8),	ZTI_NULL,	ZTI_P(12, 8),	ZTI_NULL }, /* READ */
	{ ZTI_BATCH,	ZTI_N(5),	ZTI_N(8),	ZTI_N(5) }, /* WRITE */
	{ ZTI_P(12, 8),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* IOCTL */
};

static sysevent_t *spa_event_create(spa_t *spa, vdev_t *vd, const char *name);
static void spa_event_post(sysevent_t *ev);
static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
    spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
    char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);

uint_t		zio_taskq_batch_pct = 75;	/* 1 thread per cpu in pset */
id_t		zio_taskq_psrset_bind = PS_NONE;
boolean_t	zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
uint_t		zio_taskq_basedc = 80;		/* base duty cycle */

boolean_t	spa_create_process = B_TRUE;	/* no process ==> no sysdc */
extern int	zfs_sync_pass_deferred_free;
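
/*
 * A worked reading of the zio_taskqs table above (added for clarity, not from
 * the original source): the READ row uses ZTI_P(12, 8) for its interrupt
 * taskqs, so spa_taskqs_init() creates eight discrete taskqs of twelve
 * threads each and spa_taskq_dispatch_ent() picks one of them
 * pseudo-randomly per dispatch.  ZTI_BATCH taskqs are instead sized as a
 * percentage of the CPUs (zio_taskq_batch_pct), and when a dedicated pool
 * process exists and zio_taskq_sysdc is set they are created under the SDC
 * scheduling class at a zio_taskq_basedc duty cycle; see spa_taskqs_init()
 * below.
 */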

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}

/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	vdev_t *rvd = spa->spa_root_vdev;
	dsl_pool_t *pool = spa->spa_dsl_pool;
	uint64_t size, alloc, cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;
	metaslab_class_t *mc = spa_normal_class(spa);

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	if (rvd != NULL) {
		alloc = metaslab_class_get_alloc(spa_normal_class(spa));
		size = metaslab_class_get_space(spa_normal_class(spa));
		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
		    size - alloc, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
		    metaslab_class_fragmentation(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
		    metaslab_class_expandable_space(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
		    (spa_mode(spa) == FREAD), src);

		cap = (size == 0) ? 0 : (alloc * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
		    ddt_get_pool_dedup_ratio(spa), src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    rvd->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
			src = ZPROP_SRC_DEFAULT;
		else
			src = ZPROP_SRC_LOCAL;
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
	}

	if (pool != NULL) {
		/*
		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
		 * when opening pools created before this version, freedir will
		 * be NULL.
		 */
		if (pool->dp_free_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
			    dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
			    src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
			    NULL, 0, src);
		}

		if (pool->dp_leak_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
			    dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
			    src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
			    NULL, 0, src);
		}
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_comment != NULL) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
		    0, ZPROP_SRC_LOCAL);
	}

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
		    MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
	} else {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
		    SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
	}

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	objset_t *mos = spa->spa_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If there is no pool property object, there are no more props to get. */
	if (mos == NULL || spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				dsl_pool_config_enter(dp, FTAG);
				if (err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) {
					dsl_pool_config_exit(dp, FTAG);
					break;
				}

				strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				dsl_pool_config_exit(dp, FTAG);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum = 0;
	boolean_t has_feature = B_FALSE;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		uint64_t intval;
		char *strval, *slash, *check, *fname;
		const char *propname = nvpair_name(elem);
		zpool_prop_t prop = zpool_name_to_prop(propname);

		switch (prop) {
		case ZPROP_INVAL:
			if (!zpool_prop_feature(propname)) {
				error = SET_ERROR(EINVAL);
				break;
			}

			/*
			 * Sanitize the input.
			 */
			if (nvpair_type(elem) != DATA_TYPE_UINT64) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (nvpair_value_uint64(elem, &intval) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (intval != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			fname = strchr(propname, '@') + 1;
			if (zfeature_lookup_name(fname, NULL) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			has_feature = B_TRUE;
			break;

		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) ||
			    intval > SPA_VERSION_BEFORE_FEATURES ||
			    has_feature))
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
		case ZPOOL_PROP_AUTOEXPAND:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_BOOTFS:
			/*
			 * If the pool version is less than SPA_VERSION_BOOTFS,
			 * or the pool is still being created (version == 0),
			 * the bootfs property cannot be set.
			 */
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				objset_t *os;
				uint64_t propval;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if (error = dmu_objset_hold(strval, FTAG, &os))
					break;

				/*
				 * Must be ZPL, and its property settings
				 * must be supported by GRUB (compression
				 * is not gzip, and large blocks are not used).
				 */

				if (dmu_objset_type(os) != DMU_OST_ZFS) {
					error = SET_ERROR(ENOTSUP);
				} else if ((error =
				    dsl_prop_get_int_ds(dmu_objset_ds(os),
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &propval)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(propval)) {
					error = SET_ERROR(ENOTSUP);
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_rele(os, FTAG);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = SET_ERROR(EINVAL);

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = SET_ERROR(EIO);
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = SET_ERROR(EINVAL);
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_COMMENT:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;
			for (check = strval; *check != '\0'; check++) {
				/*
				 * The kernel doesn't have an easy isprint()
				 * check. For this kernel check, we merely
				 * check ASCII apart from DEL. Fix this if
				 * there is an easy-to-use kernel isprint().
				 */
				if (*check >= 0x7f) {
					error = SET_ERROR(EINVAL);
					break;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT)
				error = E2BIG;
			break;

		case ZPOOL_PROP_DEDUPDITTO:
			if (spa_version(spa) < SPA_VERSION_DEDUP)
				error = SET_ERROR(ENOTSUP);
			else
				error = nvpair_value_uint64(elem, &intval);
			if (error == 0 &&
			    intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
				error = SET_ERROR(EINVAL);
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}

void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
	char *cachefile;
	spa_config_dirent_t *dp;

	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
	    &cachefile) != 0)
		return;

	dp = kmem_alloc(sizeof (spa_config_dirent_t),
	    KM_SLEEP);

	if (cachefile[0] == '\0')
		dp->scd_path = spa_strdup(spa_config_path);
	else if (strcmp(cachefile, "none") == 0)
		dp->scd_path = NULL;
	else
		dp->scd_path = spa_strdup(cachefile);

	list_insert_head(&spa->spa_config_list, dp);
	if (need_sync)
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;
	nvpair_t *elem = NULL;
	boolean_t need_sync = B_FALSE;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));

		if (prop == ZPOOL_PROP_CACHEFILE ||
		    prop == ZPOOL_PROP_ALTROOT ||
		    prop == ZPOOL_PROP_READONLY)
			continue;

		if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
			uint64_t ver;

			if (prop == ZPOOL_PROP_VERSION) {
				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
			} else {
				ASSERT(zpool_prop_feature(nvpair_name(elem)));
				ver = SPA_VERSION_FEATURES;
				need_sync = B_TRUE;
			}

			/* Save time if the version is already set. */
			if (ver == spa_version(spa))
				continue;

			/*
			 * In addition to the pool directory object, we might
			 * create the pool properties object, the features for
			 * read object, the features for write object, or the
			 * feature descriptions object.
			 */
			error = dsl_sync_task(spa->spa_name, NULL,
			    spa_sync_version, &ver,
			    6, ZFS_SPACE_CHECK_RESERVED);
			if (error)
				return (error);
			continue;
		}

		need_sync = B_TRUE;
		break;
	}

	if (need_sync) {
		return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
		    nvp, 6, ZFS_SPACE_CHECK_RESERVED));
	}

	return (0);
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*ARGSUSED*/
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t vdev_state;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	vdev_state = rvd->vdev_state;
	spa_config_exit(spa, SCL_STATE, FTAG);

	if (vdev_state != VDEV_STATE_HEALTHY)
		return (SET_ERROR(ENXIO));

	ASSERT3U(spa_guid(spa), !=, *newguid);

	return (0);
}

static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	uint64_t oldguid;
	vdev_t *rvd = spa->spa_root_vdev;

	oldguid = spa_guid(spa);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	rvd->vdev_guid = *newguid;
	rvd->vdev_guid_sum += (*newguid - oldguid);
	vdev_config_dirty(rvd);
	spa_config_exit(spa, SCL_STATE, FTAG);

	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
	    oldguid, *newguid);
}
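
/*
 * Note (added for clarity, not from the original source): the two helpers
 * above are the check/sync halves of a dsl_sync_task() pair.  The check
 * half refuses the change (ENXIO) unless the root vdev is healthy; if it
 * succeeds, the sync half rewrites the root vdev guid and guid sum and
 * dirties the config, all within the same syncing transaction.
 * spa_change_guid() below submits the pair.
 */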

/*
 * Change the GUID for the pool.  This is done so that we can later
 * re-import a pool built from a clone of our own vdevs.  We will modify
 * the root vdev's guid, our own pool guid, and then mark all of our
 * vdevs dirty.  Note that we must make sure that all our vdevs are
 * online when we do this, or else any vdevs that weren't present
 * would be orphaned from our pool.  We are also going to issue a
 * sysevent to update any watchers.
 */
int
spa_change_guid(spa_t *spa)
{
	int error;
	uint64_t guid;

	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	guid = spa_generate_guid(NULL);

	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
	    spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);

	if (error == 0) {
		spa_config_sync(spa, B_FALSE, B_TRUE);
		spa_event_notify(spa, NULL, ESC_ZFS_POOL_REGUID);
	}

	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_phys_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
	enum zti_modes mode = ztip->zti_mode;
	uint_t value = ztip->zti_value;
	uint_t count = ztip->zti_count;
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	char name[32];
	uint_t flags = 0;
	boolean_t batch = B_FALSE;

	if (mode == ZTI_MODE_NULL) {
		tqs->stqs_count = 0;
		tqs->stqs_taskq = NULL;
		return;
	}

	ASSERT3U(count, >, 0);

	tqs->stqs_count = count;
	tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);

	switch (mode) {
	case ZTI_MODE_FIXED:
		ASSERT3U(value, >=, 1);
		value = MAX(value, 1);
		break;

	case ZTI_MODE_BATCH:
		batch = B_TRUE;
		flags |= TASKQ_THREADS_CPU_PCT;
		value = zio_taskq_batch_pct;
		break;

	default:
		panic("unrecognized mode for %s_%s taskq (%u:%u) in "
		    "spa_activate()",
		    zio_type_name[t], zio_taskq_types[q], mode, value);
		break;
	}

	for (uint_t i = 0; i < count; i++) {
		taskq_t *tq;

		if (count > 1) {
			(void) snprintf(name, sizeof (name), "%s_%s_%u",
			    zio_type_name[t], zio_taskq_types[q], i);
		} else {
			(void) snprintf(name, sizeof (name), "%s_%s",
			    zio_type_name[t], zio_taskq_types[q]);
		}

		if (zio_taskq_sysdc && spa->spa_proc != &p0) {
			if (batch)
				flags |= TASKQ_DC_BATCH;

			tq = taskq_create_sysdc(name, value, 50, INT_MAX,
			    spa->spa_proc, zio_taskq_basedc, flags);
		} else {
			pri_t pri = maxclsyspri;
			/*
			 * The write issue taskq can be extremely CPU
			 * intensive.  Run it at slightly lower priority
			 * than the other taskqs.
			 */
			if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
				pri--;

			tq = taskq_create_proc(name, value, pri, 50,
			    INT_MAX, spa->spa_proc, flags);
		}

		tqs->stqs_taskq[i] = tq;
	}
}

static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];

	if (tqs->stqs_taskq == NULL) {
		ASSERT0(tqs->stqs_count);
		return;
	}

	for (uint_t i = 0; i < tqs->stqs_count; i++) {
		ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
		taskq_destroy(tqs->stqs_taskq[i]);
	}

	kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
	tqs->stqs_taskq = NULL;
}

/*
 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 * Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself. In that case we choose which taskq at random by using
 * the low bits of gethrtime().
 */
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		tq = tqs->stqs_taskq[0];
	} else {
		tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
	}

	taskq_dispatch_ent(tq, func, arg, flags, ent);
}

static void
spa_create_zio_taskqs(spa_t *spa)
{
	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_init(spa, t, q);
		}
	}
}

#ifdef _KERNEL
static void
spa_thread(void *arg)
{
	callb_cpr_t cprinfo;

	spa_t *spa = arg;
	user_t *pu = PTOU(curproc);

	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
	    spa->spa_name);

	ASSERT(curproc != &p0);
	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
	    "zpool-%s", spa->spa_name);
	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));

	/* bind this thread to the requested psrset */
	if (zio_taskq_psrset_bind != PS_NONE) {
		pool_lock();
		mutex_enter(&cpu_lock);
		mutex_enter(&pidlock);
		mutex_enter(&curproc->p_lock);

		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
		    0, NULL, NULL) == 0) {
			curthread->t_bind_pset = zio_taskq_psrset_bind;
		} else {
			cmn_err(CE_WARN,
			    "Couldn't bind process for zfs pool \"%s\" to "
			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
		}

		mutex_exit(&curproc->p_lock);
		mutex_exit(&pidlock);
		mutex_exit(&cpu_lock);
		pool_unlock();
	}

	if (zio_taskq_sysdc) {
		sysdc_thread_enter(curthread, 100, 0);
	}

	spa->spa_proc = curproc;
	spa->spa_did = curthread->t_did;

	spa_create_zio_taskqs(spa);

	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);

	spa->spa_proc_state = SPA_PROC_ACTIVE;
	cv_broadcast(&spa->spa_proc_cv);

	CALLB_CPR_SAFE_BEGIN(&cprinfo);
	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);

	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
	spa->spa_proc_state = SPA_PROC_GONE;
	spa->spa_proc = &p0;
	cv_broadcast(&spa->spa_proc_cv);
	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */

	mutex_enter(&curproc->p_lock);
	lwp_exit();
}
#endif

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa, int mode)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_mode = mode;

	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);

	/* Try to create a covering process */
	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
	ASSERT(spa->spa_proc == &p0);
	spa->spa_did = 0;

	/* Only create a process if we're going to be around a while. */
	if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
		if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
		    NULL, 0) == 0) {
			spa->spa_proc_state = SPA_PROC_CREATED;
			while (spa->spa_proc_state == SPA_PROC_CREATED) {
				cv_wait(&spa->spa_proc_cv,
				    &spa->spa_proc_lock);
			}
			ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
			ASSERT(spa->spa_proc != &p0);
			ASSERT(spa->spa_did != 0);
		} else {
#ifdef _KERNEL
			cmn_err(CE_WARN,
			    "Couldn't create process for zfs pool \"%s\"\n",
			    spa->spa_name);
#endif
		}
	}
	mutex_exit(&spa->spa_proc_lock);

	/* If we didn't create a process, we need to create our taskqs. */
	if (spa->spa_proc == &p0) {
		spa_create_zio_taskqs(spa);
	}

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
	    offsetof(objset_t, os_evicting_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);
	ASSERT(spa->spa_async_zio_root == NULL);
	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	spa_evicting_os_wait(spa);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_evicting_os_list);
	list_destroy(&spa->spa_state_dirty_list);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_fini(spa, t, q);
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues. Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;

	mutex_enter(&spa->spa_proc_lock);
	if (spa->spa_proc_state != SPA_PROC_NONE) {
		ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
		cv_broadcast(&spa->spa_proc_cv);
		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
			ASSERT(spa->spa_proc != &p0);
			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
		}
		ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
		spa->spa_proc_state = SPA_PROC_NONE;
	}
	ASSERT(spa->spa_proc == &p0);
	mutex_exit(&spa->spa_proc_lock);

	/*
	 * We want to make sure spa_thread() has actually exited the ZFS
	 * module, so that the module can't be unloaded out from underneath
	 * it.
	 */
	if (spa->spa_did != 0) {
		thread_join(spa->spa_did);
		spa->spa_did = 0;
	}
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately. This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state. This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (error == ENOENT)
		return (0);

	if (error) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (SET_ERROR(EINVAL));
	}

	for (int c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Even though vdev_free() also calls vdev_metaslab_fini, we need
	 * to call it earlier, before we wait for async i/o to complete.
	 * This ensures that there is no async metaslab prefetching, by
	 * calling taskq_wait(mg_taskq).
	 */
	if (spa->spa_root_vdev != NULL) {
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++)
			vdev_metaslab_fini(spa->spa_root_vdev->vdev_child[c]);
		spa_config_exit(spa, SCL_ALL, FTAG);
	}

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	if (spa->spa_async_zio_root != NULL) {
		for (int i = 0; i < max_ncpus; i++)
			(void) zio_wait(spa->spa_async_zio_root[i]);
		kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
		spa->spa_async_zio_root = NULL;
	}

	bpobj_close(&spa->spa_deferred_bpobj);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
		spa->spa_meta_objset = NULL;
	}

	ddt_unload(spa);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
		vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	}
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;

	if (spa->spa_comment != NULL) {
		spa_strfree(spa->spa_comment);
		spa->spa_comment = NULL;
	}

	spa_config_exit(spa, SCL_ALL, FTAG);
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.
	 * For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in). During this phase we open and
	 * validate each vdev on the spare list. If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev. Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise). Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;
		vd->vdev_aux = &spa->spa_spares;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
		newvdevs = NULL;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd))
				l2arc_add_vdev(spa, vd);
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			ASSERT(vd->vdev_isl2cache);

			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL && l2arc_vdev_present(vd))
				l2arc_remove_vdev(vd);
			vdev_clear_stats(vd);
			vdev_free(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}

static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;
	*value = NULL;

	error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
	if (error != 0)
		return (error);

	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
	    DMU_READ_PREFETCH);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}

/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
	    !vd->vdev_ishole) {
		zfs_post_autoreplace(vd->vdev_spa, vd);
		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
	}
}

static void
spa_config_valid_zaps(vdev_t *vd, vdev_t *mvd)
{
	ASSERT3U(vd->vdev_children, ==, mvd->vdev_children);

	vd->vdev_top_zap = mvd->vdev_top_zap;
	vd->vdev_leaf_zap = mvd->vdev_leaf_zap;

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		spa_config_valid_zaps(vd->vdev_child[i], mvd->vdev_child[i]);
	}
}

/*
 * Validate the current config against the MOS config
 */
static boolean_t
spa_config_valid(spa_t *spa, nvlist_t *config)
{
	vdev_t *mrvd, *rvd = spa->spa_root_vdev;
	nvlist_t *nv;

	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);

	ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children);

	/*
	 * If we're doing a normal import, then build up any additional
	 * diagnostic information about missing devices in this config.
	 * We'll pass this up to the user for further processing.
	 */
	if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
		nvlist_t **child, *nv;
		uint64_t idx = 0;

		child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
		    KM_SLEEP);
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);

		for (int c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];
			vdev_t *mtvd = mrvd->vdev_child[c];

			if (tvd->vdev_ops == &vdev_missing_ops &&
			    mtvd->vdev_ops != &vdev_missing_ops &&
			    mtvd->vdev_islog)
				child[idx++] = vdev_config_generate(spa, mtvd,
				    B_FALSE, 0);
		}

		if (idx) {
			VERIFY(nvlist_add_nvlist_array(nv,
			    ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
			VERIFY(nvlist_add_nvlist(spa->spa_load_info,
			    ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);

			for (int i = 0; i < idx; i++)
				nvlist_free(child[i]);
		}
		nvlist_free(nv);
		kmem_free(child, rvd->vdev_children * sizeof (char **));
	}

	/*
	 * Compare the root vdev tree with the information we have
	 * from the MOS config (mrvd). Check each top-level vdev
	 * with the corresponding MOS config top-level (mtvd).
	 */
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		vdev_t *mtvd = mrvd->vdev_child[c];

		/*
		 * Resolve any "missing" vdevs in the current configuration.
		 * If we find that the MOS config has more accurate information
		 * about the top-level vdev then use that vdev instead.
		 */
		if (tvd->vdev_ops == &vdev_missing_ops &&
		    mtvd->vdev_ops != &vdev_missing_ops) {

			if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG))
				continue;

			/*
			 * Device specific actions.
			 */
			if (mtvd->vdev_islog) {
				spa_set_log_state(spa, SPA_LOG_CLEAR);
			} else {
				/*
				 * XXX - once we have 'readonly' pool
				 * support we should be able to handle
				 * missing data devices by transitioning
				 * the pool to readonly.
				 */
				continue;
			}

			/*
			 * Swap the missing vdev with the data we were
			 * able to obtain from the MOS config.
			 */
			vdev_remove_child(rvd, tvd);
			vdev_remove_child(mrvd, mtvd);

			vdev_add_child(rvd, mtvd);
			vdev_add_child(mrvd, tvd);

			spa_config_exit(spa, SCL_ALL, FTAG);
			vdev_load(mtvd);
			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

			vdev_reopen(rvd);
		} else {
			if (mtvd->vdev_islog) {
				/*
				 * Load the slog device's state from the MOS
				 * config since it's possible that the label
				 * does not contain the most up-to-date
				 * information.
				 */
				vdev_load_log_state(tvd, mtvd);
				vdev_reopen(tvd);
			}

			/*
			 * Per-vdev ZAP info is stored exclusively in the MOS.
			 */
			spa_config_valid_zaps(tvd, mtvd);
		}
	}

	vdev_free(mrvd);
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Ensure we were able to validate the config.
	 */
	return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
}

/*
 * Check for missing log devices
 */
static boolean_t
spa_check_logs(spa_t *spa)
{
	boolean_t rv = B_FALSE;
	dsl_pool_t *dp = spa_get_dsl(spa);

	switch (spa->spa_log_state) {
	case SPA_LOG_MISSING:
		/* need to recheck in case slog has been restored */
	case SPA_LOG_UNKNOWN:
		rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
		    zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
		if (rv)
			spa_set_log_state(spa, SPA_LOG_MISSING);
		break;
	}
	return (rv);
}

static boolean_t
spa_passivate_log(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	boolean_t slog_found = B_FALSE;

	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	if (!spa_has_slogs(spa))
		return (B_FALSE);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_islog) {
			metaslab_group_passivate(mg);
			slog_found = B_TRUE;
		}
	}

	return (slog_found);
}

static void
spa_activate_log(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_islog)
			metaslab_group_activate(mg);
	}
}

int
spa_offline_log(spa_t *spa)
{
	int error;

	error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
	    NULL, DS_FIND_CHILDREN);
	if (error == 0) {
		/*
		 * We successfully offlined the log device, sync out the
		 * current txg so that the "stubby" block can be removed
		 * by zil_sync().
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);
	}
	return (error);
}

static void
spa_aux_check_removed(spa_aux_vdev_t *sav)
{
	for (int i = 0; i < sav->sav_count; i++)
		spa_check_removed(sav->sav_vdevs[i]);
}

void
spa_claim_notify(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	if (zio->io_error)
		return;

	mutex_enter(&spa->spa_props_lock);	/* any mutex will do */
	if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
		spa->spa_claim_max_txg = zio->io_bp->blk_birth;
	mutex_exit(&spa->spa_props_lock);
}

typedef struct spa_load_error {
	uint64_t	sle_meta_count;
	uint64_t	sle_data_count;
} spa_load_error_t;

static void
spa_load_verify_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	spa_load_error_t *sle = zio->io_private;
	dmu_object_type_t type = BP_GET_TYPE(bp);
	int error = zio->io_error;
	spa_t *spa = zio->io_spa;

	if (error) {
		if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
		    type != DMU_OT_INTENT_LOG)
			atomic_inc_64(&sle->sle_meta_count);
		else
			atomic_inc_64(&sle->sle_data_count);
	}
	zio_data_buf_free(zio->io_data, zio->io_size);

	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_inflight--;
	cv_broadcast(&spa->spa_scrub_io_cv);
	mutex_exit(&spa->spa_scrub_lock);
}
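
/*
 * Rough sketch of the verify path (added for clarity, not from the original
 * source): spa_load_verify() below walks the pool with traverse_pool() and
 * issues one speculative scrub read per block via spa_load_verify_cb().
 * The callback throttles itself on spa_scrub_lock so that no more than
 * spa_load_verify_maxinflight reads are outstanding, and
 * spa_load_verify_done() above tallies failures into sle_meta_count and
 * sle_data_count, which are then compared against the rewind policy limits.
 */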

/*
 * Maximum number of concurrent scrub i/os to create while verifying
 * a pool during import.
 */
int spa_load_verify_maxinflight = 10000;
boolean_t spa_load_verify_metadata = B_TRUE;
boolean_t spa_load_verify_data = B_TRUE;

/*ARGSUSED*/
static int
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
		return (0);
	/*
	 * Note: normally this routine will not be called if
	 * spa_load_verify_metadata is not set. However, it may be useful
	 * to manually set the flag after the traversal has begun.
	 */
	if (!spa_load_verify_metadata)
		return (0);
	if (BP_GET_BUFC_TYPE(bp) == ARC_BUFC_DATA && !spa_load_verify_data)
		return (0);

	zio_t *rio = arg;
	size_t size = BP_GET_PSIZE(bp);
	void *data = zio_data_buf_alloc(size);

	mutex_enter(&spa->spa_scrub_lock);
	while (spa->spa_scrub_inflight >= spa_load_verify_maxinflight)
		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
	spa->spa_scrub_inflight++;
	mutex_exit(&spa->spa_scrub_lock);

	zio_nowait(zio_read(rio, spa, bp, data, size,
	    spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
	return (0);
}

/* ARGSUSED */
int
verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));

	return (0);
}

static int
spa_load_verify(spa_t *spa)
{
	zio_t *rio;
	spa_load_error_t sle = { 0 };
	zpool_rewind_policy_t policy;
	boolean_t verify_ok = B_FALSE;
	int error = 0;

	zpool_get_rewind_policy(spa->spa_config, &policy);

	if (policy.zrp_request & ZPOOL_NEVER_REWIND)
		return (0);

	dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
	error = dmu_objset_find_dp(spa->spa_dsl_pool,
	    spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
	    DS_FIND_CHILDREN);
	dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
	if (error != 0)
		return (error);

	rio = zio_root(spa, NULL, &sle,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);

	if (spa_load_verify_metadata) {
		error = traverse_pool(spa, spa->spa_verify_min_txg,
		    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
		    spa_load_verify_cb, rio);
	}

	(void) zio_wait(rio);

	spa->spa_load_meta_errors = sle.sle_meta_count;
	spa->spa_load_data_errors = sle.sle_data_count;

	if (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
	    sle.sle_data_count <= policy.zrp_maxdata) {
		int64_t loss = 0;

		verify_ok = B_TRUE;
		spa->spa_load_txg = spa->spa_uberblock.ub_txg;
		spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;

		loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
		VERIFY(nvlist_add_uint64(spa->spa_load_info,
		    ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
		VERIFY(nvlist_add_int64(spa->spa_load_info,
		    ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
		VERIFY(nvlist_add_uint64(spa->spa_load_info,
		    ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
	} else {
		spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
	}

	if (error) {
		if (error != ENXIO && error != EIO)
			error = SET_ERROR(EIO);
		return (error);
	}
0 : EIO); 2008 } 2009 2010 /* 2011 * Find a value in the pool props object. 2012 */ 2013 static void 2014 spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val) 2015 { 2016 (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object, 2017 zpool_prop_to_name(prop), sizeof (uint64_t), 1, val); 2018 } 2019 2020 /* 2021 * Find a value in the pool directory object. 2022 */ 2023 static int 2024 spa_dir_prop(spa_t *spa, const char *name, uint64_t *val) 2025 { 2026 return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 2027 name, sizeof (uint64_t), 1, val)); 2028 } 2029 2030 static int 2031 spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err) 2032 { 2033 vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux); 2034 return (err); 2035 } 2036 2037 /* 2038 * Fix up config after a partly-completed split. This is done with the 2039 * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off 2040 * pool have that entry in their config, but only the splitting one contains 2041 * a list of all the guids of the vdevs that are being split off. 2042 * 2043 * This function determines what to do with that list: either rejoin 2044 * all the disks to the pool, or complete the splitting process. To attempt 2045 * the rejoin, each disk that is offlined is marked online again, and 2046 * we do a reopen() call. If the vdev label for every disk that was 2047 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL) 2048 * then we call vdev_split() on each disk, and complete the split. 2049 * 2050 * Otherwise we leave the config alone, with all the vdevs in place in 2051 * the original pool. 2052 */ 2053 static void 2054 spa_try_repair(spa_t *spa, nvlist_t *config) 2055 { 2056 uint_t extracted; 2057 uint64_t *glist; 2058 uint_t i, gcount; 2059 nvlist_t *nvl; 2060 vdev_t **vd; 2061 boolean_t attempt_reopen; 2062 2063 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0) 2064 return; 2065 2066 /* check that the config is complete */ 2067 if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 2068 &glist, &gcount) != 0) 2069 return; 2070 2071 vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP); 2072 2073 /* attempt to online all the vdevs & validate */ 2074 attempt_reopen = B_TRUE; 2075 for (i = 0; i < gcount; i++) { 2076 if (glist[i] == 0) /* vdev is hole */ 2077 continue; 2078 2079 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE); 2080 if (vd[i] == NULL) { 2081 /* 2082 * Don't bother attempting to reopen the disks; 2083 * just do the split. 2084 */ 2085 attempt_reopen = B_FALSE; 2086 } else { 2087 /* attempt to re-online it */ 2088 vd[i]->vdev_offline = B_FALSE; 2089 } 2090 } 2091 2092 if (attempt_reopen) { 2093 vdev_reopen(spa->spa_root_vdev); 2094 2095 /* check each device to see what state it's in */ 2096 for (extracted = 0, i = 0; i < gcount; i++) { 2097 if (vd[i] != NULL && 2098 vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL) 2099 break; 2100 ++extracted; 2101 } 2102 } 2103 2104 /* 2105 * If every disk has been moved to the new pool, or if we never 2106 * even attempted to look at them, then we split them off for 2107 * good. 
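 *
 * Note that "hole" entries in the guid list (glist[i] == 0, so vd[i] was
 * left NULL above) still count as extracted, so the comparison below only
 * fails when a real device did not come back with VDEV_AUX_SPLIT_POOL in
 * its label.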
2108 */ 2109 if (!attempt_reopen || gcount == extracted) { 2110 for (i = 0; i < gcount; i++) 2111 if (vd[i] != NULL) 2112 vdev_split(vd[i]); 2113 vdev_reopen(spa->spa_root_vdev); 2114 } 2115 2116 kmem_free(vd, gcount * sizeof (vdev_t *)); 2117 } 2118 2119 static int 2120 spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type, 2121 boolean_t mosconfig) 2122 { 2123 nvlist_t *config = spa->spa_config; 2124 char *ereport = FM_EREPORT_ZFS_POOL; 2125 char *comment; 2126 int error; 2127 uint64_t pool_guid; 2128 nvlist_t *nvl; 2129 2130 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) 2131 return (SET_ERROR(EINVAL)); 2132 2133 ASSERT(spa->spa_comment == NULL); 2134 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) 2135 spa->spa_comment = spa_strdup(comment); 2136 2137 /* 2138 * Versioning wasn't explicitly added to the label until later, so if 2139 * it's not present treat it as the initial version. 2140 */ 2141 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 2142 &spa->spa_ubsync.ub_version) != 0) 2143 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 2144 2145 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 2146 &spa->spa_config_txg); 2147 2148 if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) && 2149 spa_guid_exists(pool_guid, 0)) { 2150 error = SET_ERROR(EEXIST); 2151 } else { 2152 spa->spa_config_guid = pool_guid; 2153 2154 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, 2155 &nvl) == 0) { 2156 VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting, 2157 KM_SLEEP) == 0); 2158 } 2159 2160 nvlist_free(spa->spa_load_info); 2161 spa->spa_load_info = fnvlist_alloc(); 2162 2163 gethrestime(&spa->spa_loaded_ts); 2164 error = spa_load_impl(spa, pool_guid, config, state, type, 2165 mosconfig, &ereport); 2166 } 2167 2168 /* 2169 * Don't count references from objsets that are already closed 2170 * and are making their way through the eviction process. 2171 */ 2172 spa_evicting_os_wait(spa); 2173 spa->spa_minref = refcount_count(&spa->spa_refcount); 2174 if (error) { 2175 if (error != EEXIST) { 2176 spa->spa_loaded_ts.tv_sec = 0; 2177 spa->spa_loaded_ts.tv_nsec = 0; 2178 } 2179 if (error != EBADF) { 2180 zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0); 2181 } 2182 } 2183 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE; 2184 spa->spa_ena = 0; 2185 2186 return (error); 2187 } 2188 2189 /* 2190 * Count the number of per-vdev ZAPs associated with all of the vdevs in the 2191 * vdev tree rooted in the given vd, and ensure that each ZAP is present in the 2192 * spa's per-vdev ZAP list. 2193 */ 2194 static uint64_t 2195 vdev_count_verify_zaps(vdev_t *vd) 2196 { 2197 spa_t *spa = vd->vdev_spa; 2198 uint64_t total = 0; 2199 if (vd->vdev_top_zap != 0) { 2200 total++; 2201 ASSERT0(zap_lookup_int(spa->spa_meta_objset, 2202 spa->spa_all_vdev_zaps, vd->vdev_top_zap)); 2203 } 2204 if (vd->vdev_leaf_zap != 0) { 2205 total++; 2206 ASSERT0(zap_lookup_int(spa->spa_meta_objset, 2207 spa->spa_all_vdev_zaps, vd->vdev_leaf_zap)); 2208 } 2209 2210 for (uint64_t i = 0; i < vd->vdev_children; i++) { 2211 total += vdev_count_verify_zaps(vd->vdev_child[i]); 2212 } 2213 2214 return (total); 2215 } 2216 2217 /* 2218 * Load an existing storage pool, using the pool's builtin spa_config as a 2219 * source of configuration information. 
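 *
 * When 'mosconfig' is B_FALSE the config we were handed is untrusted
 * (typically the cached zpool.cache copy), so the pool is opened read-only,
 * the authoritative config is read back out of the MOS, and spa_load() is
 * re-invoked with mosconfig set to B_TRUE to repeat the load against that
 * trusted copy.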
2220 */ 2221 static int 2222 spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, 2223 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig, 2224 char **ereport) 2225 { 2226 int error = 0; 2227 nvlist_t *nvroot = NULL; 2228 nvlist_t *label; 2229 vdev_t *rvd; 2230 uberblock_t *ub = &spa->spa_uberblock; 2231 uint64_t children, config_cache_txg = spa->spa_config_txg; 2232 int orig_mode = spa->spa_mode; 2233 int parse; 2234 uint64_t obj; 2235 boolean_t missing_feat_write = B_FALSE; 2236 2237 /* 2238 * If this is an untrusted config, access the pool in read-only mode. 2239 * This prevents things like resilvering recently removed devices. 2240 */ 2241 if (!mosconfig) 2242 spa->spa_mode = FREAD; 2243 2244 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 2245 2246 spa->spa_load_state = state; 2247 2248 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot)) 2249 return (SET_ERROR(EINVAL)); 2250 2251 parse = (type == SPA_IMPORT_EXISTING ? 2252 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT); 2253 2254 /* 2255 * Create "The Godfather" zio to hold all async IOs 2256 */ 2257 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 2258 KM_SLEEP); 2259 for (int i = 0; i < max_ncpus; i++) { 2260 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 2261 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 2262 ZIO_FLAG_GODFATHER); 2263 } 2264 2265 /* 2266 * Parse the configuration into a vdev tree. We explicitly set the 2267 * value that will be returned by spa_version() since parsing the 2268 * configuration requires knowing the version number. 2269 */ 2270 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2271 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse); 2272 spa_config_exit(spa, SCL_ALL, FTAG); 2273 2274 if (error != 0) 2275 return (error); 2276 2277 ASSERT(spa->spa_root_vdev == rvd); 2278 ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT); 2279 ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT); 2280 2281 if (type != SPA_IMPORT_ASSEMBLE) { 2282 ASSERT(spa_guid(spa) == pool_guid); 2283 } 2284 2285 /* 2286 * Try to open all vdevs, loading each label in the process. 2287 */ 2288 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2289 error = vdev_open(rvd); 2290 spa_config_exit(spa, SCL_ALL, FTAG); 2291 if (error != 0) 2292 return (error); 2293 2294 /* 2295 * We need to validate the vdev labels against the configuration that 2296 * we have in hand, which is dependent on the setting of mosconfig. If 2297 * mosconfig is true then we're validating the vdev labels based on 2298 * that config. Otherwise, we're validating against the cached config 2299 * (zpool.cache) that was read when we loaded the zfs module, and then 2300 * later we will recursively call spa_load() and validate against 2301 * the vdev config. 2302 * 2303 * If we're assembling a new pool that's been split off from an 2304 * existing pool, the labels haven't yet been updated so we skip 2305 * validation for now. 2306 */ 2307 if (type != SPA_IMPORT_ASSEMBLE) { 2308 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2309 error = vdev_validate(rvd, mosconfig); 2310 spa_config_exit(spa, SCL_ALL, FTAG); 2311 2312 if (error != 0) 2313 return (error); 2314 2315 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) 2316 return (SET_ERROR(ENXIO)); 2317 } 2318 2319 /* 2320 * Find the best uberblock. 2321 */ 2322 vdev_uberblock_load(rvd, ub, &label); 2323 2324 /* 2325 * If we weren't able to find a single valid uberblock, return failure. 
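 * (vdev_uberblock_load() leaves ub_txg at zero when it finds no valid
 * uberblock, which is what the check below keys on.)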
2326 */ 2327 if (ub->ub_txg == 0) { 2328 nvlist_free(label); 2329 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO)); 2330 } 2331 2332 /* 2333 * If the pool has an unsupported version we can't open it. 2334 */ 2335 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) { 2336 nvlist_free(label); 2337 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP)); 2338 } 2339 2340 if (ub->ub_version >= SPA_VERSION_FEATURES) { 2341 nvlist_t *features; 2342 2343 /* 2344 * If we weren't able to find what's necessary for reading the 2345 * MOS in the label, return failure. 2346 */ 2347 if (label == NULL || nvlist_lookup_nvlist(label, 2348 ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) { 2349 nvlist_free(label); 2350 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 2351 ENXIO)); 2352 } 2353 2354 /* 2355 * Update our in-core representation with the definitive values 2356 * from the label. 2357 */ 2358 nvlist_free(spa->spa_label_features); 2359 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0); 2360 } 2361 2362 nvlist_free(label); 2363 2364 /* 2365 * Look through entries in the label nvlist's features_for_read. If 2366 * there is a feature listed there which we don't understand then we 2367 * cannot open a pool. 2368 */ 2369 if (ub->ub_version >= SPA_VERSION_FEATURES) { 2370 nvlist_t *unsup_feat; 2371 2372 VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) == 2373 0); 2374 2375 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features, 2376 NULL); nvp != NULL; 2377 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) { 2378 if (!zfeature_is_supported(nvpair_name(nvp))) { 2379 VERIFY(nvlist_add_string(unsup_feat, 2380 nvpair_name(nvp), "") == 0); 2381 } 2382 } 2383 2384 if (!nvlist_empty(unsup_feat)) { 2385 VERIFY(nvlist_add_nvlist(spa->spa_load_info, 2386 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0); 2387 nvlist_free(unsup_feat); 2388 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 2389 ENOTSUP)); 2390 } 2391 2392 nvlist_free(unsup_feat); 2393 } 2394 2395 /* 2396 * If the vdev guid sum doesn't match the uberblock, we have an 2397 * incomplete configuration. We first check to see if the pool 2398 * is aware of the complete config (i.e ZPOOL_CONFIG_VDEV_CHILDREN). 2399 * If it is, defer the vdev_guid_sum check till later so we 2400 * can handle missing vdevs. 2401 */ 2402 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN, 2403 &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE && 2404 rvd->vdev_guid_sum != ub->ub_guid_sum) 2405 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO)); 2406 2407 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) { 2408 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2409 spa_try_repair(spa, config); 2410 spa_config_exit(spa, SCL_ALL, FTAG); 2411 nvlist_free(spa->spa_config_splitting); 2412 spa->spa_config_splitting = NULL; 2413 } 2414 2415 /* 2416 * Initialize internal SPA structures. 2417 */ 2418 spa->spa_state = POOL_STATE_ACTIVE; 2419 spa->spa_ubsync = spa->spa_uberblock; 2420 spa->spa_verify_min_txg = spa->spa_extreme_rewind ? 2421 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1; 2422 spa->spa_first_txg = spa->spa_last_ubsync_txg ? 
2423 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1; 2424 spa->spa_claim_max_txg = spa->spa_first_txg; 2425 spa->spa_prev_software_version = ub->ub_software_version; 2426 2427 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool); 2428 if (error) 2429 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2430 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset; 2431 2432 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0) 2433 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2434 2435 if (spa_version(spa) >= SPA_VERSION_FEATURES) { 2436 boolean_t missing_feat_read = B_FALSE; 2437 nvlist_t *unsup_feat, *enabled_feat; 2438 2439 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ, 2440 &spa->spa_feat_for_read_obj) != 0) { 2441 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2442 } 2443 2444 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE, 2445 &spa->spa_feat_for_write_obj) != 0) { 2446 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2447 } 2448 2449 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS, 2450 &spa->spa_feat_desc_obj) != 0) { 2451 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2452 } 2453 2454 enabled_feat = fnvlist_alloc(); 2455 unsup_feat = fnvlist_alloc(); 2456 2457 if (!spa_features_check(spa, B_FALSE, 2458 unsup_feat, enabled_feat)) 2459 missing_feat_read = B_TRUE; 2460 2461 if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) { 2462 if (!spa_features_check(spa, B_TRUE, 2463 unsup_feat, enabled_feat)) { 2464 missing_feat_write = B_TRUE; 2465 } 2466 } 2467 2468 fnvlist_add_nvlist(spa->spa_load_info, 2469 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat); 2470 2471 if (!nvlist_empty(unsup_feat)) { 2472 fnvlist_add_nvlist(spa->spa_load_info, 2473 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat); 2474 } 2475 2476 fnvlist_free(enabled_feat); 2477 fnvlist_free(unsup_feat); 2478 2479 if (!missing_feat_read) { 2480 fnvlist_add_boolean(spa->spa_load_info, 2481 ZPOOL_CONFIG_CAN_RDONLY); 2482 } 2483 2484 /* 2485 * If the state is SPA_LOAD_TRYIMPORT, our objective is 2486 * twofold: to determine whether the pool is available for 2487 * import in read-write mode and (if it is not) whether the 2488 * pool is available for import in read-only mode. If the pool 2489 * is available for import in read-write mode, it is displayed 2490 * as available in userland; if it is not available for import 2491 * in read-only mode, it is displayed as unavailable in 2492 * userland. If the pool is available for import in read-only 2493 * mode but not read-write mode, it is displayed as unavailable 2494 * in userland with a special note that the pool is actually 2495 * available for open in read-only mode. 2496 * 2497 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are 2498 * missing a feature for write, we must first determine whether 2499 * the pool can be opened read-only before returning to 2500 * userland in order to know whether to display the 2501 * abovementioned note. 2502 */ 2503 if (missing_feat_read || (missing_feat_write && 2504 spa_writeable(spa))) { 2505 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 2506 ENOTSUP)); 2507 } 2508 2509 /* 2510 * Load refcounts for ZFS features from disk into an in-memory 2511 * cache during SPA initialization. 
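 *
 * The cache lets later refcount queries be answered from memory rather
 * than the MOS.  A feature whose refcount object does not yet exist on
 * disk (feature_get_refcount_from_disk() returns ENOTSUP) is simply
 * recorded as SPA_FEATURE_DISABLED; any other error is treated as pool
 * corruption.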
2512 */ 2513 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) { 2514 uint64_t refcount; 2515 2516 error = feature_get_refcount_from_disk(spa, 2517 &spa_feature_table[i], &refcount); 2518 if (error == 0) { 2519 spa->spa_feat_refcount_cache[i] = refcount; 2520 } else if (error == ENOTSUP) { 2521 spa->spa_feat_refcount_cache[i] = 2522 SPA_FEATURE_DISABLED; 2523 } else { 2524 return (spa_vdev_err(rvd, 2525 VDEV_AUX_CORRUPT_DATA, EIO)); 2526 } 2527 } 2528 } 2529 2530 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) { 2531 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG, 2532 &spa->spa_feat_enabled_txg_obj) != 0) 2533 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2534 } 2535 2536 spa->spa_is_initializing = B_TRUE; 2537 error = dsl_pool_open(spa->spa_dsl_pool); 2538 spa->spa_is_initializing = B_FALSE; 2539 if (error != 0) 2540 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2541 2542 if (!mosconfig) { 2543 uint64_t hostid; 2544 nvlist_t *policy = NULL, *nvconfig; 2545 2546 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0) 2547 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2548 2549 if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig, 2550 ZPOOL_CONFIG_HOSTID, &hostid) == 0) { 2551 char *hostname; 2552 unsigned long myhostid = 0; 2553 2554 VERIFY(nvlist_lookup_string(nvconfig, 2555 ZPOOL_CONFIG_HOSTNAME, &hostname) == 0); 2556 2557 #ifdef _KERNEL 2558 myhostid = zone_get_hostid(NULL); 2559 #else /* _KERNEL */ 2560 /* 2561 * We're emulating the system's hostid in userland, so 2562 * we can't use zone_get_hostid(). 2563 */ 2564 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid); 2565 #endif /* _KERNEL */ 2566 if (hostid != 0 && myhostid != 0 && 2567 hostid != myhostid) { 2568 nvlist_free(nvconfig); 2569 cmn_err(CE_WARN, "pool '%s' could not be " 2570 "loaded as it was last accessed by " 2571 "another system (host: %s hostid: 0x%lx). " 2572 "See: http://illumos.org/msg/ZFS-8000-EY", 2573 spa_name(spa), hostname, 2574 (unsigned long)hostid); 2575 return (SET_ERROR(EBADF)); 2576 } 2577 } 2578 if (nvlist_lookup_nvlist(spa->spa_config, 2579 ZPOOL_REWIND_POLICY, &policy) == 0) 2580 VERIFY(nvlist_add_nvlist(nvconfig, 2581 ZPOOL_REWIND_POLICY, policy) == 0); 2582 2583 spa_config_set(spa, nvconfig); 2584 spa_unload(spa); 2585 spa_deactivate(spa); 2586 spa_activate(spa, orig_mode); 2587 2588 return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE)); 2589 } 2590 2591 /* Grab the secret checksum salt from the MOS. */ 2592 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 2593 DMU_POOL_CHECKSUM_SALT, 1, 2594 sizeof (spa->spa_cksum_salt.zcs_bytes), 2595 spa->spa_cksum_salt.zcs_bytes); 2596 if (error == ENOENT) { 2597 /* Generate a new salt for subsequent use */ 2598 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 2599 sizeof (spa->spa_cksum_salt.zcs_bytes)); 2600 } else if (error != 0) { 2601 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2602 } 2603 2604 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0) 2605 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2606 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj); 2607 if (error != 0) 2608 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2609 2610 /* 2611 * Load the bit that tells us to use the new accounting function 2612 * (raid-z deflation). If we have an older pool, this will not 2613 * be present. 
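 * That is why ENOENT is tolerated below: pools created before
 * SPA_VERSION_RAIDZ_DEFLATE never wrote a DMU_POOL_DEFLATE entry and keep
 * using the old, non-deflated space accounting.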
2614 */ 2615 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate); 2616 if (error != 0 && error != ENOENT) 2617 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2618 2619 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION, 2620 &spa->spa_creation_version); 2621 if (error != 0 && error != ENOENT) 2622 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2623 2624 /* 2625 * Load the persistent error log. If we have an older pool, this will 2626 * not be present. 2627 */ 2628 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last); 2629 if (error != 0 && error != ENOENT) 2630 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2631 2632 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB, 2633 &spa->spa_errlog_scrub); 2634 if (error != 0 && error != ENOENT) 2635 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2636 2637 /* 2638 * Load the history object. If we have an older pool, this 2639 * will not be present. 2640 */ 2641 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history); 2642 if (error != 0 && error != ENOENT) 2643 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2644 2645 /* 2646 * Load the per-vdev ZAP map. If we have an older pool, this will not 2647 * be present; in this case, defer its creation to a later time to 2648 * avoid dirtying the MOS this early / out of sync context. See 2649 * spa_sync_config_object. 2650 */ 2651 2652 /* The sentinel is only available in the MOS config. */ 2653 nvlist_t *mos_config; 2654 if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) 2655 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2656 2657 error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP, 2658 &spa->spa_all_vdev_zaps); 2659 2660 if (error != ENOENT && error != 0) { 2661 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2662 } else if (error == 0 && !nvlist_exists(mos_config, 2663 ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) { 2664 /* 2665 * An older version of ZFS overwrote the sentinel value, so 2666 * we have orphaned per-vdev ZAPs in the MOS. Defer their 2667 * destruction to later; see spa_sync_config_object. 2668 */ 2669 spa->spa_avz_action = AVZ_ACTION_DESTROY; 2670 /* 2671 * We're assuming that no vdevs have had their ZAPs created 2672 * before this. Better be sure of it. 2673 */ 2674 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev)); 2675 } 2676 nvlist_free(mos_config); 2677 2678 /* 2679 * If we're assembling the pool from the split-off vdevs of 2680 * an existing pool, we don't want to attach the spares & cache 2681 * devices. 2682 */ 2683 2684 /* 2685 * Load any hot spares for this pool. 2686 */ 2687 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object); 2688 if (error != 0 && error != ENOENT) 2689 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2690 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 2691 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES); 2692 if (load_nvlist(spa, spa->spa_spares.sav_object, 2693 &spa->spa_spares.sav_config) != 0) 2694 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2695 2696 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2697 spa_load_spares(spa); 2698 spa_config_exit(spa, SCL_ALL, FTAG); 2699 } else if (error == 0) { 2700 spa->spa_spares.sav_sync = B_TRUE; 2701 } 2702 2703 /* 2704 * Load any level 2 ARC devices for this pool. 
2705 */ 2706 error = spa_dir_prop(spa, DMU_POOL_L2CACHE, 2707 &spa->spa_l2cache.sav_object); 2708 if (error != 0 && error != ENOENT) 2709 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2710 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 2711 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE); 2712 if (load_nvlist(spa, spa->spa_l2cache.sav_object, 2713 &spa->spa_l2cache.sav_config) != 0) 2714 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2715 2716 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2717 spa_load_l2cache(spa); 2718 spa_config_exit(spa, SCL_ALL, FTAG); 2719 } else if (error == 0) { 2720 spa->spa_l2cache.sav_sync = B_TRUE; 2721 } 2722 2723 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 2724 2725 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object); 2726 if (error && error != ENOENT) 2727 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2728 2729 if (error == 0) { 2730 uint64_t autoreplace; 2731 2732 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs); 2733 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace); 2734 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation); 2735 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode); 2736 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand); 2737 spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO, 2738 &spa->spa_dedup_ditto); 2739 2740 spa->spa_autoreplace = (autoreplace != 0); 2741 } 2742 2743 /* 2744 * If the 'autoreplace' property is set, then post a resource notifying 2745 * the ZFS DE that it should not issue any faults for unopenable 2746 * devices. We also iterate over the vdevs, and post a sysevent for any 2747 * unopenable vdevs so that the normal autoreplace handler can take 2748 * over. 2749 */ 2750 if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) { 2751 spa_check_removed(spa->spa_root_vdev); 2752 /* 2753 * For the import case, this is done in spa_import(), because 2754 * at this point we're using the spare definitions from 2755 * the MOS config, not necessarily from the userland config. 2756 */ 2757 if (state != SPA_LOAD_IMPORT) { 2758 spa_aux_check_removed(&spa->spa_spares); 2759 spa_aux_check_removed(&spa->spa_l2cache); 2760 } 2761 } 2762 2763 /* 2764 * Load the vdev state for all toplevel vdevs. 2765 */ 2766 vdev_load(rvd); 2767 2768 /* 2769 * Propagate the leaf DTLs we just loaded all the way up the tree. 2770 */ 2771 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2772 vdev_dtl_reassess(rvd, 0, 0, B_FALSE); 2773 spa_config_exit(spa, SCL_ALL, FTAG); 2774 2775 /* 2776 * Load the DDTs (dedup tables). 2777 */ 2778 error = ddt_load(spa); 2779 if (error != 0) 2780 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2781 2782 spa_update_dspace(spa); 2783 2784 /* 2785 * Validate the config, using the MOS config to fill in any 2786 * information which might be missing. If we fail to validate 2787 * the config then declare the pool unfit for use. If we're 2788 * assembling a pool from a split, the log is not transferred 2789 * over. 2790 */ 2791 if (type != SPA_IMPORT_ASSEMBLE) { 2792 nvlist_t *nvconfig; 2793 2794 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0) 2795 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2796 2797 if (!spa_config_valid(spa, nvconfig)) { 2798 nvlist_free(nvconfig); 2799 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, 2800 ENXIO)); 2801 } 2802 nvlist_free(nvconfig); 2803 2804 /* 2805 * Now that we've validated the config, check the state of the 2806 * root vdev. 
If it can't be opened, it indicates one or 2807 * more toplevel vdevs are faulted. 2808 */ 2809 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) 2810 return (SET_ERROR(ENXIO)); 2811 2812 if (spa_writeable(spa) && spa_check_logs(spa)) { 2813 *ereport = FM_EREPORT_ZFS_LOG_REPLAY; 2814 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO)); 2815 } 2816 } 2817 2818 if (missing_feat_write) { 2819 ASSERT(state == SPA_LOAD_TRYIMPORT); 2820 2821 /* 2822 * At this point, we know that we can open the pool in 2823 * read-only mode but not read-write mode. We now have enough 2824 * information and can return to userland. 2825 */ 2826 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP)); 2827 } 2828 2829 /* 2830 * We've successfully opened the pool, verify that we're ready 2831 * to start pushing transactions. 2832 */ 2833 if (state != SPA_LOAD_TRYIMPORT) { 2834 if (error = spa_load_verify(spa)) 2835 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 2836 error)); 2837 } 2838 2839 if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER || 2840 spa->spa_load_max_txg == UINT64_MAX)) { 2841 dmu_tx_t *tx; 2842 int need_update = B_FALSE; 2843 dsl_pool_t *dp = spa_get_dsl(spa); 2844 2845 ASSERT(state != SPA_LOAD_TRYIMPORT); 2846 2847 /* 2848 * Claim log blocks that haven't been committed yet. 2849 * This must all happen in a single txg. 2850 * Note: spa_claim_max_txg is updated by spa_claim_notify(), 2851 * invoked from zil_claim_log_block()'s i/o done callback. 2852 * Price of rollback is that we abandon the log. 2853 */ 2854 spa->spa_claiming = B_TRUE; 2855 2856 tx = dmu_tx_create_assigned(dp, spa_first_txg(spa)); 2857 (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 2858 zil_claim, tx, DS_FIND_CHILDREN); 2859 dmu_tx_commit(tx); 2860 2861 spa->spa_claiming = B_FALSE; 2862 2863 spa_set_log_state(spa, SPA_LOG_GOOD); 2864 spa->spa_sync_on = B_TRUE; 2865 txg_sync_start(spa->spa_dsl_pool); 2866 2867 /* 2868 * Wait for all claims to sync. We sync up to the highest 2869 * claimed log block birth time so that claimed log blocks 2870 * don't appear to be from the future. spa_claim_max_txg 2871 * will have been set for us by either zil_check_log_chain() 2872 * (invoked from spa_check_logs()) or zil_claim() above. 2873 */ 2874 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg); 2875 2876 /* 2877 * If the config cache is stale, or we have uninitialized 2878 * metaslabs (see spa_vdev_add()), then update the config. 2879 * 2880 * If this is a verbatim import, trust the current 2881 * in-core spa_config and update the disk labels. 2882 */ 2883 if (config_cache_txg != spa->spa_config_txg || 2884 state == SPA_LOAD_IMPORT || 2885 state == SPA_LOAD_RECOVER || 2886 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM)) 2887 need_update = B_TRUE; 2888 2889 for (int c = 0; c < rvd->vdev_children; c++) 2890 if (rvd->vdev_child[c]->vdev_ms_array == 0) 2891 need_update = B_TRUE; 2892 2893 /* 2894 * Update the config cache asychronously in case we're the 2895 * root pool, in which case the config cache isn't writable yet. 2896 */ 2897 if (need_update) 2898 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 2899 2900 /* 2901 * Check all DTLs to see if anything needs resilvering. 2902 */ 2903 if (!dsl_scan_resilvering(spa->spa_dsl_pool) && 2904 vdev_resilver_needed(rvd, NULL, NULL)) 2905 spa_async_request(spa, SPA_ASYNC_RESILVER); 2906 2907 /* 2908 * Log the fact that we booted up (so that we can detect if 2909 * we rebooted in the middle of an operation). 
2910 */ 2911 spa_history_log_version(spa, "open"); 2912 2913 /* 2914 * Delete any inconsistent datasets. 2915 */ 2916 (void) dmu_objset_find(spa_name(spa), 2917 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN); 2918 2919 /* 2920 * Clean up any stale temporary dataset userrefs. 2921 */ 2922 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool); 2923 } 2924 2925 return (0); 2926 } 2927 2928 static int 2929 spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig) 2930 { 2931 int mode = spa->spa_mode; 2932 2933 spa_unload(spa); 2934 spa_deactivate(spa); 2935 2936 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1; 2937 2938 spa_activate(spa, mode); 2939 spa_async_suspend(spa); 2940 2941 return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig)); 2942 } 2943 2944 /* 2945 * If spa_load() fails this function will try loading prior txg's. If 2946 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool 2947 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this 2948 * function will not rewind the pool and will return the same error as 2949 * spa_load(). 2950 */ 2951 static int 2952 spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig, 2953 uint64_t max_request, int rewind_flags) 2954 { 2955 nvlist_t *loadinfo = NULL; 2956 nvlist_t *config = NULL; 2957 int load_error, rewind_error; 2958 uint64_t safe_rewind_txg; 2959 uint64_t min_txg; 2960 2961 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) { 2962 spa->spa_load_max_txg = spa->spa_load_txg; 2963 spa_set_log_state(spa, SPA_LOG_CLEAR); 2964 } else { 2965 spa->spa_load_max_txg = max_request; 2966 if (max_request != UINT64_MAX) 2967 spa->spa_extreme_rewind = B_TRUE; 2968 } 2969 2970 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING, 2971 mosconfig); 2972 if (load_error == 0) 2973 return (0); 2974 2975 if (spa->spa_root_vdev != NULL) 2976 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 2977 2978 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg; 2979 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp; 2980 2981 if (rewind_flags & ZPOOL_NEVER_REWIND) { 2982 nvlist_free(config); 2983 return (load_error); 2984 } 2985 2986 if (state == SPA_LOAD_RECOVER) { 2987 /* Price of rolling back is discarding txgs, including log */ 2988 spa_set_log_state(spa, SPA_LOG_CLEAR); 2989 } else { 2990 /* 2991 * If we aren't rolling back save the load info from our first 2992 * import attempt so that we can restore it after attempting 2993 * to rewind. 2994 */ 2995 loadinfo = spa->spa_load_info; 2996 spa->spa_load_info = fnvlist_alloc(); 2997 } 2998 2999 spa->spa_load_max_txg = spa->spa_last_ubsync_txg; 3000 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE; 3001 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ? 
3002 TXG_INITIAL : safe_rewind_txg; 3003 3004 /* 3005 * Continue as long as we're finding errors, we're still within 3006 * the acceptable rewind range, and we're still finding uberblocks 3007 */ 3008 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg && 3009 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) { 3010 if (spa->spa_load_max_txg < safe_rewind_txg) 3011 spa->spa_extreme_rewind = B_TRUE; 3012 rewind_error = spa_load_retry(spa, state, mosconfig); 3013 } 3014 3015 spa->spa_extreme_rewind = B_FALSE; 3016 spa->spa_load_max_txg = UINT64_MAX; 3017 3018 if (config && (rewind_error || state != SPA_LOAD_RECOVER)) 3019 spa_config_set(spa, config); 3020 3021 if (state == SPA_LOAD_RECOVER) { 3022 ASSERT3P(loadinfo, ==, NULL); 3023 return (rewind_error); 3024 } else { 3025 /* Store the rewind info as part of the initial load info */ 3026 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO, 3027 spa->spa_load_info); 3028 3029 /* Restore the initial load info */ 3030 fnvlist_free(spa->spa_load_info); 3031 spa->spa_load_info = loadinfo; 3032 3033 return (load_error); 3034 } 3035 } 3036 3037 /* 3038 * Pool Open/Import 3039 * 3040 * The import case is identical to an open except that the configuration is sent 3041 * down from userland, instead of grabbed from the configuration cache. For the 3042 * case of an open, the pool configuration will exist in the 3043 * POOL_STATE_UNINITIALIZED state. 3044 * 3045 * The stats information (gen/count/ustats) is used to gather vdev statistics at 3046 * the same time open the pool, without having to keep around the spa_t in some 3047 * ambiguous state. 3048 */ 3049 static int 3050 spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy, 3051 nvlist_t **config) 3052 { 3053 spa_t *spa; 3054 spa_load_state_t state = SPA_LOAD_OPEN; 3055 int error; 3056 int locked = B_FALSE; 3057 3058 *spapp = NULL; 3059 3060 /* 3061 * As disgusting as this is, we need to support recursive calls to this 3062 * function because dsl_dir_open() is called during spa_load(), and ends 3063 * up calling spa_open() again. The real fix is to figure out how to 3064 * avoid dsl_dir_open() calling this in the first place. 3065 */ 3066 if (mutex_owner(&spa_namespace_lock) != curthread) { 3067 mutex_enter(&spa_namespace_lock); 3068 locked = B_TRUE; 3069 } 3070 3071 if ((spa = spa_lookup(pool)) == NULL) { 3072 if (locked) 3073 mutex_exit(&spa_namespace_lock); 3074 return (SET_ERROR(ENOENT)); 3075 } 3076 3077 if (spa->spa_state == POOL_STATE_UNINITIALIZED) { 3078 zpool_rewind_policy_t policy; 3079 3080 zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config, 3081 &policy); 3082 if (policy.zrp_request & ZPOOL_DO_REWIND) 3083 state = SPA_LOAD_RECOVER; 3084 3085 spa_activate(spa, spa_mode_global); 3086 3087 if (state != SPA_LOAD_RECOVER) 3088 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 3089 3090 error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg, 3091 policy.zrp_request); 3092 3093 if (error == EBADF) { 3094 /* 3095 * If vdev_validate() returns failure (indicated by 3096 * EBADF), it indicates that one of the vdevs indicates 3097 * that the pool has been exported or destroyed. If 3098 * this is the case, the config cache is out of sync and 3099 * we should remove the pool from the namespace. 
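 *
 * The spa_config_sync() call below (with the removal flag set) rewrites
 * the cache file without this pool before spa_remove() drops the spa_t
 * from the namespace, so later opens don't keep tripping over the stale
 * entry.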
3100 */ 3101 spa_unload(spa); 3102 spa_deactivate(spa); 3103 spa_config_sync(spa, B_TRUE, B_TRUE); 3104 spa_remove(spa); 3105 if (locked) 3106 mutex_exit(&spa_namespace_lock); 3107 return (SET_ERROR(ENOENT)); 3108 } 3109 3110 if (error) { 3111 /* 3112 * We can't open the pool, but we still have useful 3113 * information: the state of each vdev after the 3114 * attempted vdev_open(). Return this to the user. 3115 */ 3116 if (config != NULL && spa->spa_config) { 3117 VERIFY(nvlist_dup(spa->spa_config, config, 3118 KM_SLEEP) == 0); 3119 VERIFY(nvlist_add_nvlist(*config, 3120 ZPOOL_CONFIG_LOAD_INFO, 3121 spa->spa_load_info) == 0); 3122 } 3123 spa_unload(spa); 3124 spa_deactivate(spa); 3125 spa->spa_last_open_failed = error; 3126 if (locked) 3127 mutex_exit(&spa_namespace_lock); 3128 *spapp = NULL; 3129 return (error); 3130 } 3131 } 3132 3133 spa_open_ref(spa, tag); 3134 3135 if (config != NULL) 3136 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 3137 3138 /* 3139 * If we've recovered the pool, pass back any information we 3140 * gathered while doing the load. 3141 */ 3142 if (state == SPA_LOAD_RECOVER) { 3143 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO, 3144 spa->spa_load_info) == 0); 3145 } 3146 3147 if (locked) { 3148 spa->spa_last_open_failed = 0; 3149 spa->spa_last_ubsync_txg = 0; 3150 spa->spa_load_txg = 0; 3151 mutex_exit(&spa_namespace_lock); 3152 } 3153 3154 *spapp = spa; 3155 3156 return (0); 3157 } 3158 3159 int 3160 spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy, 3161 nvlist_t **config) 3162 { 3163 return (spa_open_common(name, spapp, tag, policy, config)); 3164 } 3165 3166 int 3167 spa_open(const char *name, spa_t **spapp, void *tag) 3168 { 3169 return (spa_open_common(name, spapp, tag, NULL, NULL)); 3170 } 3171 3172 /* 3173 * Lookup the given spa_t, incrementing the inject count in the process, 3174 * preventing it from being exported or destroyed. 3175 */ 3176 spa_t * 3177 spa_inject_addref(char *name) 3178 { 3179 spa_t *spa; 3180 3181 mutex_enter(&spa_namespace_lock); 3182 if ((spa = spa_lookup(name)) == NULL) { 3183 mutex_exit(&spa_namespace_lock); 3184 return (NULL); 3185 } 3186 spa->spa_inject_ref++; 3187 mutex_exit(&spa_namespace_lock); 3188 3189 return (spa); 3190 } 3191 3192 void 3193 spa_inject_delref(spa_t *spa) 3194 { 3195 mutex_enter(&spa_namespace_lock); 3196 spa->spa_inject_ref--; 3197 mutex_exit(&spa_namespace_lock); 3198 } 3199 3200 /* 3201 * Add spares device information to the nvlist. 3202 */ 3203 static void 3204 spa_add_spares(spa_t *spa, nvlist_t *config) 3205 { 3206 nvlist_t **spares; 3207 uint_t i, nspares; 3208 nvlist_t *nvroot; 3209 uint64_t guid; 3210 vdev_stat_t *vs; 3211 uint_t vsc; 3212 uint64_t pool; 3213 3214 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3215 3216 if (spa->spa_spares.sav_count == 0) 3217 return; 3218 3219 VERIFY(nvlist_lookup_nvlist(config, 3220 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3221 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 3222 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 3223 if (nspares != 0) { 3224 VERIFY(nvlist_add_nvlist_array(nvroot, 3225 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 3226 VERIFY(nvlist_lookup_nvlist_array(nvroot, 3227 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 3228 3229 /* 3230 * Go through and find any spares which have since been 3231 * repurposed as an active spare. If this is the case, update 3232 * their status appropriately. 
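 *
 * spa_spare_exists() reporting a non-zero pool guid means some pool has
 * the spare in use; flagging it CANT_OPEN/VDEV_AUX_SPARED here lets
 * userland distinguish an active spare from one that is merely available.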
3233 */ 3234 for (i = 0; i < nspares; i++) { 3235 VERIFY(nvlist_lookup_uint64(spares[i], 3236 ZPOOL_CONFIG_GUID, &guid) == 0); 3237 if (spa_spare_exists(guid, &pool, NULL) && 3238 pool != 0ULL) { 3239 VERIFY(nvlist_lookup_uint64_array( 3240 spares[i], ZPOOL_CONFIG_VDEV_STATS, 3241 (uint64_t **)&vs, &vsc) == 0); 3242 vs->vs_state = VDEV_STATE_CANT_OPEN; 3243 vs->vs_aux = VDEV_AUX_SPARED; 3244 } 3245 } 3246 } 3247 } 3248 3249 /* 3250 * Add l2cache device information to the nvlist, including vdev stats. 3251 */ 3252 static void 3253 spa_add_l2cache(spa_t *spa, nvlist_t *config) 3254 { 3255 nvlist_t **l2cache; 3256 uint_t i, j, nl2cache; 3257 nvlist_t *nvroot; 3258 uint64_t guid; 3259 vdev_t *vd; 3260 vdev_stat_t *vs; 3261 uint_t vsc; 3262 3263 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3264 3265 if (spa->spa_l2cache.sav_count == 0) 3266 return; 3267 3268 VERIFY(nvlist_lookup_nvlist(config, 3269 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3270 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 3271 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 3272 if (nl2cache != 0) { 3273 VERIFY(nvlist_add_nvlist_array(nvroot, 3274 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 3275 VERIFY(nvlist_lookup_nvlist_array(nvroot, 3276 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 3277 3278 /* 3279 * Update level 2 cache device stats. 3280 */ 3281 3282 for (i = 0; i < nl2cache; i++) { 3283 VERIFY(nvlist_lookup_uint64(l2cache[i], 3284 ZPOOL_CONFIG_GUID, &guid) == 0); 3285 3286 vd = NULL; 3287 for (j = 0; j < spa->spa_l2cache.sav_count; j++) { 3288 if (guid == 3289 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) { 3290 vd = spa->spa_l2cache.sav_vdevs[j]; 3291 break; 3292 } 3293 } 3294 ASSERT(vd != NULL); 3295 3296 VERIFY(nvlist_lookup_uint64_array(l2cache[i], 3297 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 3298 == 0); 3299 vdev_get_stats(vd, vs); 3300 } 3301 } 3302 } 3303 3304 static void 3305 spa_add_feature_stats(spa_t *spa, nvlist_t *config) 3306 { 3307 nvlist_t *features; 3308 zap_cursor_t zc; 3309 zap_attribute_t za; 3310 3311 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3312 VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3313 3314 if (spa->spa_feat_for_read_obj != 0) { 3315 for (zap_cursor_init(&zc, spa->spa_meta_objset, 3316 spa->spa_feat_for_read_obj); 3317 zap_cursor_retrieve(&zc, &za) == 0; 3318 zap_cursor_advance(&zc)) { 3319 ASSERT(za.za_integer_length == sizeof (uint64_t) && 3320 za.za_num_integers == 1); 3321 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, 3322 za.za_first_integer)); 3323 } 3324 zap_cursor_fini(&zc); 3325 } 3326 3327 if (spa->spa_feat_for_write_obj != 0) { 3328 for (zap_cursor_init(&zc, spa->spa_meta_objset, 3329 spa->spa_feat_for_write_obj); 3330 zap_cursor_retrieve(&zc, &za) == 0; 3331 zap_cursor_advance(&zc)) { 3332 ASSERT(za.za_integer_length == sizeof (uint64_t) && 3333 za.za_num_integers == 1); 3334 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, 3335 za.za_first_integer)); 3336 } 3337 zap_cursor_fini(&zc); 3338 } 3339 3340 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS, 3341 features) == 0); 3342 nvlist_free(features); 3343 } 3344 3345 int 3346 spa_get_stats(const char *name, nvlist_t **config, 3347 char *altroot, size_t buflen) 3348 { 3349 int error; 3350 spa_t *spa; 3351 3352 *config = NULL; 3353 error = spa_open_common(name, &spa, FTAG, NULL, config); 3354 3355 if (spa != NULL) { 3356 /* 3357 * This still leaves a window of inconsistency where the spares 3358 * or l2cache devices could change and 
the config would be 3359 * self-inconsistent. 3360 */ 3361 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 3362 3363 if (*config != NULL) { 3364 uint64_t loadtimes[2]; 3365 3366 loadtimes[0] = spa->spa_loaded_ts.tv_sec; 3367 loadtimes[1] = spa->spa_loaded_ts.tv_nsec; 3368 VERIFY(nvlist_add_uint64_array(*config, 3369 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0); 3370 3371 VERIFY(nvlist_add_uint64(*config, 3372 ZPOOL_CONFIG_ERRCOUNT, 3373 spa_get_errlog_size(spa)) == 0); 3374 3375 if (spa_suspended(spa)) 3376 VERIFY(nvlist_add_uint64(*config, 3377 ZPOOL_CONFIG_SUSPENDED, 3378 spa->spa_failmode) == 0); 3379 3380 spa_add_spares(spa, *config); 3381 spa_add_l2cache(spa, *config); 3382 spa_add_feature_stats(spa, *config); 3383 } 3384 } 3385 3386 /* 3387 * We want to get the alternate root even for faulted pools, so we cheat 3388 * and call spa_lookup() directly. 3389 */ 3390 if (altroot) { 3391 if (spa == NULL) { 3392 mutex_enter(&spa_namespace_lock); 3393 spa = spa_lookup(name); 3394 if (spa) 3395 spa_altroot(spa, altroot, buflen); 3396 else 3397 altroot[0] = '\0'; 3398 spa = NULL; 3399 mutex_exit(&spa_namespace_lock); 3400 } else { 3401 spa_altroot(spa, altroot, buflen); 3402 } 3403 } 3404 3405 if (spa != NULL) { 3406 spa_config_exit(spa, SCL_CONFIG, FTAG); 3407 spa_close(spa, FTAG); 3408 } 3409 3410 return (error); 3411 } 3412 3413 /* 3414 * Validate that the auxiliary device array is well formed. We must have an 3415 * array of nvlists, each which describes a valid leaf vdev. If this is an 3416 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be 3417 * specified, as long as they are well-formed. 3418 */ 3419 static int 3420 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, 3421 spa_aux_vdev_t *sav, const char *config, uint64_t version, 3422 vdev_labeltype_t label) 3423 { 3424 nvlist_t **dev; 3425 uint_t i, ndev; 3426 vdev_t *vd; 3427 int error; 3428 3429 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 3430 3431 /* 3432 * It's acceptable to have no devs specified. 3433 */ 3434 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0) 3435 return (0); 3436 3437 if (ndev == 0) 3438 return (SET_ERROR(EINVAL)); 3439 3440 /* 3441 * Make sure the pool is formatted with a version that supports this 3442 * device type. 3443 */ 3444 if (spa_version(spa) < version) 3445 return (SET_ERROR(ENOTSUP)); 3446 3447 /* 3448 * Set the pending device list so we correctly handle device in-use 3449 * checking. 3450 */ 3451 sav->sav_pending = dev; 3452 sav->sav_npending = ndev; 3453 3454 for (i = 0; i < ndev; i++) { 3455 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0, 3456 mode)) != 0) 3457 goto out; 3458 3459 if (!vd->vdev_ops->vdev_op_leaf) { 3460 vdev_free(vd); 3461 error = SET_ERROR(EINVAL); 3462 goto out; 3463 } 3464 3465 /* 3466 * The L2ARC currently only supports disk devices in 3467 * kernel context. For user-level testing, we allow it. 
3468 */ 3469 #ifdef _KERNEL 3470 if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) && 3471 strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) { 3472 error = SET_ERROR(ENOTBLK); 3473 vdev_free(vd); 3474 goto out; 3475 } 3476 #endif 3477 vd->vdev_top = vd; 3478 3479 if ((error = vdev_open(vd)) == 0 && 3480 (error = vdev_label_init(vd, crtxg, label)) == 0) { 3481 VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID, 3482 vd->vdev_guid) == 0); 3483 } 3484 3485 vdev_free(vd); 3486 3487 if (error && 3488 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE)) 3489 goto out; 3490 else 3491 error = 0; 3492 } 3493 3494 out: 3495 sav->sav_pending = NULL; 3496 sav->sav_npending = 0; 3497 return (error); 3498 } 3499 3500 static int 3501 spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode) 3502 { 3503 int error; 3504 3505 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 3506 3507 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode, 3508 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES, 3509 VDEV_LABEL_SPARE)) != 0) { 3510 return (error); 3511 } 3512 3513 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode, 3514 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE, 3515 VDEV_LABEL_L2CACHE)); 3516 } 3517 3518 static void 3519 spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs, 3520 const char *config) 3521 { 3522 int i; 3523 3524 if (sav->sav_config != NULL) { 3525 nvlist_t **olddevs; 3526 uint_t oldndevs; 3527 nvlist_t **newdevs; 3528 3529 /* 3530 * Generate new dev list by concatentating with the 3531 * current dev list. 3532 */ 3533 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config, 3534 &olddevs, &oldndevs) == 0); 3535 3536 newdevs = kmem_alloc(sizeof (void *) * 3537 (ndevs + oldndevs), KM_SLEEP); 3538 for (i = 0; i < oldndevs; i++) 3539 VERIFY(nvlist_dup(olddevs[i], &newdevs[i], 3540 KM_SLEEP) == 0); 3541 for (i = 0; i < ndevs; i++) 3542 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs], 3543 KM_SLEEP) == 0); 3544 3545 VERIFY(nvlist_remove(sav->sav_config, config, 3546 DATA_TYPE_NVLIST_ARRAY) == 0); 3547 3548 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 3549 config, newdevs, ndevs + oldndevs) == 0); 3550 for (i = 0; i < oldndevs + ndevs; i++) 3551 nvlist_free(newdevs[i]); 3552 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *)); 3553 } else { 3554 /* 3555 * Generate a new dev list. 3556 */ 3557 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME, 3558 KM_SLEEP) == 0); 3559 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config, 3560 devs, ndevs) == 0); 3561 } 3562 } 3563 3564 /* 3565 * Stop and drop level 2 ARC devices 3566 */ 3567 void 3568 spa_l2cache_drop(spa_t *spa) 3569 { 3570 vdev_t *vd; 3571 int i; 3572 spa_aux_vdev_t *sav = &spa->spa_l2cache; 3573 3574 for (i = 0; i < sav->sav_count; i++) { 3575 uint64_t pool; 3576 3577 vd = sav->sav_vdevs[i]; 3578 ASSERT(vd != NULL); 3579 3580 if (spa_l2cache_exists(vd->vdev_guid, &pool) && 3581 pool != 0ULL && l2arc_vdev_present(vd)) 3582 l2arc_remove_vdev(vd); 3583 } 3584 } 3585 3586 /* 3587 * Pool Creation 3588 */ 3589 int 3590 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, 3591 nvlist_t *zplprops) 3592 { 3593 spa_t *spa; 3594 char *altroot = NULL; 3595 vdev_t *rvd; 3596 dsl_pool_t *dp; 3597 dmu_tx_t *tx; 3598 int error = 0; 3599 uint64_t txg = TXG_INITIAL; 3600 nvlist_t **spares, **l2cache; 3601 uint_t nspares, nl2cache; 3602 uint64_t version, obj; 3603 boolean_t has_features; 3604 3605 /* 3606 * If this pool already exists, return failure. 
3607 */ 3608 mutex_enter(&spa_namespace_lock); 3609 if (spa_lookup(pool) != NULL) { 3610 mutex_exit(&spa_namespace_lock); 3611 return (SET_ERROR(EEXIST)); 3612 } 3613 3614 /* 3615 * Allocate a new spa_t structure. 3616 */ 3617 (void) nvlist_lookup_string(props, 3618 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 3619 spa = spa_add(pool, NULL, altroot); 3620 spa_activate(spa, spa_mode_global); 3621 3622 if (props && (error = spa_prop_validate(spa, props))) { 3623 spa_deactivate(spa); 3624 spa_remove(spa); 3625 mutex_exit(&spa_namespace_lock); 3626 return (error); 3627 } 3628 3629 has_features = B_FALSE; 3630 for (nvpair_t *elem = nvlist_next_nvpair(props, NULL); 3631 elem != NULL; elem = nvlist_next_nvpair(props, elem)) { 3632 if (zpool_prop_feature(nvpair_name(elem))) 3633 has_features = B_TRUE; 3634 } 3635 3636 if (has_features || nvlist_lookup_uint64(props, 3637 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) { 3638 version = SPA_VERSION; 3639 } 3640 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 3641 3642 spa->spa_first_txg = txg; 3643 spa->spa_uberblock.ub_txg = txg - 1; 3644 spa->spa_uberblock.ub_version = version; 3645 spa->spa_ubsync = spa->spa_uberblock; 3646 spa->spa_load_state = SPA_LOAD_CREATE; 3647 3648 /* 3649 * Create "The Godfather" zio to hold all async IOs 3650 */ 3651 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 3652 KM_SLEEP); 3653 for (int i = 0; i < max_ncpus; i++) { 3654 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 3655 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 3656 ZIO_FLAG_GODFATHER); 3657 } 3658 3659 /* 3660 * Create the root vdev. 3661 */ 3662 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3663 3664 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 3665 3666 ASSERT(error != 0 || rvd != NULL); 3667 ASSERT(error != 0 || spa->spa_root_vdev == rvd); 3668 3669 if (error == 0 && !zfs_allocatable_devs(nvroot)) 3670 error = SET_ERROR(EINVAL); 3671 3672 if (error == 0 && 3673 (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 3674 (error = spa_validate_aux(spa, nvroot, txg, 3675 VDEV_ALLOC_ADD)) == 0) { 3676 for (int c = 0; c < rvd->vdev_children; c++) { 3677 vdev_metaslab_set_size(rvd->vdev_child[c]); 3678 vdev_expand(rvd->vdev_child[c], txg); 3679 } 3680 } 3681 3682 spa_config_exit(spa, SCL_ALL, FTAG); 3683 3684 if (error != 0) { 3685 spa_unload(spa); 3686 spa_deactivate(spa); 3687 spa_remove(spa); 3688 mutex_exit(&spa_namespace_lock); 3689 return (error); 3690 } 3691 3692 /* 3693 * Get the list of spares, if specified. 3694 */ 3695 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 3696 &spares, &nspares) == 0) { 3697 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME, 3698 KM_SLEEP) == 0); 3699 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 3700 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 3701 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3702 spa_load_spares(spa); 3703 spa_config_exit(spa, SCL_ALL, FTAG); 3704 spa->spa_spares.sav_sync = B_TRUE; 3705 } 3706 3707 /* 3708 * Get the list of level 2 cache devices, if specified. 
3709 */ 3710 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 3711 &l2cache, &nl2cache) == 0) { 3712 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 3713 NV_UNIQUE_NAME, KM_SLEEP) == 0); 3714 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 3715 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 3716 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3717 spa_load_l2cache(spa); 3718 spa_config_exit(spa, SCL_ALL, FTAG); 3719 spa->spa_l2cache.sav_sync = B_TRUE; 3720 } 3721 3722 spa->spa_is_initializing = B_TRUE; 3723 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg); 3724 spa->spa_meta_objset = dp->dp_meta_objset; 3725 spa->spa_is_initializing = B_FALSE; 3726 3727 /* 3728 * Create DDTs (dedup tables). 3729 */ 3730 ddt_create(spa); 3731 3732 spa_update_dspace(spa); 3733 3734 tx = dmu_tx_create_assigned(dp, txg); 3735 3736 /* 3737 * Create the pool config object. 3738 */ 3739 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 3740 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE, 3741 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 3742 3743 if (zap_add(spa->spa_meta_objset, 3744 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 3745 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 3746 cmn_err(CE_PANIC, "failed to add pool config"); 3747 } 3748 3749 if (spa_version(spa) >= SPA_VERSION_FEATURES) 3750 spa_feature_create_zap_objects(spa, tx); 3751 3752 if (zap_add(spa->spa_meta_objset, 3753 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION, 3754 sizeof (uint64_t), 1, &version, tx) != 0) { 3755 cmn_err(CE_PANIC, "failed to add pool version"); 3756 } 3757 3758 /* Newly created pools with the right version are always deflated. */ 3759 if (version >= SPA_VERSION_RAIDZ_DEFLATE) { 3760 spa->spa_deflate = TRUE; 3761 if (zap_add(spa->spa_meta_objset, 3762 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 3763 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 3764 cmn_err(CE_PANIC, "failed to add deflate"); 3765 } 3766 } 3767 3768 /* 3769 * Create the deferred-free bpobj. Turn off compression 3770 * because sync-to-convergence takes longer if the blocksize 3771 * keeps changing. 3772 */ 3773 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx); 3774 dmu_object_set_compress(spa->spa_meta_objset, obj, 3775 ZIO_COMPRESS_OFF, tx); 3776 if (zap_add(spa->spa_meta_objset, 3777 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ, 3778 sizeof (uint64_t), 1, &obj, tx) != 0) { 3779 cmn_err(CE_PANIC, "failed to add bpobj"); 3780 } 3781 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj, 3782 spa->spa_meta_objset, obj)); 3783 3784 /* 3785 * Create the pool's history object. 3786 */ 3787 if (version >= SPA_VERSION_ZPOOL_HISTORY) 3788 spa_history_create_obj(spa, tx); 3789 3790 /* 3791 * Generate some random noise for salted checksums to operate on. 3792 */ 3793 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 3794 sizeof (spa->spa_cksum_salt.zcs_bytes)); 3795 3796 /* 3797 * Set pool properties. 
3798 */ 3799 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 3800 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 3801 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); 3802 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND); 3803 3804 if (props != NULL) { 3805 spa_configfile_set(spa, props, B_FALSE); 3806 spa_sync_props(props, tx); 3807 } 3808 3809 dmu_tx_commit(tx); 3810 3811 spa->spa_sync_on = B_TRUE; 3812 txg_sync_start(spa->spa_dsl_pool); 3813 3814 /* 3815 * We explicitly wait for the first transaction to complete so that our 3816 * bean counters are appropriately updated. 3817 */ 3818 txg_wait_synced(spa->spa_dsl_pool, txg); 3819 3820 spa_config_sync(spa, B_FALSE, B_TRUE); 3821 spa_event_notify(spa, NULL, ESC_ZFS_POOL_CREATE); 3822 3823 spa_history_log_version(spa, "create"); 3824 3825 /* 3826 * Don't count references from objsets that are already closed 3827 * and are making their way through the eviction process. 3828 */ 3829 spa_evicting_os_wait(spa); 3830 spa->spa_minref = refcount_count(&spa->spa_refcount); 3831 spa->spa_load_state = SPA_LOAD_NONE; 3832 3833 mutex_exit(&spa_namespace_lock); 3834 3835 return (0); 3836 } 3837 3838 #ifdef _KERNEL 3839 /* 3840 * Get the root pool information from the root disk, then import the root pool 3841 * during the system boot up time. 3842 */ 3843 extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **); 3844 3845 static nvlist_t * 3846 spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid) 3847 { 3848 nvlist_t *config; 3849 nvlist_t *nvtop, *nvroot; 3850 uint64_t pgid; 3851 3852 if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0) 3853 return (NULL); 3854 3855 /* 3856 * Add this top-level vdev to the child array. 3857 */ 3858 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3859 &nvtop) == 0); 3860 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 3861 &pgid) == 0); 3862 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0); 3863 3864 /* 3865 * Put this pool's top-level vdevs into a root vdev. 3866 */ 3867 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3868 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, 3869 VDEV_TYPE_ROOT) == 0); 3870 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 3871 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 3872 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 3873 &nvtop, 1) == 0); 3874 3875 /* 3876 * Replace the existing vdev_tree with the new root vdev in 3877 * this pool's configuration (remove the old, add the new). 3878 */ 3879 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 3880 nvlist_free(nvroot); 3881 return (config); 3882 } 3883 3884 /* 3885 * Walk the vdev tree and see if we can find a device with "better" 3886 * configuration. A configuration is "better" if the label on that 3887 * device has a more recent txg. 3888 */ 3889 static void 3890 spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg) 3891 { 3892 for (int c = 0; c < vd->vdev_children; c++) 3893 spa_alt_rootvdev(vd->vdev_child[c], avd, txg); 3894 3895 if (vd->vdev_ops->vdev_op_leaf) { 3896 nvlist_t *label; 3897 uint64_t label_txg; 3898 3899 if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid, 3900 &label) != 0) 3901 return; 3902 3903 VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG, 3904 &label_txg) == 0); 3905 3906 /* 3907 * Do we have a better boot device? 
3908 */ 3909 if (label_txg > *txg) { 3910 *txg = label_txg; 3911 *avd = vd; 3912 } 3913 nvlist_free(label); 3914 } 3915 } 3916 3917 /* 3918 * Import a root pool. 3919 * 3920 * For x86, devpath_list will consist of the devid and/or physpath name of 3921 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a"). 3922 * The GRUB "findroot" command will return the vdev we should boot. 3923 * 3924 * For SPARC, devpath_list consists of the physpath name of the booting device, 3925 * regardless of whether the root pool is a single-device pool or a mirrored pool. 3926 * e.g. 3927 * "/pci@1f,0/ide@d/disk@0,0:a" 3928 */ 3929 int 3930 spa_import_rootpool(char *devpath, char *devid) 3931 { 3932 spa_t *spa; 3933 vdev_t *rvd, *bvd, *avd = NULL; 3934 nvlist_t *config, *nvtop; 3935 uint64_t guid, txg; 3936 char *pname; 3937 int error; 3938 3939 /* 3940 * Read the label from the boot device and generate a configuration. 3941 */ 3942 config = spa_generate_rootconf(devpath, devid, &guid); 3943 #if defined(_OBP) && defined(_KERNEL) 3944 if (config == NULL) { 3945 if (strstr(devpath, "/iscsi/ssd") != NULL) { 3946 /* iscsi boot */ 3947 get_iscsi_bootpath_phy(devpath); 3948 config = spa_generate_rootconf(devpath, devid, &guid); 3949 } 3950 } 3951 #endif 3952 if (config == NULL) { 3953 cmn_err(CE_NOTE, "Cannot read the pool label from '%s'", 3954 devpath); 3955 return (SET_ERROR(EIO)); 3956 } 3957 3958 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 3959 &pname) == 0); 3960 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0); 3961 3962 mutex_enter(&spa_namespace_lock); 3963 if ((spa = spa_lookup(pname)) != NULL) { 3964 /* 3965 * Remove the existing root pool from the namespace so that we 3966 * can replace it with the correct config we just read in. 3967 */ 3968 spa_remove(spa); 3969 } 3970 3971 spa = spa_add(pname, config, NULL); 3972 spa->spa_is_root = B_TRUE; 3973 spa->spa_import_flags = ZFS_IMPORT_VERBATIM; 3974 3975 /* 3976 * Build up a vdev tree based on the boot device's label config. 3977 */ 3978 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3979 &nvtop) == 0); 3980 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3981 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0, 3982 VDEV_ALLOC_ROOTPOOL); 3983 spa_config_exit(spa, SCL_ALL, FTAG); 3984 if (error) { 3985 mutex_exit(&spa_namespace_lock); 3986 nvlist_free(config); 3987 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'", 3988 pname); 3989 return (error); 3990 } 3991 3992 /* 3993 * Get the boot vdev. 3994 */ 3995 if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) { 3996 cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu", 3997 (u_longlong_t)guid); 3998 error = SET_ERROR(ENOENT); 3999 goto out; 4000 } 4001 4002 /* 4003 * Determine if there is a better boot device. 4004 */ 4005 avd = bvd; 4006 spa_alt_rootvdev(rvd, &avd, &txg); 4007 if (avd != bvd) { 4008 cmn_err(CE_NOTE, "The boot device is 'degraded'. Please " 4009 "try booting from '%s'", avd->vdev_path); 4010 error = SET_ERROR(EINVAL); 4011 goto out; 4012 } 4013 4014 /* 4015 * If the boot device is part of a spare vdev then ensure that 4016 * we're booting off the active spare. 4017 */ 4018 if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops && 4019 !bvd->vdev_isspare) { 4020 cmn_err(CE_NOTE, "The boot device is currently spared.
Please " 4021 "try booting from '%s'", 4022 bvd->vdev_parent-> 4023 vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path); 4024 error = SET_ERROR(EINVAL); 4025 goto out; 4026 } 4027 4028 error = 0; 4029 out: 4030 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4031 vdev_free(rvd); 4032 spa_config_exit(spa, SCL_ALL, FTAG); 4033 mutex_exit(&spa_namespace_lock); 4034 4035 nvlist_free(config); 4036 return (error); 4037 } 4038 4039 #endif 4040 4041 /* 4042 * Import a non-root pool into the system. 4043 */ 4044 int 4045 spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) 4046 { 4047 spa_t *spa; 4048 char *altroot = NULL; 4049 spa_load_state_t state = SPA_LOAD_IMPORT; 4050 zpool_rewind_policy_t policy; 4051 uint64_t mode = spa_mode_global; 4052 uint64_t readonly = B_FALSE; 4053 int error; 4054 nvlist_t *nvroot; 4055 nvlist_t **spares, **l2cache; 4056 uint_t nspares, nl2cache; 4057 4058 /* 4059 * If a pool with this name exists, return failure. 4060 */ 4061 mutex_enter(&spa_namespace_lock); 4062 if (spa_lookup(pool) != NULL) { 4063 mutex_exit(&spa_namespace_lock); 4064 return (SET_ERROR(EEXIST)); 4065 } 4066 4067 /* 4068 * Create and initialize the spa structure. 4069 */ 4070 (void) nvlist_lookup_string(props, 4071 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 4072 (void) nvlist_lookup_uint64(props, 4073 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly); 4074 if (readonly) 4075 mode = FREAD; 4076 spa = spa_add(pool, config, altroot); 4077 spa->spa_import_flags = flags; 4078 4079 /* 4080 * Verbatim import - Take a pool and insert it into the namespace 4081 * as if it had been loaded at boot. 4082 */ 4083 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) { 4084 if (props != NULL) 4085 spa_configfile_set(spa, props, B_FALSE); 4086 4087 spa_config_sync(spa, B_FALSE, B_TRUE); 4088 spa_event_notify(spa, NULL, ESC_ZFS_POOL_IMPORT); 4089 4090 mutex_exit(&spa_namespace_lock); 4091 return (0); 4092 } 4093 4094 spa_activate(spa, mode); 4095 4096 /* 4097 * Don't start async tasks until we know everything is healthy. 4098 */ 4099 spa_async_suspend(spa); 4100 4101 zpool_get_rewind_policy(config, &policy); 4102 if (policy.zrp_request & ZPOOL_DO_REWIND) 4103 state = SPA_LOAD_RECOVER; 4104 4105 /* 4106 * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig 4107 * because the user-supplied config is actually the one to trust when 4108 * doing an import. 4109 */ 4110 if (state != SPA_LOAD_RECOVER) 4111 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 4112 4113 error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg, 4114 policy.zrp_request); 4115 4116 /* 4117 * Propagate anything learned while loading the pool and pass it 4118 * back to caller (i.e. rewind info, missing devices, etc). 4119 */ 4120 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 4121 spa->spa_load_info) == 0); 4122 4123 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4124 /* 4125 * Toss any existing sparelist, as it doesn't have any validity 4126 * anymore, and conflicts with spa_has_spare(). 
4127 */ 4128 if (spa->spa_spares.sav_config) { 4129 nvlist_free(spa->spa_spares.sav_config); 4130 spa->spa_spares.sav_config = NULL; 4131 spa_load_spares(spa); 4132 } 4133 if (spa->spa_l2cache.sav_config) { 4134 nvlist_free(spa->spa_l2cache.sav_config); 4135 spa->spa_l2cache.sav_config = NULL; 4136 spa_load_l2cache(spa); 4137 } 4138 4139 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4140 &nvroot) == 0); 4141 if (error == 0) 4142 error = spa_validate_aux(spa, nvroot, -1ULL, 4143 VDEV_ALLOC_SPARE); 4144 if (error == 0) 4145 error = spa_validate_aux(spa, nvroot, -1ULL, 4146 VDEV_ALLOC_L2CACHE); 4147 spa_config_exit(spa, SCL_ALL, FTAG); 4148 4149 if (props != NULL) 4150 spa_configfile_set(spa, props, B_FALSE); 4151 4152 if (error != 0 || (props && spa_writeable(spa) && 4153 (error = spa_prop_set(spa, props)))) { 4154 spa_unload(spa); 4155 spa_deactivate(spa); 4156 spa_remove(spa); 4157 mutex_exit(&spa_namespace_lock); 4158 return (error); 4159 } 4160 4161 spa_async_resume(spa); 4162 4163 /* 4164 * Override any spares and level 2 cache devices as specified by 4165 * the user, as these may have correct device names/devids, etc. 4166 */ 4167 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 4168 &spares, &nspares) == 0) { 4169 if (spa->spa_spares.sav_config) 4170 VERIFY(nvlist_remove(spa->spa_spares.sav_config, 4171 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 4172 else 4173 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 4174 NV_UNIQUE_NAME, KM_SLEEP) == 0); 4175 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 4176 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 4177 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4178 spa_load_spares(spa); 4179 spa_config_exit(spa, SCL_ALL, FTAG); 4180 spa->spa_spares.sav_sync = B_TRUE; 4181 } 4182 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 4183 &l2cache, &nl2cache) == 0) { 4184 if (spa->spa_l2cache.sav_config) 4185 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 4186 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 4187 else 4188 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 4189 NV_UNIQUE_NAME, KM_SLEEP) == 0); 4190 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 4191 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 4192 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4193 spa_load_l2cache(spa); 4194 spa_config_exit(spa, SCL_ALL, FTAG); 4195 spa->spa_l2cache.sav_sync = B_TRUE; 4196 } 4197 4198 /* 4199 * Check for any removed devices. 4200 */ 4201 if (spa->spa_autoreplace) { 4202 spa_aux_check_removed(&spa->spa_spares); 4203 spa_aux_check_removed(&spa->spa_l2cache); 4204 } 4205 4206 if (spa_writeable(spa)) { 4207 /* 4208 * Update the config cache to include the newly-imported pool. 4209 */ 4210 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 4211 } 4212 4213 /* 4214 * It's possible that the pool was expanded while it was exported. 4215 * We kick off an async task to handle this for us. 
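* The request is serviced later by spa_async_thread() (see the SPA_ASYNC_AUTOEXPAND handling below).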
4216 */ 4217 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND); 4218 4219 spa_history_log_version(spa, "import"); 4220 4221 spa_event_notify(spa, NULL, ESC_ZFS_POOL_IMPORT); 4222 4223 mutex_exit(&spa_namespace_lock); 4224 4225 return (0); 4226 } 4227 4228 nvlist_t * 4229 spa_tryimport(nvlist_t *tryconfig) 4230 { 4231 nvlist_t *config = NULL; 4232 char *poolname; 4233 spa_t *spa; 4234 uint64_t state; 4235 int error; 4236 4237 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 4238 return (NULL); 4239 4240 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 4241 return (NULL); 4242 4243 /* 4244 * Create and initialize the spa structure. 4245 */ 4246 mutex_enter(&spa_namespace_lock); 4247 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL); 4248 spa_activate(spa, FREAD); 4249 4250 /* 4251 * Pass off the heavy lifting to spa_load(). 4252 * Pass TRUE for mosconfig because the user-supplied config 4253 * is actually the one to trust when doing an import. 4254 */ 4255 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE); 4256 4257 /* 4258 * If 'tryconfig' was at least parsable, return the current config. 4259 */ 4260 if (spa->spa_root_vdev != NULL) { 4261 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 4262 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 4263 poolname) == 0); 4264 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 4265 state) == 0); 4266 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 4267 spa->spa_uberblock.ub_timestamp) == 0); 4268 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 4269 spa->spa_load_info) == 0); 4270 4271 /* 4272 * If the bootfs property exists on this pool then we 4273 * copy it out so that external consumers can tell which 4274 * pools are bootable. 4275 */ 4276 if ((!error || error == EEXIST) && spa->spa_bootfs) { 4277 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4278 4279 /* 4280 * We have to play games with the name since the 4281 * pool was opened as TRYIMPORT_NAME. 4282 */ 4283 if (dsl_dsobj_to_dsname(spa_name(spa), 4284 spa->spa_bootfs, tmpname) == 0) { 4285 char *cp; 4286 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4287 4288 cp = strchr(tmpname, '/'); 4289 if (cp == NULL) { 4290 (void) strlcpy(dsname, tmpname, 4291 MAXPATHLEN); 4292 } else { 4293 (void) snprintf(dsname, MAXPATHLEN, 4294 "%s/%s", poolname, ++cp); 4295 } 4296 VERIFY(nvlist_add_string(config, 4297 ZPOOL_CONFIG_BOOTFS, dsname) == 0); 4298 kmem_free(dsname, MAXPATHLEN); 4299 } 4300 kmem_free(tmpname, MAXPATHLEN); 4301 } 4302 4303 /* 4304 * Add the list of hot spares and level 2 cache devices. 4305 */ 4306 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 4307 spa_add_spares(spa, config); 4308 spa_add_l2cache(spa, config); 4309 spa_config_exit(spa, SCL_CONFIG, FTAG); 4310 } 4311 4312 spa_unload(spa); 4313 spa_deactivate(spa); 4314 spa_remove(spa); 4315 mutex_exit(&spa_namespace_lock); 4316 4317 return (config); 4318 } 4319 4320 /* 4321 * Pool export/destroy 4322 * 4323 * The act of destroying or exporting a pool is very simple. We make sure there 4324 * is no more pending I/O and any references to the pool are gone. Then, we 4325 * update the pool state and sync all the labels to disk, removing the 4326 * configuration from the cache afterwards. If the 'hardforce' flag is set, then 4327 * we don't sync the labels or remove the configuration cache. 
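* spa_export(), spa_destroy() and spa_reset() below are thin wrappers that differ mainly in the new_state they pass to spa_export_common().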
4328 */ 4329 static int 4330 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig, 4331 boolean_t force, boolean_t hardforce) 4332 { 4333 spa_t *spa; 4334 4335 if (oldconfig) 4336 *oldconfig = NULL; 4337 4338 if (!(spa_mode_global & FWRITE)) 4339 return (SET_ERROR(EROFS)); 4340 4341 mutex_enter(&spa_namespace_lock); 4342 if ((spa = spa_lookup(pool)) == NULL) { 4343 mutex_exit(&spa_namespace_lock); 4344 return (SET_ERROR(ENOENT)); 4345 } 4346 4347 /* 4348 * Put a hold on the pool, drop the namespace lock, stop async tasks, 4349 * reacquire the namespace lock, and see if we can export. 4350 */ 4351 spa_open_ref(spa, FTAG); 4352 mutex_exit(&spa_namespace_lock); 4353 spa_async_suspend(spa); 4354 mutex_enter(&spa_namespace_lock); 4355 spa_close(spa, FTAG); 4356 4357 /* 4358 * The pool will be in core if it's openable, 4359 * in which case we can modify its state. 4360 */ 4361 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) { 4362 /* 4363 * Objsets may be open only because they're dirty, so we 4364 * have to force it to sync before checking spa_refcnt. 4365 */ 4366 txg_wait_synced(spa->spa_dsl_pool, 0); 4367 spa_evicting_os_wait(spa); 4368 4369 /* 4370 * A pool cannot be exported or destroyed if there are active 4371 * references. If we are resetting a pool, allow references by 4372 * fault injection handlers. 4373 */ 4374 if (!spa_refcount_zero(spa) || 4375 (spa->spa_inject_ref != 0 && 4376 new_state != POOL_STATE_UNINITIALIZED)) { 4377 spa_async_resume(spa); 4378 mutex_exit(&spa_namespace_lock); 4379 return (SET_ERROR(EBUSY)); 4380 } 4381 4382 /* 4383 * A pool cannot be exported if it has an active shared spare. 4384 * This is to prevent other pools stealing the active spare 4385 * from an exported pool. If the user insists, such a pool can 4386 * still be forcibly exported. 4387 */ 4388 if (!force && new_state == POOL_STATE_EXPORTED && 4389 spa_has_active_shared_spare(spa)) { 4390 spa_async_resume(spa); 4391 mutex_exit(&spa_namespace_lock); 4392 return (SET_ERROR(EXDEV)); 4393 } 4394 4395 /* 4396 * We want this to be reflected on every label, 4397 * so mark them all dirty. spa_unload() will do the 4398 * final sync that pushes these changes out. 4399 */ 4400 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 4401 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4402 spa->spa_state = new_state; 4403 spa->spa_final_txg = spa_last_synced_txg(spa) + 4404 TXG_DEFER_SIZE + 1; 4405 vdev_config_dirty(spa->spa_root_vdev); 4406 spa_config_exit(spa, SCL_ALL, FTAG); 4407 } 4408 } 4409 4410 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY); 4411 4412 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 4413 spa_unload(spa); 4414 spa_deactivate(spa); 4415 } 4416 4417 if (oldconfig && spa->spa_config) 4418 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 4419 4420 if (new_state != POOL_STATE_UNINITIALIZED) { 4421 if (!hardforce) 4422 spa_config_sync(spa, B_TRUE, B_TRUE); 4423 spa_remove(spa); 4424 } 4425 mutex_exit(&spa_namespace_lock); 4426 4427 return (0); 4428 } 4429 4430 /* 4431 * Destroy a storage pool. 4432 */ 4433 int 4434 spa_destroy(char *pool) 4435 { 4436 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, 4437 B_FALSE, B_FALSE)); 4438 } 4439 4440 /* 4441 * Export a storage pool.
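* Unlike spa_destroy(), the pool is left in the POOL_STATE_EXPORTED state so that it can be imported again later.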
4442 */ 4443 int 4444 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force, 4445 boolean_t hardforce) 4446 { 4447 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, 4448 force, hardforce)); 4449 } 4450 4451 /* 4452 * Similar to spa_export(), this unloads the spa_t without actually removing it 4453 * from the namespace in any way. 4454 */ 4455 int 4456 spa_reset(char *pool) 4457 { 4458 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 4459 B_FALSE, B_FALSE)); 4460 } 4461 4462 /* 4463 * ========================================================================== 4464 * Device manipulation 4465 * ========================================================================== 4466 */ 4467 4468 /* 4469 * Add a device to a storage pool. 4470 */ 4471 int 4472 spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 4473 { 4474 uint64_t txg, id; 4475 int error; 4476 vdev_t *rvd = spa->spa_root_vdev; 4477 vdev_t *vd, *tvd; 4478 nvlist_t **spares, **l2cache; 4479 uint_t nspares, nl2cache; 4480 4481 ASSERT(spa_writeable(spa)); 4482 4483 txg = spa_vdev_enter(spa); 4484 4485 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 4486 VDEV_ALLOC_ADD)) != 0) 4487 return (spa_vdev_exit(spa, NULL, txg, error)); 4488 4489 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ 4490 4491 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 4492 &nspares) != 0) 4493 nspares = 0; 4494 4495 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 4496 &nl2cache) != 0) 4497 nl2cache = 0; 4498 4499 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) 4500 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 4501 4502 if (vd->vdev_children != 0 && 4503 (error = vdev_create(vd, txg, B_FALSE)) != 0) 4504 return (spa_vdev_exit(spa, vd, txg, error)); 4505 4506 /* 4507 * We must validate the spares and l2cache devices after checking the 4508 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 4509 */ 4510 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) 4511 return (spa_vdev_exit(spa, vd, txg, error)); 4512 4513 /* 4514 * Transfer each new top-level vdev from vd to rvd. 4515 */ 4516 for (int c = 0; c < vd->vdev_children; c++) { 4517 4518 /* 4519 * Set the vdev id to the first hole, if one exists. 4520 */ 4521 for (id = 0; id < rvd->vdev_children; id++) { 4522 if (rvd->vdev_child[id]->vdev_ishole) { 4523 vdev_free(rvd->vdev_child[id]); 4524 break; 4525 } 4526 } 4527 tvd = vd->vdev_child[c]; 4528 vdev_remove_child(vd, tvd); 4529 tvd->vdev_id = id; 4530 vdev_add_child(rvd, tvd); 4531 vdev_config_dirty(tvd); 4532 } 4533 4534 if (nspares != 0) { 4535 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 4536 ZPOOL_CONFIG_SPARES); 4537 spa_load_spares(spa); 4538 spa->spa_spares.sav_sync = B_TRUE; 4539 } 4540 4541 if (nl2cache != 0) { 4542 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 4543 ZPOOL_CONFIG_L2CACHE); 4544 spa_load_l2cache(spa); 4545 spa->spa_l2cache.sav_sync = B_TRUE; 4546 } 4547 4548 /* 4549 * We have to be careful when adding new vdevs to an existing pool. 4550 * If other threads start allocating from these vdevs before we 4551 * sync the config cache, and we lose power, then upon reboot we may 4552 * fail to open the pool because there are DVAs that the config cache 4553 * can't translate. Therefore, we first add the vdevs without 4554 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 4555 * and then let spa_config_update() initialize the new metaslabs. 
4556 * 4557 * spa_load() checks for added-but-not-initialized vdevs, so that 4558 * if we lose power at any point in this sequence, the remaining 4559 * steps will be completed the next time we load the pool. 4560 */ 4561 (void) spa_vdev_exit(spa, vd, txg, 0); 4562 4563 mutex_enter(&spa_namespace_lock); 4564 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 4565 spa_event_notify(spa, NULL, ESC_ZFS_VDEV_ADD); 4566 mutex_exit(&spa_namespace_lock); 4567 4568 return (0); 4569 } 4570 4571 /* 4572 * Attach a device to a mirror. The arguments are the path to any device 4573 * in the mirror, and the nvroot for the new device. If the path specifies 4574 * a device that is not mirrored, we automatically insert the mirror vdev. 4575 * 4576 * If 'replacing' is specified, the new device is intended to replace the 4577 * existing device; in this case the two devices are made into their own 4578 * mirror using the 'replacing' vdev, which is functionally identical to 4579 * the mirror vdev (it actually reuses all the same ops) but has a few 4580 * extra rules: you can't attach to it after it's been created, and upon 4581 * completion of resilvering, the first disk (the one being replaced) 4582 * is automatically detached. 4583 */ 4584 int 4585 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 4586 { 4587 uint64_t txg, dtl_max_txg; 4588 vdev_t *rvd = spa->spa_root_vdev; 4589 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 4590 vdev_ops_t *pvops; 4591 char *oldvdpath, *newvdpath; 4592 int newvd_isspare; 4593 int error; 4594 4595 ASSERT(spa_writeable(spa)); 4596 4597 txg = spa_vdev_enter(spa); 4598 4599 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 4600 4601 if (oldvd == NULL) 4602 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 4603 4604 if (!oldvd->vdev_ops->vdev_op_leaf) 4605 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4606 4607 pvd = oldvd->vdev_parent; 4608 4609 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 4610 VDEV_ALLOC_ATTACH)) != 0) 4611 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 4612 4613 if (newrootvd->vdev_children != 1) 4614 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 4615 4616 newvd = newrootvd->vdev_child[0]; 4617 4618 if (!newvd->vdev_ops->vdev_op_leaf) 4619 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 4620 4621 if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 4622 return (spa_vdev_exit(spa, newrootvd, txg, error)); 4623 4624 /* 4625 * Spares can't replace logs 4626 */ 4627 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare) 4628 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4629 4630 if (!replacing) { 4631 /* 4632 * For attach, the only allowable parent is a mirror or the root 4633 * vdev. 4634 */ 4635 if (pvd->vdev_ops != &vdev_mirror_ops && 4636 pvd->vdev_ops != &vdev_root_ops) 4637 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4638 4639 pvops = &vdev_mirror_ops; 4640 } else { 4641 /* 4642 * Active hot spares can only be replaced by inactive hot 4643 * spares. 4644 */ 4645 if (pvd->vdev_ops == &vdev_spare_ops && 4646 oldvd->vdev_isspare && 4647 !spa_has_spare(spa, newvd->vdev_guid)) 4648 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4649 4650 /* 4651 * If the source is a hot spare, and the parent isn't already a 4652 * spare, then we want to create a new hot spare. Otherwise, we 4653 * want to create a replacing vdev. 
The user is not allowed to 4654 * attach to a spared vdev child unless the 'isspare' state is 4655 * the same (spare replaces spare, non-spare replaces 4656 * non-spare). 4657 */ 4658 if (pvd->vdev_ops == &vdev_replacing_ops && 4659 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) { 4660 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4661 } else if (pvd->vdev_ops == &vdev_spare_ops && 4662 newvd->vdev_isspare != oldvd->vdev_isspare) { 4663 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4664 } 4665 4666 if (newvd->vdev_isspare) 4667 pvops = &vdev_spare_ops; 4668 else 4669 pvops = &vdev_replacing_ops; 4670 } 4671 4672 /* 4673 * Make sure the new device is big enough. 4674 */ 4675 if (newvd->vdev_asize < vdev_get_min_asize(oldvd)) 4676 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 4677 4678 /* 4679 * The new device cannot have a higher alignment requirement 4680 * than the top-level vdev. 4681 */ 4682 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 4683 return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 4684 4685 /* 4686 * If this is an in-place replacement, update oldvd's path and devid 4687 * to make it distinguishable from newvd, and unopenable from now on. 4688 */ 4689 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 4690 spa_strfree(oldvd->vdev_path); 4691 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 4692 KM_SLEEP); 4693 (void) sprintf(oldvd->vdev_path, "%s/%s", 4694 newvd->vdev_path, "old"); 4695 if (oldvd->vdev_devid != NULL) { 4696 spa_strfree(oldvd->vdev_devid); 4697 oldvd->vdev_devid = NULL; 4698 } 4699 } 4700 4701 /* mark the device being resilvered */ 4702 newvd->vdev_resilver_txg = txg; 4703 4704 /* 4705 * If the parent is not a mirror, or if we're replacing, insert the new 4706 * mirror/replacing/spare vdev above oldvd. 4707 */ 4708 if (pvd->vdev_ops != pvops) 4709 pvd = vdev_add_parent(oldvd, pvops); 4710 4711 ASSERT(pvd->vdev_top->vdev_parent == rvd); 4712 ASSERT(pvd->vdev_ops == pvops); 4713 ASSERT(oldvd->vdev_parent == pvd); 4714 4715 /* 4716 * Extract the new device from its root and add it to pvd. 4717 */ 4718 vdev_remove_child(newrootvd, newvd); 4719 newvd->vdev_id = pvd->vdev_children; 4720 newvd->vdev_crtxg = oldvd->vdev_crtxg; 4721 vdev_add_child(pvd, newvd); 4722 4723 tvd = newvd->vdev_top; 4724 ASSERT(pvd->vdev_top == tvd); 4725 ASSERT(tvd->vdev_parent == rvd); 4726 4727 vdev_config_dirty(tvd); 4728 4729 /* 4730 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account 4731 * for any dmu_sync-ed blocks. It will propagate upward when 4732 * spa_vdev_exit() calls vdev_dtl_reassess(). 4733 */ 4734 dtl_max_txg = txg + TXG_CONCURRENT_STATES; 4735 4736 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL, 4737 dtl_max_txg - TXG_INITIAL); 4738 4739 if (newvd->vdev_isspare) { 4740 spa_spare_activate(newvd); 4741 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE); 4742 } 4743 4744 oldvdpath = spa_strdup(oldvd->vdev_path); 4745 newvdpath = spa_strdup(newvd->vdev_path); 4746 newvd_isspare = newvd->vdev_isspare; 4747 4748 /* 4749 * Mark newvd's DTL dirty in this txg. 4750 */ 4751 vdev_dirty(tvd, VDD_DTL, newvd, txg); 4752 4753 /* 4754 * Schedule the resilver to restart in the future. We do this to 4755 * ensure that dmu_sync-ed blocks have been stitched into the 4756 * respective datasets. 
4757 */ 4758 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg); 4759 4760 if (spa->spa_bootfs) 4761 spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH); 4762 4763 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_ATTACH); 4764 4765 /* 4766 * Commit the config 4767 */ 4768 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0); 4769 4770 spa_history_log_internal(spa, "vdev attach", NULL, 4771 "%s vdev=%s %s vdev=%s", 4772 replacing && newvd_isspare ? "spare in" : 4773 replacing ? "replace" : "attach", newvdpath, 4774 replacing ? "for" : "to", oldvdpath); 4775 4776 spa_strfree(oldvdpath); 4777 spa_strfree(newvdpath); 4778 4779 return (0); 4780 } 4781 4782 /* 4783 * Detach a device from a mirror or replacing vdev. 4784 * 4785 * If 'replace_done' is specified, only detach if the parent 4786 * is a replacing vdev. 4787 */ 4788 int 4789 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) 4790 { 4791 uint64_t txg; 4792 int error; 4793 vdev_t *rvd = spa->spa_root_vdev; 4794 vdev_t *vd, *pvd, *cvd, *tvd; 4795 boolean_t unspare = B_FALSE; 4796 uint64_t unspare_guid = 0; 4797 char *vdpath; 4798 4799 ASSERT(spa_writeable(spa)); 4800 4801 txg = spa_vdev_enter(spa); 4802 4803 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 4804 4805 if (vd == NULL) 4806 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 4807 4808 if (!vd->vdev_ops->vdev_op_leaf) 4809 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4810 4811 pvd = vd->vdev_parent; 4812 4813 /* 4814 * If the parent/child relationship is not as expected, don't do it. 4815 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing 4816 * vdev that's replacing B with C. The user's intent in replacing 4817 * is to go from M(A,B) to M(A,C). If the user decides to cancel 4818 * the replace by detaching C, the expected behavior is to end up 4819 * M(A,B). But suppose that right after deciding to detach C, 4820 * the replacement of B completes. We would have M(A,C), and then 4821 * ask to detach C, which would leave us with just A -- not what 4822 * the user wanted. To prevent this, we make sure that the 4823 * parent/child relationship hasn't changed -- in this example, 4824 * that C's parent is still the replacing vdev R. 4825 */ 4826 if (pvd->vdev_guid != pguid && pguid != 0) 4827 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 4828 4829 /* 4830 * Only 'replacing' or 'spare' vdevs can be replaced. 4831 */ 4832 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops && 4833 pvd->vdev_ops != &vdev_spare_ops) 4834 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4835 4836 ASSERT(pvd->vdev_ops != &vdev_spare_ops || 4837 spa_version(spa) >= SPA_VERSION_SPARES); 4838 4839 /* 4840 * Only mirror, replacing, and spare vdevs support detach. 4841 */ 4842 if (pvd->vdev_ops != &vdev_replacing_ops && 4843 pvd->vdev_ops != &vdev_mirror_ops && 4844 pvd->vdev_ops != &vdev_spare_ops) 4845 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4846 4847 /* 4848 * If this device has the only valid copy of some data, 4849 * we cannot safely detach it. 4850 */ 4851 if (vdev_dtl_required(vd)) 4852 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 4853 4854 ASSERT(pvd->vdev_children >= 2); 4855 4856 /* 4857 * If we are detaching the second disk from a replacing vdev, then 4858 * check to see if we changed the original vdev's path to have "/old" 4859 * at the end in spa_vdev_attach(). If so, undo that change now. 
4860 */ 4861 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 && 4862 vd->vdev_path != NULL) { 4863 size_t len = strlen(vd->vdev_path); 4864 4865 for (int c = 0; c < pvd->vdev_children; c++) { 4866 cvd = pvd->vdev_child[c]; 4867 4868 if (cvd == vd || cvd->vdev_path == NULL) 4869 continue; 4870 4871 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 4872 strcmp(cvd->vdev_path + len, "/old") == 0) { 4873 spa_strfree(cvd->vdev_path); 4874 cvd->vdev_path = spa_strdup(vd->vdev_path); 4875 break; 4876 } 4877 } 4878 } 4879 4880 /* 4881 * If we are detaching the original disk from a spare, then it implies 4882 * that the spare should become a real disk, and be removed from the 4883 * active spare list for the pool. 4884 */ 4885 if (pvd->vdev_ops == &vdev_spare_ops && 4886 vd->vdev_id == 0 && 4887 pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare) 4888 unspare = B_TRUE; 4889 4890 /* 4891 * Erase the disk labels so the disk can be used for other things. 4892 * This must be done after all other error cases are handled, 4893 * but before we disembowel vd (so we can still do I/O to it). 4894 * But if we can't do it, don't treat the error as fatal -- 4895 * it may be that the unwritability of the disk is the reason 4896 * it's being detached! 4897 */ 4898 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 4899 4900 /* 4901 * Remove vd from its parent and compact the parent's children. 4902 */ 4903 vdev_remove_child(pvd, vd); 4904 vdev_compact_children(pvd); 4905 4906 /* 4907 * Remember one of the remaining children so we can get tvd below. 4908 */ 4909 cvd = pvd->vdev_child[pvd->vdev_children - 1]; 4910 4911 /* 4912 * If we need to remove the remaining child from the list of hot spares, 4913 * do it now, marking the vdev as no longer a spare in the process. 4914 * We must do this before vdev_remove_parent(), because that can 4915 * change the GUID if it creates a new toplevel GUID. For a similar 4916 * reason, we must remove the spare now, in the same txg as the detach; 4917 * otherwise someone could attach a new sibling, change the GUID, and 4918 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail. 4919 */ 4920 if (unspare) { 4921 ASSERT(cvd->vdev_isspare); 4922 spa_spare_remove(cvd); 4923 unspare_guid = cvd->vdev_guid; 4924 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 4925 cvd->vdev_unspare = B_TRUE; 4926 } 4927 4928 /* 4929 * If the parent mirror/replacing vdev only has one child, 4930 * the parent is no longer needed. Remove it from the tree. 4931 */ 4932 if (pvd->vdev_children == 1) { 4933 if (pvd->vdev_ops == &vdev_spare_ops) 4934 cvd->vdev_unspare = B_FALSE; 4935 vdev_remove_parent(cvd); 4936 } 4937 4938 4939 /* 4940 * We don't set tvd until now because the parent we just removed 4941 * may have been the previous top-level vdev. 4942 */ 4943 tvd = cvd->vdev_top; 4944 ASSERT(tvd->vdev_parent == rvd); 4945 4946 /* 4947 * Reevaluate the parent vdev state. 4948 */ 4949 vdev_propagate_state(cvd); 4950 4951 /* 4952 * If the 'autoexpand' property is set on the pool then automatically 4953 * try to expand the size of the pool. For example if the device we 4954 * just detached was smaller than the others, it may be possible to 4955 * add metaslabs (i.e. grow the pool). We need to reopen the vdev 4956 * first so that we can obtain the updated sizes of the leaf vdevs. 4957 */ 4958 if (spa->spa_autoexpand) { 4959 vdev_reopen(tvd); 4960 vdev_expand(tvd, txg); 4961 } 4962 4963 vdev_config_dirty(tvd); 4964 4965 /* 4966 * Mark vd's DTL as dirty in this txg. 
vdev_dtl_sync() will see that 4967 * vd->vdev_detached is set and free vd's DTL object in syncing context. 4968 * But first make sure we're not on any *other* txg's DTL list, to 4969 * prevent vd from being accessed after it's freed. 4970 */ 4971 vdpath = spa_strdup(vd->vdev_path); 4972 for (int t = 0; t < TXG_SIZE; t++) 4973 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 4974 vd->vdev_detached = B_TRUE; 4975 vdev_dirty(tvd, VDD_DTL, vd, txg); 4976 4977 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE); 4978 4979 /* hang on to the spa before we release the lock */ 4980 spa_open_ref(spa, FTAG); 4981 4982 error = spa_vdev_exit(spa, vd, txg, 0); 4983 4984 spa_history_log_internal(spa, "detach", NULL, 4985 "vdev=%s", vdpath); 4986 spa_strfree(vdpath); 4987 4988 /* 4989 * If this was the removal of the original device in a hot spare vdev, 4990 * then we want to go through and remove the device from the hot spare 4991 * list of every other pool. 4992 */ 4993 if (unspare) { 4994 spa_t *altspa = NULL; 4995 4996 mutex_enter(&spa_namespace_lock); 4997 while ((altspa = spa_next(altspa)) != NULL) { 4998 if (altspa->spa_state != POOL_STATE_ACTIVE || 4999 altspa == spa) 5000 continue; 5001 5002 spa_open_ref(altspa, FTAG); 5003 mutex_exit(&spa_namespace_lock); 5004 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE); 5005 mutex_enter(&spa_namespace_lock); 5006 spa_close(altspa, FTAG); 5007 } 5008 mutex_exit(&spa_namespace_lock); 5009 5010 /* search the rest of the vdevs for spares to remove */ 5011 spa_vdev_resilver_done(spa); 5012 } 5013 5014 /* all done with the spa; OK to release */ 5015 mutex_enter(&spa_namespace_lock); 5016 spa_close(spa, FTAG); 5017 mutex_exit(&spa_namespace_lock); 5018 5019 return (error); 5020 } 5021 5022 /* 5023 * Split a set of devices from their mirrors, and create a new pool from them. 
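* Every device named in the split config must be a healthy leaf of a top-level mirror; log, hole, spare, and cache devices may not take part in the split.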
5024 */ 5025 int 5026 spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config, 5027 nvlist_t *props, boolean_t exp) 5028 { 5029 int error = 0; 5030 uint64_t txg, *glist; 5031 spa_t *newspa; 5032 uint_t c, children, lastlog; 5033 nvlist_t **child, *nvl, *tmp; 5034 dmu_tx_t *tx; 5035 char *altroot = NULL; 5036 vdev_t *rvd, **vml = NULL; /* vdev modify list */ 5037 boolean_t activate_slog; 5038 5039 ASSERT(spa_writeable(spa)); 5040 5041 txg = spa_vdev_enter(spa); 5042 5043 /* clear the log and flush everything up to now */ 5044 activate_slog = spa_passivate_log(spa); 5045 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 5046 error = spa_offline_log(spa); 5047 txg = spa_vdev_config_enter(spa); 5048 5049 if (activate_slog) 5050 spa_activate_log(spa); 5051 5052 if (error != 0) 5053 return (spa_vdev_exit(spa, NULL, txg, error)); 5054 5055 /* check new spa name before going any further */ 5056 if (spa_lookup(newname) != NULL) 5057 return (spa_vdev_exit(spa, NULL, txg, EEXIST)); 5058 5059 /* 5060 * scan through all the children to ensure they're all mirrors 5061 */ 5062 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 || 5063 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child, 5064 &children) != 0) 5065 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 5066 5067 /* first, check to ensure we've got the right child count */ 5068 rvd = spa->spa_root_vdev; 5069 lastlog = 0; 5070 for (c = 0; c < rvd->vdev_children; c++) { 5071 vdev_t *vd = rvd->vdev_child[c]; 5072 5073 /* don't count the holes & logs as children */ 5074 if (vd->vdev_islog || vd->vdev_ishole) { 5075 if (lastlog == 0) 5076 lastlog = c; 5077 continue; 5078 } 5079 5080 lastlog = 0; 5081 } 5082 if (children != (lastlog != 0 ? lastlog : rvd->vdev_children)) 5083 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 5084 5085 /* next, ensure no spare or cache devices are part of the split */ 5086 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 || 5087 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0) 5088 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 5089 5090 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP); 5091 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP); 5092 5093 /* then, loop over each vdev and validate it */ 5094 for (c = 0; c < children; c++) { 5095 uint64_t is_hole = 0; 5096 5097 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 5098 &is_hole); 5099 5100 if (is_hole != 0) { 5101 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole || 5102 spa->spa_root_vdev->vdev_child[c]->vdev_islog) { 5103 continue; 5104 } else { 5105 error = SET_ERROR(EINVAL); 5106 break; 5107 } 5108 } 5109 5110 /* which disk is going to be split? 
*/ 5111 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID, 5112 &glist[c]) != 0) { 5113 error = SET_ERROR(EINVAL); 5114 break; 5115 } 5116 5117 /* look it up in the spa */ 5118 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE); 5119 if (vml[c] == NULL) { 5120 error = SET_ERROR(ENODEV); 5121 break; 5122 } 5123 5124 /* make sure there's nothing stopping the split */ 5125 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops || 5126 vml[c]->vdev_islog || 5127 vml[c]->vdev_ishole || 5128 vml[c]->vdev_isspare || 5129 vml[c]->vdev_isl2cache || 5130 !vdev_writeable(vml[c]) || 5131 vml[c]->vdev_children != 0 || 5132 vml[c]->vdev_state != VDEV_STATE_HEALTHY || 5133 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) { 5134 error = SET_ERROR(EINVAL); 5135 break; 5136 } 5137 5138 if (vdev_dtl_required(vml[c])) { 5139 error = SET_ERROR(EBUSY); 5140 break; 5141 } 5142 5143 /* we need certain info from the top level */ 5144 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY, 5145 vml[c]->vdev_top->vdev_ms_array) == 0); 5146 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT, 5147 vml[c]->vdev_top->vdev_ms_shift) == 0); 5148 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE, 5149 vml[c]->vdev_top->vdev_asize) == 0); 5150 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT, 5151 vml[c]->vdev_top->vdev_ashift) == 0); 5152 5153 /* transfer per-vdev ZAPs */ 5154 ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0); 5155 VERIFY0(nvlist_add_uint64(child[c], 5156 ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap)); 5157 5158 ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0); 5159 VERIFY0(nvlist_add_uint64(child[c], 5160 ZPOOL_CONFIG_VDEV_TOP_ZAP, 5161 vml[c]->vdev_parent->vdev_top_zap)); 5162 } 5163 5164 if (error != 0) { 5165 kmem_free(vml, children * sizeof (vdev_t *)); 5166 kmem_free(glist, children * sizeof (uint64_t)); 5167 return (spa_vdev_exit(spa, NULL, txg, error)); 5168 } 5169 5170 /* stop writers from using the disks */ 5171 for (c = 0; c < children; c++) { 5172 if (vml[c] != NULL) 5173 vml[c]->vdev_offline = B_TRUE; 5174 } 5175 vdev_reopen(spa->spa_root_vdev); 5176 5177 /* 5178 * Temporarily record the splitting vdevs in the spa config. This 5179 * will disappear once the config is regenerated. 5180 */ 5181 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0); 5182 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 5183 glist, children) == 0); 5184 kmem_free(glist, children * sizeof (uint64_t)); 5185 5186 mutex_enter(&spa->spa_props_lock); 5187 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, 5188 nvl) == 0); 5189 mutex_exit(&spa->spa_props_lock); 5190 spa->spa_config_splitting = nvl; 5191 vdev_config_dirty(spa->spa_root_vdev); 5192 5193 /* configure and create the new pool */ 5194 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0); 5195 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 5196 exp ? 
POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0); 5197 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, 5198 spa_version(spa)) == 0); 5199 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, 5200 spa->spa_config_txg) == 0); 5201 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, 5202 spa_generate_guid(NULL)) == 0); 5203 VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)); 5204 (void) nvlist_lookup_string(props, 5205 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 5206 5207 /* add the new pool to the namespace */ 5208 newspa = spa_add(newname, config, altroot); 5209 newspa->spa_avz_action = AVZ_ACTION_REBUILD; 5210 newspa->spa_config_txg = spa->spa_config_txg; 5211 spa_set_log_state(newspa, SPA_LOG_CLEAR); 5212 5213 /* release the spa config lock, retaining the namespace lock */ 5214 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 5215 5216 if (zio_injection_enabled) 5217 zio_handle_panic_injection(spa, FTAG, 1); 5218 5219 spa_activate(newspa, spa_mode_global); 5220 spa_async_suspend(newspa); 5221 5222 /* create the new pool from the disks of the original pool */ 5223 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE); 5224 if (error) 5225 goto out; 5226 5227 /* if that worked, generate a real config for the new pool */ 5228 if (newspa->spa_root_vdev != NULL) { 5229 VERIFY(nvlist_alloc(&newspa->spa_config_splitting, 5230 NV_UNIQUE_NAME, KM_SLEEP) == 0); 5231 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting, 5232 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0); 5233 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL, 5234 B_TRUE)); 5235 } 5236 5237 /* set the props */ 5238 if (props != NULL) { 5239 spa_configfile_set(newspa, props, B_FALSE); 5240 error = spa_prop_set(newspa, props); 5241 if (error) 5242 goto out; 5243 } 5244 5245 /* flush everything */ 5246 txg = spa_vdev_config_enter(newspa); 5247 vdev_config_dirty(newspa->spa_root_vdev); 5248 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG); 5249 5250 if (zio_injection_enabled) 5251 zio_handle_panic_injection(spa, FTAG, 2); 5252 5253 spa_async_resume(newspa); 5254 5255 /* finally, update the original pool's config */ 5256 txg = spa_vdev_config_enter(spa); 5257 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 5258 error = dmu_tx_assign(tx, TXG_WAIT); 5259 if (error != 0) 5260 dmu_tx_abort(tx); 5261 for (c = 0; c < children; c++) { 5262 if (vml[c] != NULL) { 5263 vdev_split(vml[c]); 5264 if (error == 0) 5265 spa_history_log_internal(spa, "detach", tx, 5266 "vdev=%s", vml[c]->vdev_path); 5267 5268 vdev_free(vml[c]); 5269 } 5270 } 5271 spa->spa_avz_action = AVZ_ACTION_REBUILD; 5272 vdev_config_dirty(spa->spa_root_vdev); 5273 spa->spa_config_splitting = NULL; 5274 nvlist_free(nvl); 5275 if (error == 0) 5276 dmu_tx_commit(tx); 5277 (void) spa_vdev_exit(spa, NULL, txg, 0); 5278 5279 if (zio_injection_enabled) 5280 zio_handle_panic_injection(spa, FTAG, 3); 5281 5282 /* split is complete; log a history record */ 5283 spa_history_log_internal(newspa, "split", NULL, 5284 "from pool %s", spa_name(spa)); 5285 5286 kmem_free(vml, children * sizeof (vdev_t *)); 5287 5288 /* if we're not going to mount the filesystems in userland, export */ 5289 if (exp) 5290 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL, 5291 B_FALSE, B_FALSE); 5292 5293 return (error); 5294 5295 out: 5296 spa_unload(newspa); 5297 spa_deactivate(newspa); 5298 spa_remove(newspa); 5299 5300 txg = spa_vdev_config_enter(spa); 5301 5302 /* re-online all offlined disks */ 5303 for (c = 0; c < children; 
c++) { 5304 if (vml[c] != NULL) 5305 vml[c]->vdev_offline = B_FALSE; 5306 } 5307 vdev_reopen(spa->spa_root_vdev); 5308 5309 nvlist_free(spa->spa_config_splitting); 5310 spa->spa_config_splitting = NULL; 5311 (void) spa_vdev_exit(spa, NULL, txg, error); 5312 5313 kmem_free(vml, children * sizeof (vdev_t *)); 5314 return (error); 5315 } 5316 5317 static nvlist_t * 5318 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid) 5319 { 5320 for (int i = 0; i < count; i++) { 5321 uint64_t guid; 5322 5323 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID, 5324 &guid) == 0); 5325 5326 if (guid == target_guid) 5327 return (nvpp[i]); 5328 } 5329 5330 return (NULL); 5331 } 5332 5333 static void 5334 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count, 5335 nvlist_t *dev_to_remove) 5336 { 5337 nvlist_t **newdev = NULL; 5338 5339 if (count > 1) 5340 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP); 5341 5342 for (int i = 0, j = 0; i < count; i++) { 5343 if (dev[i] == dev_to_remove) 5344 continue; 5345 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0); 5346 } 5347 5348 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0); 5349 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0); 5350 5351 for (int i = 0; i < count - 1; i++) 5352 nvlist_free(newdev[i]); 5353 5354 if (count > 1) 5355 kmem_free(newdev, (count - 1) * sizeof (void *)); 5356 } 5357 5358 /* 5359 * Evacuate the device. 5360 */ 5361 static int 5362 spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd) 5363 { 5364 uint64_t txg; 5365 int error = 0; 5366 5367 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5368 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 5369 ASSERT(vd == vd->vdev_top); 5370 5371 /* 5372 * Evacuate the device. We don't hold the config lock as writer 5373 * since we need to do I/O but we do keep the 5374 * spa_namespace_lock held. Once this completes the device 5375 * should no longer have any blocks allocated on it. 5376 */ 5377 if (vd->vdev_islog) { 5378 if (vd->vdev_stat.vs_alloc != 0) 5379 error = spa_offline_log(spa); 5380 } else { 5381 error = SET_ERROR(ENOTSUP); 5382 } 5383 5384 if (error) 5385 return (error); 5386 5387 /* 5388 * The evacuation succeeded. Remove any remaining MOS metadata 5389 * associated with this vdev, and wait for these changes to sync. 5390 */ 5391 ASSERT0(vd->vdev_stat.vs_alloc); 5392 txg = spa_vdev_config_enter(spa); 5393 vd->vdev_removing = B_TRUE; 5394 vdev_dirty_leaves(vd, VDD_DTL, txg); 5395 vdev_config_dirty(vd); 5396 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 5397 5398 return (0); 5399 } 5400 5401 /* 5402 * Complete the removal by cleaning up the namespace. 5403 */ 5404 static void 5405 spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd) 5406 { 5407 vdev_t *rvd = spa->spa_root_vdev; 5408 uint64_t id = vd->vdev_id; 5409 boolean_t last_vdev = (id == (rvd->vdev_children - 1)); 5410 5411 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5412 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 5413 ASSERT(vd == vd->vdev_top); 5414 5415 /* 5416 * Only remove any devices which are empty. 
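* If the freed slot is not the last top-level vdev, a hole vdev is put in its place so that the remaining vdev ids stay stable.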
5417 */ 5418 if (vd->vdev_stat.vs_alloc != 0) 5419 return; 5420 5421 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 5422 5423 if (list_link_active(&vd->vdev_state_dirty_node)) 5424 vdev_state_clean(vd); 5425 if (list_link_active(&vd->vdev_config_dirty_node)) 5426 vdev_config_clean(vd); 5427 5428 vdev_free(vd); 5429 5430 if (last_vdev) { 5431 vdev_compact_children(rvd); 5432 } else { 5433 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops); 5434 vdev_add_child(rvd, vd); 5435 } 5436 vdev_config_dirty(rvd); 5437 5438 /* 5439 * Reassess the health of our root vdev. 5440 */ 5441 vdev_reopen(rvd); 5442 } 5443 5444 /* 5445 * Remove a device from the pool - 5446 * 5447 * Removing a device from the vdev namespace requires several steps 5448 * and can take a significant amount of time. As a result we use 5449 * the spa_vdev_config_[enter/exit] functions which allow us to 5450 * grab and release the spa_config_lock while still holding the namespace 5451 * lock. During each step the configuration is synced out. 5452 * 5453 * Currently, this supports removing only hot spares, slogs, and level 2 ARC 5454 * devices. 5455 */ 5456 int 5457 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 5458 { 5459 vdev_t *vd; 5460 sysevent_t *ev = NULL; 5461 metaslab_group_t *mg; 5462 nvlist_t **spares, **l2cache, *nv; 5463 uint64_t txg = 0; 5464 uint_t nspares, nl2cache; 5465 int error = 0; 5466 boolean_t locked = MUTEX_HELD(&spa_namespace_lock); 5467 5468 ASSERT(spa_writeable(spa)); 5469 5470 if (!locked) 5471 txg = spa_vdev_enter(spa); 5472 5473 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 5474 5475 if (spa->spa_spares.sav_vdevs != NULL && 5476 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 5477 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 && 5478 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) { 5479 /* 5480 * Only remove the hot spare if it's not currently in use 5481 * in this pool. 5482 */ 5483 if (vd == NULL || unspare) { 5484 if (vd == NULL) 5485 vd = spa_lookup_by_guid(spa, guid, B_TRUE); 5486 ev = spa_event_create(spa, vd, ESC_ZFS_VDEV_REMOVE_AUX); 5487 spa_vdev_remove_aux(spa->spa_spares.sav_config, 5488 ZPOOL_CONFIG_SPARES, spares, nspares, nv); 5489 spa_load_spares(spa); 5490 spa->spa_spares.sav_sync = B_TRUE; 5491 } else { 5492 error = SET_ERROR(EBUSY); 5493 } 5494 } else if (spa->spa_l2cache.sav_vdevs != NULL && 5495 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 5496 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 && 5497 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) { 5498 /* 5499 * Cache devices can always be removed. 5500 */ 5501 vd = spa_lookup_by_guid(spa, guid, B_TRUE); 5502 ev = spa_event_create(spa, vd, ESC_ZFS_VDEV_REMOVE_AUX); 5503 spa_vdev_remove_aux(spa->spa_l2cache.sav_config, 5504 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv); 5505 spa_load_l2cache(spa); 5506 spa->spa_l2cache.sav_sync = B_TRUE; 5507 } else if (vd != NULL && vd->vdev_islog) { 5508 ASSERT(!locked); 5509 ASSERT(vd == vd->vdev_top); 5510 5511 mg = vd->vdev_mg; 5512 5513 /* 5514 * Stop allocating from this vdev. 5515 */ 5516 metaslab_group_passivate(mg); 5517 5518 /* 5519 * Wait for the youngest allocations and frees to sync, 5520 * and then wait for the deferral of those frees to finish. 5521 */ 5522 spa_vdev_config_exit(spa, NULL, 5523 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG); 5524 5525 /* 5526 * Attempt to evacuate the vdev. 
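* Evacuation currently succeeds only for log devices; spa_vdev_remove_evacuate() returns ENOTSUP for anything else.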
5527 */ 5528 error = spa_vdev_remove_evacuate(spa, vd); 5529 5530 txg = spa_vdev_config_enter(spa); 5531 5532 /* 5533 * If we couldn't evacuate the vdev, unwind. 5534 */ 5535 if (error) { 5536 metaslab_group_activate(mg); 5537 return (spa_vdev_exit(spa, NULL, txg, error)); 5538 } 5539 5540 /* 5541 * Clean up the vdev namespace. 5542 */ 5543 ev = spa_event_create(spa, vd, ESC_ZFS_VDEV_REMOVE_DEV); 5544 spa_vdev_remove_from_namespace(spa, vd); 5545 5546 } else if (vd != NULL) { 5547 /* 5548 * Normal vdevs cannot be removed (yet). 5549 */ 5550 error = SET_ERROR(ENOTSUP); 5551 } else { 5552 /* 5553 * There is no vdev of any kind with the specified guid. 5554 */ 5555 error = SET_ERROR(ENOENT); 5556 } 5557 5558 if (!locked) 5559 error = spa_vdev_exit(spa, NULL, txg, error); 5560 5561 if (ev) 5562 spa_event_post(ev); 5563 5564 return (error); 5565 } 5566 5567 /* 5568 * Find any device that's done replacing, or a vdev marked 'unspare' that's 5569 * currently spared, so we can detach it. 5570 */ 5571 static vdev_t * 5572 spa_vdev_resilver_done_hunt(vdev_t *vd) 5573 { 5574 vdev_t *newvd, *oldvd; 5575 5576 for (int c = 0; c < vd->vdev_children; c++) { 5577 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 5578 if (oldvd != NULL) 5579 return (oldvd); 5580 } 5581 5582 /* 5583 * Check for a completed replacement. We always consider the first 5584 * vdev in the list to be the oldest vdev, and the last one to be 5585 * the newest (see spa_vdev_attach() for how that works). In 5586 * the case where the newest vdev is faulted, we will not automatically 5587 * remove it after a resilver completes. This is OK as it will require 5588 * user intervention to determine which disk the admin wishes to keep. 5589 */ 5590 if (vd->vdev_ops == &vdev_replacing_ops) { 5591 ASSERT(vd->vdev_children > 1); 5592 5593 newvd = vd->vdev_child[vd->vdev_children - 1]; 5594 oldvd = vd->vdev_child[0]; 5595 5596 if (vdev_dtl_empty(newvd, DTL_MISSING) && 5597 vdev_dtl_empty(newvd, DTL_OUTAGE) && 5598 !vdev_dtl_required(oldvd)) 5599 return (oldvd); 5600 } 5601 5602 /* 5603 * Check for a completed resilver with the 'unspare' flag set. 5604 */ 5605 if (vd->vdev_ops == &vdev_spare_ops) { 5606 vdev_t *first = vd->vdev_child[0]; 5607 vdev_t *last = vd->vdev_child[vd->vdev_children - 1]; 5608 5609 if (last->vdev_unspare) { 5610 oldvd = first; 5611 newvd = last; 5612 } else if (first->vdev_unspare) { 5613 oldvd = last; 5614 newvd = first; 5615 } else { 5616 oldvd = NULL; 5617 } 5618 5619 if (oldvd != NULL && 5620 vdev_dtl_empty(newvd, DTL_MISSING) && 5621 vdev_dtl_empty(newvd, DTL_OUTAGE) && 5622 !vdev_dtl_required(oldvd)) 5623 return (oldvd); 5624 5625 /* 5626 * If there are more than two spares attached to a disk, 5627 * and those spares are not required, then we want to 5628 * attempt to free them up now so that they can be used 5629 * by other pools. Once we're back down to a single 5630 * disk+spare, we stop removing them. 
5631 */ 5632 if (vd->vdev_children > 2) { 5633 newvd = vd->vdev_child[1]; 5634 5635 if (newvd->vdev_isspare && last->vdev_isspare && 5636 vdev_dtl_empty(last, DTL_MISSING) && 5637 vdev_dtl_empty(last, DTL_OUTAGE) && 5638 !vdev_dtl_required(newvd)) 5639 return (newvd); 5640 } 5641 } 5642 5643 return (NULL); 5644 } 5645 5646 static void 5647 spa_vdev_resilver_done(spa_t *spa) 5648 { 5649 vdev_t *vd, *pvd, *ppvd; 5650 uint64_t guid, sguid, pguid, ppguid; 5651 5652 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5653 5654 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 5655 pvd = vd->vdev_parent; 5656 ppvd = pvd->vdev_parent; 5657 guid = vd->vdev_guid; 5658 pguid = pvd->vdev_guid; 5659 ppguid = ppvd->vdev_guid; 5660 sguid = 0; 5661 /* 5662 * If we have just finished replacing a hot spared device, then 5663 * we need to detach the parent's first child (the original hot 5664 * spare) as well. 5665 */ 5666 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 && 5667 ppvd->vdev_children == 2) { 5668 ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 5669 sguid = ppvd->vdev_child[1]->vdev_guid; 5670 } 5671 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd)); 5672 5673 spa_config_exit(spa, SCL_ALL, FTAG); 5674 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0) 5675 return; 5676 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0) 5677 return; 5678 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5679 } 5680 5681 spa_config_exit(spa, SCL_ALL, FTAG); 5682 } 5683 5684 /* 5685 * Update the stored path or FRU for this vdev. 5686 */ 5687 int 5688 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value, 5689 boolean_t ispath) 5690 { 5691 vdev_t *vd; 5692 boolean_t sync = B_FALSE; 5693 5694 ASSERT(spa_writeable(spa)); 5695 5696 spa_vdev_state_enter(spa, SCL_ALL); 5697 5698 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 5699 return (spa_vdev_state_exit(spa, NULL, ENOENT)); 5700 5701 if (!vd->vdev_ops->vdev_op_leaf) 5702 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 5703 5704 if (ispath) { 5705 if (strcmp(value, vd->vdev_path) != 0) { 5706 spa_strfree(vd->vdev_path); 5707 vd->vdev_path = spa_strdup(value); 5708 sync = B_TRUE; 5709 } 5710 } else { 5711 if (vd->vdev_fru == NULL) { 5712 vd->vdev_fru = spa_strdup(value); 5713 sync = B_TRUE; 5714 } else if (strcmp(value, vd->vdev_fru) != 0) { 5715 spa_strfree(vd->vdev_fru); 5716 vd->vdev_fru = spa_strdup(value); 5717 sync = B_TRUE; 5718 } 5719 } 5720 5721 return (spa_vdev_state_exit(spa, sync ? 
vd : NULL, 0)); 5722 } 5723 5724 int 5725 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 5726 { 5727 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE)); 5728 } 5729 5730 int 5731 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru) 5732 { 5733 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE)); 5734 } 5735 5736 /* 5737 * ========================================================================== 5738 * SPA Scanning 5739 * ========================================================================== 5740 */ 5741 5742 int 5743 spa_scan_stop(spa_t *spa) 5744 { 5745 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 5746 if (dsl_scan_resilvering(spa->spa_dsl_pool)) 5747 return (SET_ERROR(EBUSY)); 5748 return (dsl_scan_cancel(spa->spa_dsl_pool)); 5749 } 5750 5751 int 5752 spa_scan(spa_t *spa, pool_scan_func_t func) 5753 { 5754 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 5755 5756 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE) 5757 return (SET_ERROR(ENOTSUP)); 5758 5759 /* 5760 * If a resilver was requested, but there is no DTL on a 5761 * writeable leaf device, we have nothing to do. 5762 */ 5763 if (func == POOL_SCAN_RESILVER && 5764 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 5765 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 5766 return (0); 5767 } 5768 5769 return (dsl_scan(spa->spa_dsl_pool, func)); 5770 } 5771 5772 /* 5773 * ========================================================================== 5774 * SPA async task processing 5775 * ========================================================================== 5776 */ 5777 5778 static void 5779 spa_async_remove(spa_t *spa, vdev_t *vd) 5780 { 5781 if (vd->vdev_remove_wanted) { 5782 vd->vdev_remove_wanted = B_FALSE; 5783 vd->vdev_delayed_close = B_FALSE; 5784 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); 5785 5786 /* 5787 * We want to clear the stats, but we don't want to do a full 5788 * vdev_clear() as that will cause us to throw away 5789 * degraded/faulted state as well as attempt to reopen the 5790 * device, all of which is a waste. 
5791 */ 5792 vd->vdev_stat.vs_read_errors = 0; 5793 vd->vdev_stat.vs_write_errors = 0; 5794 vd->vdev_stat.vs_checksum_errors = 0; 5795 5796 vdev_state_dirty(vd->vdev_top); 5797 } 5798 5799 for (int c = 0; c < vd->vdev_children; c++) 5800 spa_async_remove(spa, vd->vdev_child[c]); 5801 } 5802 5803 static void 5804 spa_async_probe(spa_t *spa, vdev_t *vd) 5805 { 5806 if (vd->vdev_probe_wanted) { 5807 vd->vdev_probe_wanted = B_FALSE; 5808 vdev_reopen(vd); /* vdev_open() does the actual probe */ 5809 } 5810 5811 for (int c = 0; c < vd->vdev_children; c++) 5812 spa_async_probe(spa, vd->vdev_child[c]); 5813 } 5814 5815 static void 5816 spa_async_autoexpand(spa_t *spa, vdev_t *vd) 5817 { 5818 sysevent_id_t eid; 5819 nvlist_t *attr; 5820 char *physpath; 5821 5822 if (!spa->spa_autoexpand) 5823 return; 5824 5825 for (int c = 0; c < vd->vdev_children; c++) { 5826 vdev_t *cvd = vd->vdev_child[c]; 5827 spa_async_autoexpand(spa, cvd); 5828 } 5829 5830 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL) 5831 return; 5832 5833 physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 5834 (void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath); 5835 5836 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0); 5837 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0); 5838 5839 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS, 5840 ESC_DEV_DLE, attr, &eid, DDI_SLEEP); 5841 5842 nvlist_free(attr); 5843 kmem_free(physpath, MAXPATHLEN); 5844 } 5845 5846 static void 5847 spa_async_thread(spa_t *spa) 5848 { 5849 int tasks; 5850 5851 ASSERT(spa->spa_sync_on); 5852 5853 mutex_enter(&spa->spa_async_lock); 5854 tasks = spa->spa_async_tasks; 5855 spa->spa_async_tasks = 0; 5856 mutex_exit(&spa->spa_async_lock); 5857 5858 /* 5859 * See if the config needs to be updated. 5860 */ 5861 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 5862 uint64_t old_space, new_space; 5863 5864 mutex_enter(&spa_namespace_lock); 5865 old_space = metaslab_class_get_space(spa_normal_class(spa)); 5866 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 5867 new_space = metaslab_class_get_space(spa_normal_class(spa)); 5868 mutex_exit(&spa_namespace_lock); 5869 5870 /* 5871 * If the pool grew as a result of the config update, 5872 * then log an internal history event. 5873 */ 5874 if (new_space != old_space) { 5875 spa_history_log_internal(spa, "vdev online", NULL, 5876 "pool '%s' size: %llu(+%llu)", 5877 spa_name(spa), new_space, new_space - old_space); 5878 } 5879 } 5880 5881 /* 5882 * See if any devices need to be marked REMOVED. 5883 */ 5884 if (tasks & SPA_ASYNC_REMOVE) { 5885 spa_vdev_state_enter(spa, SCL_NONE); 5886 spa_async_remove(spa, spa->spa_root_vdev); 5887 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) 5888 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); 5889 for (int i = 0; i < spa->spa_spares.sav_count; i++) 5890 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); 5891 (void) spa_vdev_state_exit(spa, NULL, 0); 5892 } 5893 5894 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) { 5895 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 5896 spa_async_autoexpand(spa, spa->spa_root_vdev); 5897 spa_config_exit(spa, SCL_CONFIG, FTAG); 5898 } 5899 5900 /* 5901 * See if any devices need to be probed. 5902 */ 5903 if (tasks & SPA_ASYNC_PROBE) { 5904 spa_vdev_state_enter(spa, SCL_NONE); 5905 spa_async_probe(spa, spa->spa_root_vdev); 5906 (void) spa_vdev_state_exit(spa, NULL, 0); 5907 } 5908 5909 /* 5910 * If any devices are done replacing, detach them. 
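 * spa_vdev_resilver_done() repeatedly hunts for a fully-resilvered
 * replacing or spare vdev (see spa_vdev_resilver_done_hunt() above) and
 * detaches the now-redundant device, one at a time.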
5911 */ 5912 if (tasks & SPA_ASYNC_RESILVER_DONE) 5913 spa_vdev_resilver_done(spa); 5914 5915 /* 5916 * Kick off a resilver. 5917 */ 5918 if (tasks & SPA_ASYNC_RESILVER) 5919 dsl_resilver_restart(spa->spa_dsl_pool, 0); 5920 5921 /* 5922 * Let the world know that we're done. 5923 */ 5924 mutex_enter(&spa->spa_async_lock); 5925 spa->spa_async_thread = NULL; 5926 cv_broadcast(&spa->spa_async_cv); 5927 mutex_exit(&spa->spa_async_lock); 5928 thread_exit(); 5929 } 5930 5931 void 5932 spa_async_suspend(spa_t *spa) 5933 { 5934 mutex_enter(&spa->spa_async_lock); 5935 spa->spa_async_suspended++; 5936 while (spa->spa_async_thread != NULL) 5937 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 5938 mutex_exit(&spa->spa_async_lock); 5939 } 5940 5941 void 5942 spa_async_resume(spa_t *spa) 5943 { 5944 mutex_enter(&spa->spa_async_lock); 5945 ASSERT(spa->spa_async_suspended != 0); 5946 spa->spa_async_suspended--; 5947 mutex_exit(&spa->spa_async_lock); 5948 } 5949 5950 static boolean_t 5951 spa_async_tasks_pending(spa_t *spa) 5952 { 5953 uint_t non_config_tasks; 5954 uint_t config_task; 5955 boolean_t config_task_suspended; 5956 5957 non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE; 5958 config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE; 5959 if (spa->spa_ccw_fail_time == 0) { 5960 config_task_suspended = B_FALSE; 5961 } else { 5962 config_task_suspended = 5963 (gethrtime() - spa->spa_ccw_fail_time) < 5964 (zfs_ccw_retry_interval * NANOSEC); 5965 } 5966 5967 return (non_config_tasks || (config_task && !config_task_suspended)); 5968 } 5969 5970 static void 5971 spa_async_dispatch(spa_t *spa) 5972 { 5973 mutex_enter(&spa->spa_async_lock); 5974 if (spa_async_tasks_pending(spa) && 5975 !spa->spa_async_suspended && 5976 spa->spa_async_thread == NULL && 5977 rootdir != NULL) 5978 spa->spa_async_thread = thread_create(NULL, 0, 5979 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 5980 mutex_exit(&spa->spa_async_lock); 5981 } 5982 5983 void 5984 spa_async_request(spa_t *spa, int task) 5985 { 5986 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task); 5987 mutex_enter(&spa->spa_async_lock); 5988 spa->spa_async_tasks |= task; 5989 mutex_exit(&spa->spa_async_lock); 5990 } 5991 5992 /* 5993 * ========================================================================== 5994 * SPA syncing routines 5995 * ========================================================================== 5996 */ 5997 5998 static int 5999 bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 6000 { 6001 bpobj_t *bpo = arg; 6002 bpobj_enqueue(bpo, bp, tx); 6003 return (0); 6004 } 6005 6006 static int 6007 spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 6008 { 6009 zio_t *zio = arg; 6010 6011 zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp, 6012 zio->io_flags)); 6013 return (0); 6014 } 6015 6016 /* 6017 * Note: this simple function is not inlined to make it easier to dtrace the 6018 * amount of time spent syncing frees. 6019 */ 6020 static void 6021 spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx) 6022 { 6023 zio_t *zio = zio_root(spa, NULL, NULL, 0); 6024 bplist_iterate(bpl, spa_free_sync_cb, zio, tx); 6025 VERIFY(zio_wait(zio) == 0); 6026 } 6027 6028 /* 6029 * Note: this simple function is not inlined to make it easier to dtrace the 6030 * amount of time spent syncing deferred frees. 
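 * For example, a rough sketch using the fbt provider (probe availability
 * depends on the platform and build):
 *
 *	dtrace -n 'fbt::spa_sync_deferred_frees:entry { self->ts = timestamp; }
 *	    fbt::spa_sync_deferred_frees:return /self->ts/
 *	    { @["ns"] = sum(timestamp - self->ts); self->ts = 0; }'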
6031 */ 6032 static void 6033 spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx) 6034 { 6035 zio_t *zio = zio_root(spa, NULL, NULL, 0); 6036 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj, 6037 spa_free_sync_cb, zio, tx), ==, 0); 6038 VERIFY0(zio_wait(zio)); 6039 } 6040 6041 6042 static void 6043 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 6044 { 6045 char *packed = NULL; 6046 size_t bufsize; 6047 size_t nvsize = 0; 6048 dmu_buf_t *db; 6049 6050 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 6051 6052 /* 6053 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration 6054 * information. This avoids the dmu_buf_will_dirty() path and 6055 * saves us a pre-read to get data we don't actually care about. 6056 */ 6057 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE); 6058 packed = kmem_alloc(bufsize, KM_SLEEP); 6059 6060 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 6061 KM_SLEEP) == 0); 6062 bzero(packed + nvsize, bufsize - nvsize); 6063 6064 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx); 6065 6066 kmem_free(packed, bufsize); 6067 6068 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 6069 dmu_buf_will_dirty(db, tx); 6070 *(uint64_t *)db->db_data = nvsize; 6071 dmu_buf_rele(db, FTAG); 6072 } 6073 6074 static void 6075 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 6076 const char *config, const char *entry) 6077 { 6078 nvlist_t *nvroot; 6079 nvlist_t **list; 6080 int i; 6081 6082 if (!sav->sav_sync) 6083 return; 6084 6085 /* 6086 * Update the MOS nvlist describing the list of available devices. 6087 * spa_validate_aux() will have already made sure this nvlist is 6088 * valid and the vdevs are labeled appropriately. 6089 */ 6090 if (sav->sav_object == 0) { 6091 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 6092 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 6093 sizeof (uint64_t), tx); 6094 VERIFY(zap_update(spa->spa_meta_objset, 6095 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 6096 &sav->sav_object, tx) == 0); 6097 } 6098 6099 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 6100 if (sav->sav_count == 0) { 6101 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); 6102 } else { 6103 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 6104 for (i = 0; i < sav->sav_count; i++) 6105 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 6106 B_FALSE, VDEV_CONFIG_L2CACHE); 6107 VERIFY(nvlist_add_nvlist_array(nvroot, config, list, 6108 sav->sav_count) == 0); 6109 for (i = 0; i < sav->sav_count; i++) 6110 nvlist_free(list[i]); 6111 kmem_free(list, sav->sav_count * sizeof (void *)); 6112 } 6113 6114 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 6115 nvlist_free(nvroot); 6116 6117 sav->sav_sync = B_FALSE; 6118 } 6119 6120 /* 6121 * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t. 6122 * The all-vdev ZAP must be empty. 
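 * (spa_avz_build() below VERIFYs each zap_add_int(), so an entry that is
 * already present in the target ZAP would trip the assertion.)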
6123 */ 6124 static void 6125 spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx) 6126 { 6127 spa_t *spa = vd->vdev_spa; 6128 if (vd->vdev_top_zap != 0) { 6129 VERIFY0(zap_add_int(spa->spa_meta_objset, avz, 6130 vd->vdev_top_zap, tx)); 6131 } 6132 if (vd->vdev_leaf_zap != 0) { 6133 VERIFY0(zap_add_int(spa->spa_meta_objset, avz, 6134 vd->vdev_leaf_zap, tx)); 6135 } 6136 for (uint64_t i = 0; i < vd->vdev_children; i++) { 6137 spa_avz_build(vd->vdev_child[i], avz, tx); 6138 } 6139 } 6140 6141 static void 6142 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 6143 { 6144 nvlist_t *config; 6145 6146 /* 6147 * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS, 6148 * its config may not be dirty but we still need to build per-vdev ZAPs. 6149 * Similarly, if the pool is being assembled (e.g. after a split), we 6150 * need to rebuild the AVZ although the config may not be dirty. 6151 */ 6152 if (list_is_empty(&spa->spa_config_dirty_list) && 6153 spa->spa_avz_action == AVZ_ACTION_NONE) 6154 return; 6155 6156 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 6157 6158 ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE || 6159 spa->spa_all_vdev_zaps != 0); 6160 6161 if (spa->spa_avz_action == AVZ_ACTION_REBUILD) { 6162 /* Make and build the new AVZ */ 6163 uint64_t new_avz = zap_create(spa->spa_meta_objset, 6164 DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx); 6165 spa_avz_build(spa->spa_root_vdev, new_avz, tx); 6166 6167 /* Diff old AVZ with new one */ 6168 zap_cursor_t zc; 6169 zap_attribute_t za; 6170 6171 for (zap_cursor_init(&zc, spa->spa_meta_objset, 6172 spa->spa_all_vdev_zaps); 6173 zap_cursor_retrieve(&zc, &za) == 0; 6174 zap_cursor_advance(&zc)) { 6175 uint64_t vdzap = za.za_first_integer; 6176 if (zap_lookup_int(spa->spa_meta_objset, new_avz, 6177 vdzap) == ENOENT) { 6178 /* 6179 * ZAP is listed in old AVZ but not in new one; 6180 * destroy it 6181 */ 6182 VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap, 6183 tx)); 6184 } 6185 } 6186 6187 zap_cursor_fini(&zc); 6188 6189 /* Destroy the old AVZ */ 6190 VERIFY0(zap_destroy(spa->spa_meta_objset, 6191 spa->spa_all_vdev_zaps, tx)); 6192 6193 /* Replace the old AVZ in the dir obj with the new one */ 6194 VERIFY0(zap_update(spa->spa_meta_objset, 6195 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, 6196 sizeof (new_avz), 1, &new_avz, tx)); 6197 6198 spa->spa_all_vdev_zaps = new_avz; 6199 } else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) { 6200 zap_cursor_t zc; 6201 zap_attribute_t za; 6202 6203 /* Walk through the AVZ and destroy all listed ZAPs */ 6204 for (zap_cursor_init(&zc, spa->spa_meta_objset, 6205 spa->spa_all_vdev_zaps); 6206 zap_cursor_retrieve(&zc, &za) == 0; 6207 zap_cursor_advance(&zc)) { 6208 uint64_t zap = za.za_first_integer; 6209 VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx)); 6210 } 6211 6212 zap_cursor_fini(&zc); 6213 6214 /* Destroy and unlink the AVZ itself */ 6215 VERIFY0(zap_destroy(spa->spa_meta_objset, 6216 spa->spa_all_vdev_zaps, tx)); 6217 VERIFY0(zap_remove(spa->spa_meta_objset, 6218 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx)); 6219 spa->spa_all_vdev_zaps = 0; 6220 } 6221 6222 if (spa->spa_all_vdev_zaps == 0) { 6223 spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset, 6224 DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT, 6225 DMU_POOL_VDEV_ZAP_MAP, tx); 6226 } 6227 spa->spa_avz_action = AVZ_ACTION_NONE; 6228 6229 /* Create ZAPs for vdevs that don't have them. 
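 * vdev_construct_zaps() also links the new per-vdev ZAPs into the
 * all-vdev ZAP, keeping it consistent with the per-vdev counts verified
 * later in spa_sync().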
*/
6230 vdev_construct_zaps(spa->spa_root_vdev, tx);
6231
6232 config = spa_config_generate(spa, spa->spa_root_vdev,
6233 dmu_tx_get_txg(tx), B_FALSE);
6234
6235 /*
6236 * If we're upgrading the spa version then make sure that
6237 * the config object gets updated with the correct version.
6238 */
6239 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
6240 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
6241 spa->spa_uberblock.ub_version);
6242
6243 spa_config_exit(spa, SCL_STATE, FTAG);
6244
6245 nvlist_free(spa->spa_config_syncing);
6246 spa->spa_config_syncing = config;
6247
6248 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
6249 }
6250
6251 static void
6252 spa_sync_version(void *arg, dmu_tx_t *tx)
6253 {
6254 uint64_t *versionp = arg;
6255 uint64_t version = *versionp;
6256 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
6257
6258 /*
6259 * Setting the version is special cased when first creating the pool.
6260 */
6261 ASSERT(tx->tx_txg != TXG_INITIAL);
6262
6263 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
6264 ASSERT(version >= spa_version(spa));
6265
6266 spa->spa_uberblock.ub_version = version;
6267 vdev_config_dirty(spa->spa_root_vdev);
6268 spa_history_log_internal(spa, "set", tx, "version=%lld", version);
6269 }
6270
6271 /*
6272 * Set zpool properties.
6273 */
6274 static void
6275 spa_sync_props(void *arg, dmu_tx_t *tx)
6276 {
6277 nvlist_t *nvp = arg;
6278 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
6279 objset_t *mos = spa->spa_meta_objset;
6280 nvpair_t *elem = NULL;
6281
6282 mutex_enter(&spa->spa_props_lock);
6283
6284 while ((elem = nvlist_next_nvpair(nvp, elem))) {
6285 uint64_t intval;
6286 char *strval, *fname;
6287 zpool_prop_t prop;
6288 const char *propname;
6289 zprop_type_t proptype;
6290 spa_feature_t fid;
6291
6292 switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
6293 case ZPROP_INVAL:
6294 /*
6295 * We checked this earlier in spa_prop_validate().
6296 */
6297 ASSERT(zpool_prop_feature(nvpair_name(elem)));
6298
6299 fname = strchr(nvpair_name(elem), '@') + 1;
6300 VERIFY0(zfeature_lookup_name(fname, &fid));
6301
6302 spa_feature_enable(spa, fid, tx);
6303 spa_history_log_internal(spa, "set", tx,
6304 "%s=enabled", nvpair_name(elem));
6305 break;
6306
6307 case ZPOOL_PROP_VERSION:
6308 intval = fnvpair_value_uint64(elem);
6309 /*
6310 * The version is synced separately before other
6311 * properties and should be correct by now.
6312 */
6313 ASSERT3U(spa_version(spa), >=, intval);
6314 break;
6315
6316 case ZPOOL_PROP_ALTROOT:
6317 /*
6318 * 'altroot' is a non-persistent property. It should
6319 * have been set temporarily at creation or import time.
6320 */
6321 ASSERT(spa->spa_root != NULL);
6322 break;
6323
6324 case ZPOOL_PROP_READONLY:
6325 case ZPOOL_PROP_CACHEFILE:
6326 /*
6327 * 'readonly' and 'cachefile' are also non-persistent
6328 * properties.
6329 */
6330 break;
6331 case ZPOOL_PROP_COMMENT:
6332 strval = fnvpair_value_string(elem);
6333 if (spa->spa_comment != NULL)
6334 spa_strfree(spa->spa_comment);
6335 spa->spa_comment = spa_strdup(strval);
6336 /*
6337 * We need to dirty the configuration on all the vdevs
6338 * so that their labels get updated. It's unnecessary
6339 * to do this for pool creation since the vdev's
6340 * configuration has already been dirtied.
6341 */
6342 if (tx->tx_txg != TXG_INITIAL)
6343 vdev_config_dirty(spa->spa_root_vdev);
6344 spa_history_log_internal(spa, "set", tx,
6345 "%s=%s", nvpair_name(elem), strval);
6346 break;
6347 default:
6348 /*
6349 * Set pool property values in the poolprops MOS object.
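 * String values are stored as byte arrays of length strlen() + 1; index
 * and numeric values are stored as a single 64-bit integer (see the
 * zap_update() calls below).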
6350 */ 6351 if (spa->spa_pool_props_object == 0) { 6352 spa->spa_pool_props_object = 6353 zap_create_link(mos, DMU_OT_POOL_PROPS, 6354 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS, 6355 tx); 6356 } 6357 6358 /* normalize the property name */ 6359 propname = zpool_prop_to_name(prop); 6360 proptype = zpool_prop_get_type(prop); 6361 6362 if (nvpair_type(elem) == DATA_TYPE_STRING) { 6363 ASSERT(proptype == PROP_TYPE_STRING); 6364 strval = fnvpair_value_string(elem); 6365 VERIFY0(zap_update(mos, 6366 spa->spa_pool_props_object, propname, 6367 1, strlen(strval) + 1, strval, tx)); 6368 spa_history_log_internal(spa, "set", tx, 6369 "%s=%s", nvpair_name(elem), strval); 6370 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { 6371 intval = fnvpair_value_uint64(elem); 6372 6373 if (proptype == PROP_TYPE_INDEX) { 6374 const char *unused; 6375 VERIFY0(zpool_prop_index_to_string( 6376 prop, intval, &unused)); 6377 } 6378 VERIFY0(zap_update(mos, 6379 spa->spa_pool_props_object, propname, 6380 8, 1, &intval, tx)); 6381 spa_history_log_internal(spa, "set", tx, 6382 "%s=%lld", nvpair_name(elem), intval); 6383 } else { 6384 ASSERT(0); /* not allowed */ 6385 } 6386 6387 switch (prop) { 6388 case ZPOOL_PROP_DELEGATION: 6389 spa->spa_delegation = intval; 6390 break; 6391 case ZPOOL_PROP_BOOTFS: 6392 spa->spa_bootfs = intval; 6393 break; 6394 case ZPOOL_PROP_FAILUREMODE: 6395 spa->spa_failmode = intval; 6396 break; 6397 case ZPOOL_PROP_AUTOEXPAND: 6398 spa->spa_autoexpand = intval; 6399 if (tx->tx_txg != TXG_INITIAL) 6400 spa_async_request(spa, 6401 SPA_ASYNC_AUTOEXPAND); 6402 break; 6403 case ZPOOL_PROP_DEDUPDITTO: 6404 spa->spa_dedup_ditto = intval; 6405 break; 6406 default: 6407 break; 6408 } 6409 } 6410 6411 } 6412 6413 mutex_exit(&spa->spa_props_lock); 6414 } 6415 6416 /* 6417 * Perform one-time upgrade on-disk changes. spa_version() does not 6418 * reflect the new version this txg, so there must be no changes this 6419 * txg to anything that the upgrade code depends on after it executes. 6420 * Therefore this must be called after dsl_pool_sync() does the sync 6421 * tasks. 
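 * The version-gated upgrades below fire at most once, in the txg where
 * ub_version first crosses the corresponding SPA_VERSION_* threshold
 * (spa_ubsync still holds the previously synced version at this point).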
6422 */
6423 static void
6424 spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
6425 {
6426 dsl_pool_t *dp = spa->spa_dsl_pool;
6427
6428 ASSERT(spa->spa_sync_pass == 1);
6429
6430 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
6431
6432 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
6433 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
6434 dsl_pool_create_origin(dp, tx);
6435
6436 /* Keeping the origin open increases spa_minref */
6437 spa->spa_minref += 3;
6438 }
6439
6440 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
6441 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
6442 dsl_pool_upgrade_clones(dp, tx);
6443 }
6444
6445 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
6446 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
6447 dsl_pool_upgrade_dir_clones(dp, tx);
6448
6449 /* Keeping the freedir open increases spa_minref */
6450 spa->spa_minref += 3;
6451 }
6452
6453 if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
6454 spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6455 spa_feature_create_zap_objects(spa, tx);
6456 }
6457
6458 /*
6459 * The LZ4_COMPRESS feature's behaviour was changed to activate_on_enable
6460 * when the ability to use lz4 compression for metadata was added.
6461 * Old pools that have this feature enabled must be upgraded to have
6462 * this feature active.
6463 */
6464 if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6465 boolean_t lz4_en = spa_feature_is_enabled(spa,
6466 SPA_FEATURE_LZ4_COMPRESS);
6467 boolean_t lz4_ac = spa_feature_is_active(spa,
6468 SPA_FEATURE_LZ4_COMPRESS);
6469
6470 if (lz4_en && !lz4_ac)
6471 spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
6472 }
6473
6474 /*
6475 * If we haven't written the salt, do so now. Note that the
6476 * feature may not be activated yet, but that's fine since
6477 * the presence of this ZAP entry is backwards compatible.
6478 */
6479 if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
6480 DMU_POOL_CHECKSUM_SALT) == ENOENT) {
6481 VERIFY0(zap_add(spa->spa_meta_objset,
6482 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
6483 sizeof (spa->spa_cksum_salt.zcs_bytes),
6484 spa->spa_cksum_salt.zcs_bytes, tx));
6485 }
6486
6487 rrw_exit(&dp->dp_config_rwlock, FTAG);
6488 }
6489
6490 /*
6491 * Sync the specified transaction group. New blocks may be dirtied as
6492 * part of the process, so we iterate until it converges.
6493 */
6494 void
6495 spa_sync(spa_t *spa, uint64_t txg)
6496 {
6497 dsl_pool_t *dp = spa->spa_dsl_pool;
6498 objset_t *mos = spa->spa_meta_objset;
6499 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
6500 vdev_t *rvd = spa->spa_root_vdev;
6501 vdev_t *vd;
6502 dmu_tx_t *tx;
6503 int error;
6504 uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
6505 zfs_vdev_queue_depth_pct / 100;
6506
6507 VERIFY(spa_writeable(spa));
6508
6509 /*
6510 * Lock out configuration changes.
6511 */
6512 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6513
6514 spa->spa_syncing_txg = txg;
6515 spa->spa_sync_pass = 0;
6516
6517 mutex_enter(&spa->spa_alloc_lock);
6518 VERIFY0(avl_numnodes(&spa->spa_alloc_tree));
6519 mutex_exit(&spa->spa_alloc_lock);
6520
6521 /*
6522 * If there are any pending vdev state changes, convert them
6523 * into config changes that go out with this transaction group.
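 * (The loop below clears each vdev's dirty state via vdev_state_clean()
 * and marks its configuration dirty via vdev_config_dirty() so the change
 * is written out.)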
6524 */ 6525 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 6526 while (list_head(&spa->spa_state_dirty_list) != NULL) { 6527 /* 6528 * We need the write lock here because, for aux vdevs, 6529 * calling vdev_config_dirty() modifies sav_config. 6530 * This is ugly and will become unnecessary when we 6531 * eliminate the aux vdev wart by integrating all vdevs 6532 * into the root vdev tree. 6533 */ 6534 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 6535 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER); 6536 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) { 6537 vdev_state_clean(vd); 6538 vdev_config_dirty(vd); 6539 } 6540 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 6541 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 6542 } 6543 spa_config_exit(spa, SCL_STATE, FTAG); 6544 6545 tx = dmu_tx_create_assigned(dp, txg); 6546 6547 spa->spa_sync_starttime = gethrtime(); 6548 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, 6549 spa->spa_sync_starttime + spa->spa_deadman_synctime)); 6550 6551 /* 6552 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, 6553 * set spa_deflate if we have no raid-z vdevs. 6554 */ 6555 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && 6556 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { 6557 int i; 6558 6559 for (i = 0; i < rvd->vdev_children; i++) { 6560 vd = rvd->vdev_child[i]; 6561 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 6562 break; 6563 } 6564 if (i == rvd->vdev_children) { 6565 spa->spa_deflate = TRUE; 6566 VERIFY(0 == zap_add(spa->spa_meta_objset, 6567 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 6568 sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 6569 } 6570 } 6571 6572 /* 6573 * Set the top-level vdev's max queue depth. Evaluate each 6574 * top-level's async write queue depth in case it changed. 6575 * The max queue depth will not change in the middle of syncing 6576 * out this txg. 6577 */ 6578 uint64_t queue_depth_total = 0; 6579 for (int c = 0; c < rvd->vdev_children; c++) { 6580 vdev_t *tvd = rvd->vdev_child[c]; 6581 metaslab_group_t *mg = tvd->vdev_mg; 6582 6583 if (mg == NULL || mg->mg_class != spa_normal_class(spa) || 6584 !metaslab_group_initialized(mg)) 6585 continue; 6586 6587 /* 6588 * It is safe to do a lock-free check here because only async 6589 * allocations look at mg_max_alloc_queue_depth, and async 6590 * allocations all happen from spa_sync(). 6591 */ 6592 ASSERT0(refcount_count(&mg->mg_alloc_queue_depth)); 6593 mg->mg_max_alloc_queue_depth = max_queue_depth; 6594 queue_depth_total += mg->mg_max_alloc_queue_depth; 6595 } 6596 metaslab_class_t *mc = spa_normal_class(spa); 6597 ASSERT0(refcount_count(&mc->mc_alloc_slots)); 6598 mc->mc_alloc_max_slots = queue_depth_total; 6599 mc->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; 6600 6601 ASSERT3U(mc->mc_alloc_max_slots, <=, 6602 max_queue_depth * rvd->vdev_children); 6603 6604 /* 6605 * Iterate to convergence. 6606 */ 6607 do { 6608 int pass = ++spa->spa_sync_pass; 6609 6610 spa_sync_config_object(spa, tx); 6611 spa_sync_aux_dev(spa, &spa->spa_spares, tx, 6612 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES); 6613 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx, 6614 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE); 6615 spa_errlog_sync(spa, txg); 6616 dsl_pool_sync(dp, txg); 6617 6618 if (pass < zfs_sync_pass_deferred_free) { 6619 spa_sync_frees(spa, free_bpl, tx); 6620 } else { 6621 /* 6622 * We can not defer frees in pass 1, because 6623 * we sync the deferred frees later in pass 1. 
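 * (zfs_sync_pass_deferred_free is the first pass in which frees are
 * appended to spa_deferred_bpobj instead of being issued immediately.)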
6624 */ 6625 ASSERT3U(pass, >, 1); 6626 bplist_iterate(free_bpl, bpobj_enqueue_cb, 6627 &spa->spa_deferred_bpobj, tx); 6628 } 6629 6630 ddt_sync(spa, txg); 6631 dsl_scan_sync(dp, tx); 6632 6633 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) 6634 vdev_sync(vd, txg); 6635 6636 if (pass == 1) { 6637 spa_sync_upgrades(spa, tx); 6638 ASSERT3U(txg, >=, 6639 spa->spa_uberblock.ub_rootbp.blk_birth); 6640 /* 6641 * Note: We need to check if the MOS is dirty 6642 * because we could have marked the MOS dirty 6643 * without updating the uberblock (e.g. if we 6644 * have sync tasks but no dirty user data). We 6645 * need to check the uberblock's rootbp because 6646 * it is updated if we have synced out dirty 6647 * data (though in this case the MOS will most 6648 * likely also be dirty due to second order 6649 * effects, we don't want to rely on that here). 6650 */ 6651 if (spa->spa_uberblock.ub_rootbp.blk_birth < txg && 6652 !dmu_objset_is_dirty(mos, txg)) { 6653 /* 6654 * Nothing changed on the first pass, 6655 * therefore this TXG is a no-op. Avoid 6656 * syncing deferred frees, so that we 6657 * can keep this TXG as a no-op. 6658 */ 6659 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, 6660 txg)); 6661 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 6662 ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg)); 6663 break; 6664 } 6665 spa_sync_deferred_frees(spa, tx); 6666 } 6667 6668 } while (dmu_objset_is_dirty(mos, txg)); 6669 6670 if (!list_is_empty(&spa->spa_config_dirty_list)) { 6671 /* 6672 * Make sure that the number of ZAPs for all the vdevs matches 6673 * the number of ZAPs in the per-vdev ZAP list. This only gets 6674 * called if the config is dirty; otherwise there may be 6675 * outstanding AVZ operations that weren't completed in 6676 * spa_sync_config_object. 6677 */ 6678 uint64_t all_vdev_zap_entry_count; 6679 ASSERT0(zap_count(spa->spa_meta_objset, 6680 spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count)); 6681 ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==, 6682 all_vdev_zap_entry_count); 6683 } 6684 6685 /* 6686 * Rewrite the vdev configuration (which includes the uberblock) 6687 * to commit the transaction group. 6688 * 6689 * If there are no dirty vdevs, we sync the uberblock to a few 6690 * random top-level vdevs that are known to be visible in the 6691 * config cache (see spa_vdev_add() for a complete description). 6692 * If there *are* dirty vdevs, sync the uberblock to all vdevs. 6693 */ 6694 for (;;) { 6695 /* 6696 * We hold SCL_STATE to prevent vdev open/close/etc. 6697 * while we're attempting to write the vdev labels. 
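 * If vdev_config_sync() fails we suspend pool I/O (zio_suspend()), wait
 * for it to resume, and then retry the label write.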
6698 */ 6699 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 6700 6701 if (list_is_empty(&spa->spa_config_dirty_list)) { 6702 vdev_t *svd[SPA_DVAS_PER_BP]; 6703 int svdcount = 0; 6704 int children = rvd->vdev_children; 6705 int c0 = spa_get_random(children); 6706 6707 for (int c = 0; c < children; c++) { 6708 vd = rvd->vdev_child[(c0 + c) % children]; 6709 if (vd->vdev_ms_array == 0 || vd->vdev_islog) 6710 continue; 6711 svd[svdcount++] = vd; 6712 if (svdcount == SPA_DVAS_PER_BP) 6713 break; 6714 } 6715 error = vdev_config_sync(svd, svdcount, txg); 6716 } else { 6717 error = vdev_config_sync(rvd->vdev_child, 6718 rvd->vdev_children, txg); 6719 } 6720 6721 if (error == 0) 6722 spa->spa_last_synced_guid = rvd->vdev_guid; 6723 6724 spa_config_exit(spa, SCL_STATE, FTAG); 6725 6726 if (error == 0) 6727 break; 6728 zio_suspend(spa, NULL); 6729 zio_resume_wait(spa); 6730 } 6731 dmu_tx_commit(tx); 6732 6733 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY)); 6734 6735 /* 6736 * Clear the dirty config list. 6737 */ 6738 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL) 6739 vdev_config_clean(vd); 6740 6741 /* 6742 * Now that the new config has synced transactionally, 6743 * let it become visible to the config cache. 6744 */ 6745 if (spa->spa_config_syncing != NULL) { 6746 spa_config_set(spa, spa->spa_config_syncing); 6747 spa->spa_config_txg = txg; 6748 spa->spa_config_syncing = NULL; 6749 } 6750 6751 dsl_pool_sync_done(dp, txg); 6752 6753 mutex_enter(&spa->spa_alloc_lock); 6754 VERIFY0(avl_numnodes(&spa->spa_alloc_tree)); 6755 mutex_exit(&spa->spa_alloc_lock); 6756 6757 /* 6758 * Update usable space statistics. 6759 */ 6760 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) 6761 vdev_sync_done(vd, txg); 6762 6763 spa_update_dspace(spa); 6764 6765 /* 6766 * It had better be the case that we didn't dirty anything 6767 * since vdev_config_sync(). 6768 */ 6769 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 6770 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 6771 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); 6772 6773 spa->spa_sync_pass = 0; 6774 6775 /* 6776 * Update the last synced uberblock here. We want to do this at 6777 * the end of spa_sync() so that consumers of spa_last_synced_txg() 6778 * will be guaranteed that all the processing associated with 6779 * that txg has been completed. 6780 */ 6781 spa->spa_ubsync = spa->spa_uberblock; 6782 spa_config_exit(spa, SCL_CONFIG, FTAG); 6783 6784 spa_handle_ignored_writes(spa); 6785 6786 /* 6787 * If any async tasks have been requested, kick them off. 6788 */ 6789 spa_async_dispatch(spa); 6790 } 6791 6792 /* 6793 * Sync all pools. We don't want to hold the namespace lock across these 6794 * operations, so we take a reference on the spa_t and drop the lock during the 6795 * sync. 
6796 */ 6797 void 6798 spa_sync_allpools(void) 6799 { 6800 spa_t *spa = NULL; 6801 mutex_enter(&spa_namespace_lock); 6802 while ((spa = spa_next(spa)) != NULL) { 6803 if (spa_state(spa) != POOL_STATE_ACTIVE || 6804 !spa_writeable(spa) || spa_suspended(spa)) 6805 continue; 6806 spa_open_ref(spa, FTAG); 6807 mutex_exit(&spa_namespace_lock); 6808 txg_wait_synced(spa_get_dsl(spa), 0); 6809 mutex_enter(&spa_namespace_lock); 6810 spa_close(spa, FTAG); 6811 } 6812 mutex_exit(&spa_namespace_lock); 6813 } 6814 6815 /* 6816 * ========================================================================== 6817 * Miscellaneous routines 6818 * ========================================================================== 6819 */ 6820 6821 /* 6822 * Remove all pools in the system. 6823 */ 6824 void 6825 spa_evict_all(void) 6826 { 6827 spa_t *spa; 6828 6829 /* 6830 * Remove all cached state. All pools should be closed now, 6831 * so every spa in the AVL tree should be unreferenced. 6832 */ 6833 mutex_enter(&spa_namespace_lock); 6834 while ((spa = spa_next(NULL)) != NULL) { 6835 /* 6836 * Stop async tasks. The async thread may need to detach 6837 * a device that's been replaced, which requires grabbing 6838 * spa_namespace_lock, so we must drop it here. 6839 */ 6840 spa_open_ref(spa, FTAG); 6841 mutex_exit(&spa_namespace_lock); 6842 spa_async_suspend(spa); 6843 mutex_enter(&spa_namespace_lock); 6844 spa_close(spa, FTAG); 6845 6846 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 6847 spa_unload(spa); 6848 spa_deactivate(spa); 6849 } 6850 spa_remove(spa); 6851 } 6852 mutex_exit(&spa_namespace_lock); 6853 } 6854 6855 vdev_t * 6856 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux) 6857 { 6858 vdev_t *vd; 6859 int i; 6860 6861 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL) 6862 return (vd); 6863 6864 if (aux) { 6865 for (i = 0; i < spa->spa_l2cache.sav_count; i++) { 6866 vd = spa->spa_l2cache.sav_vdevs[i]; 6867 if (vd->vdev_guid == guid) 6868 return (vd); 6869 } 6870 6871 for (i = 0; i < spa->spa_spares.sav_count; i++) { 6872 vd = spa->spa_spares.sav_vdevs[i]; 6873 if (vd->vdev_guid == guid) 6874 return (vd); 6875 } 6876 } 6877 6878 return (NULL); 6879 } 6880 6881 void 6882 spa_upgrade(spa_t *spa, uint64_t version) 6883 { 6884 ASSERT(spa_writeable(spa)); 6885 6886 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6887 6888 /* 6889 * This should only be called for a non-faulted pool, and since a 6890 * future version would result in an unopenable pool, this shouldn't be 6891 * possible. 6892 */ 6893 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version)); 6894 ASSERT3U(version, >=, spa->spa_uberblock.ub_version); 6895 6896 spa->spa_uberblock.ub_version = version; 6897 vdev_config_dirty(spa->spa_root_vdev); 6898 6899 spa_config_exit(spa, SCL_ALL, FTAG); 6900 6901 txg_wait_synced(spa_get_dsl(spa), 0); 6902 } 6903 6904 boolean_t 6905 spa_has_spare(spa_t *spa, uint64_t guid) 6906 { 6907 int i; 6908 uint64_t spareguid; 6909 spa_aux_vdev_t *sav = &spa->spa_spares; 6910 6911 for (i = 0; i < sav->sav_count; i++) 6912 if (sav->sav_vdevs[i]->vdev_guid == guid) 6913 return (B_TRUE); 6914 6915 for (i = 0; i < sav->sav_npending; i++) { 6916 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID, 6917 &spareguid) == 0 && spareguid == guid) 6918 return (B_TRUE); 6919 } 6920 6921 return (B_FALSE); 6922 } 6923 6924 /* 6925 * Check if a pool has an active shared spare device. 
6926 * Note: reference count of an active spare is 2, as a spare and as a replace 6927 */ 6928 static boolean_t 6929 spa_has_active_shared_spare(spa_t *spa) 6930 { 6931 int i, refcnt; 6932 uint64_t pool; 6933 spa_aux_vdev_t *sav = &spa->spa_spares; 6934 6935 for (i = 0; i < sav->sav_count; i++) { 6936 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool, 6937 &refcnt) && pool != 0ULL && pool == spa_guid(spa) && 6938 refcnt > 2) 6939 return (B_TRUE); 6940 } 6941 6942 return (B_FALSE); 6943 } 6944 6945 static sysevent_t * 6946 spa_event_create(spa_t *spa, vdev_t *vd, const char *name) 6947 { 6948 sysevent_t *ev = NULL; 6949 #ifdef _KERNEL 6950 sysevent_attr_list_t *attr = NULL; 6951 sysevent_value_t value; 6952 6953 ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs", 6954 SE_SLEEP); 6955 ASSERT(ev != NULL); 6956 6957 value.value_type = SE_DATA_TYPE_STRING; 6958 value.value.sv_string = spa_name(spa); 6959 if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0) 6960 goto done; 6961 6962 value.value_type = SE_DATA_TYPE_UINT64; 6963 value.value.sv_uint64 = spa_guid(spa); 6964 if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0) 6965 goto done; 6966 6967 if (vd) { 6968 value.value_type = SE_DATA_TYPE_UINT64; 6969 value.value.sv_uint64 = vd->vdev_guid; 6970 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value, 6971 SE_SLEEP) != 0) 6972 goto done; 6973 6974 if (vd->vdev_path) { 6975 value.value_type = SE_DATA_TYPE_STRING; 6976 value.value.sv_string = vd->vdev_path; 6977 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH, 6978 &value, SE_SLEEP) != 0) 6979 goto done; 6980 } 6981 } 6982 6983 if (sysevent_attach_attributes(ev, attr) != 0) 6984 goto done; 6985 attr = NULL; 6986 6987 done: 6988 if (attr) 6989 sysevent_free_attr(attr); 6990 6991 #endif 6992 return (ev); 6993 } 6994 6995 static void 6996 spa_event_post(sysevent_t *ev) 6997 { 6998 #ifdef _KERNEL 6999 sysevent_id_t eid; 7000 7001 (void) log_sysevent(ev, SE_SLEEP, &eid); 7002 sysevent_free(ev); 7003 #endif 7004 } 7005 7006 /* 7007 * Post a sysevent corresponding to the given event. The 'name' must be one of 7008 * the event definitions in sys/sysevent/eventdefs.h. The payload will be 7009 * filled in from the spa and (optionally) the vdev. This doesn't do anything 7010 * in the userland libzpool, as we don't want consumers to misinterpret ztest 7011 * or zdb as real changes. 7012 */ 7013 void 7014 spa_event_notify(spa_t *spa, vdev_t *vd, const char *name) 7015 { 7016 spa_event_post(spa_event_create(spa, vd, name)); 7017 } 7018
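/*
 * Example usage (a sketch; ESC_ZFS_RESILVER_FINISH is assumed to be one of
 * the event names defined in sys/sysevent/eventdefs.h):
 *
 *	spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_FINISH);
 */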