/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
 * Copyright (c) 2018, Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Toomas Soome <tsoome@me.com>
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright 2018 Joyent, Inc.
 * Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
 * Copyright 2017 Joyent, Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
 * Copyright (c) 2023 Hewlett Packard Enterprise Development LP.
 */

/*
 * SPA: Storage Pool Allocator
 *
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_removal.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_draid.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/mmp.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>
#include <sys/zvol.h>

#ifdef	_KERNEL
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/callb.h>
#include <sys/zone.h>
#include <sys/vmsystm.h>
#endif	/* _KERNEL */

#include "zfs_prop.h"
#include "zfs_comutil.h"
#include <cityhash.h>

/*
 * spa_thread() existed on Illumos as a parent thread for the various worker
 * threads that actually run the pool, as a way to both reference the entire
 * pool work as a single object, and to share properties like scheduling
 * options. It has not yet been adapted to Linux or FreeBSD. This define is
 * used to mark related parts of the code to make things easier for the reader,
 * and to compile this code out. It can be removed when someone implements it,
 * moves it to some Illumos-specific place, or removes it entirely.
 */
#undef HAVE_SPA_THREAD

/*
 * The "System Duty Cycle" scheduling class is an Illumos feature to help
 * prevent CPU-intensive kernel threads from affecting latency on interactive
 * threads. It doesn't exist on Linux or FreeBSD, so the supporting code is
 * gated behind a define. On Illumos SDC depends on spa_thread(), but
 * spa_thread() also has other uses, so this is a separate define.
 */
#undef HAVE_SYSDC

/*
 * The interval, in seconds, at which failed configuration cache file writes
 * should be retried.
 */
int zfs_ccw_retry_interval = 300;

typedef enum zti_modes {
	ZTI_MODE_FIXED,			/* value is # of threads (min 1) */
	ZTI_MODE_SCALE,			/* Taskqs scale with CPUs. */
	ZTI_MODE_SYNC,			/* sync thread assigned */
	ZTI_MODE_NULL,			/* don't create a taskq */
	ZTI_NMODES
} zti_modes_t;

#define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
#define	ZTI_PCT(n)	{ ZTI_MODE_ONLINE_PERCENT, (n), 1 }
#define	ZTI_SCALE	{ ZTI_MODE_SCALE, 0, 1 }
#define	ZTI_SYNC	{ ZTI_MODE_SYNC, 0, 1 }
#define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }

#define	ZTI_N(n)	ZTI_P(n, 1)
#define	ZTI_ONE		ZTI_N(1)
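
/*
 * For illustration: ZTI_P(8, 2) expands to { ZTI_MODE_FIXED, 8, 2 }, i.e.
 * two taskqs with eight threads each, while ZTI_ONE is shorthand for
 * ZTI_P(1, 1): one taskq with a single thread.
 */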

typedef struct zio_taskq_info {
	zti_modes_t zti_mode;
	uint_t zti_value;
	uint_t zti_count;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
	"iss", "iss_h", "int", "int_h"
};

/*
 * This table defines the taskq settings for each ZFS I/O type. When
 * initializing a pool, we use this table to create an appropriately sized
 * taskq. Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros. Other operations process a large amount of data; the ZTI_SCALE
 * macro causes us to create a taskq oriented for throughput. Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads per-
 * taskq and the number of taskqs; when dispatching an event in this case, the
 * particular taskq is chosen at random. ZTI_SCALE uses a number of taskqs
 * that scales with the number of CPUs.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
static zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
	{ ZTI_N(8),	ZTI_NULL,	ZTI_SCALE,	ZTI_NULL }, /* READ */
	{ ZTI_SYNC,	ZTI_N(5),	ZTI_SCALE,	ZTI_N(5) }, /* WRITE */
	{ ZTI_SCALE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FLUSH */
	{ ZTI_N(4),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* TRIM */
};
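
/*
 * Reading the table above: the READ row, for example, gets one fixed
 * eight-thread taskq for the issue context and CPU-scaled taskqs for the
 * interrupt context, while WRITE uses ZTI_SYNC for issue and dedicates
 * five-thread taskqs to the high-priority issue and interrupt contexts.
 */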

static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, spa_import_type_t type,
    const char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);

/*
 * Percentage of all CPUs that can be used by the metaslab preload taskq.
 */
static uint_t metaslab_preload_pct = 50;

static uint_t	zio_taskq_batch_pct = 80;	  /* 1 thread per cpu in pset */
static uint_t	zio_taskq_batch_tpq;		  /* threads per taskq */

#ifdef HAVE_SYSDC
static const boolean_t	zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */
static const uint_t	zio_taskq_basedc = 80;	  /* base duty cycle */
#endif

#ifdef HAVE_SPA_THREAD
static const boolean_t spa_create_process = B_TRUE; /* no process => no sysdc */
#endif

static uint_t	zio_taskq_write_tpq = 16;

/*
 * Report any spa_load_verify errors found, but do not fail spa_load.
 * This is used by zdb to analyze non-idle pools.
 */
boolean_t	spa_load_verify_dryrun = B_FALSE;

/*
 * Allow read spacemaps in case of readonly import (spa_mode == SPA_MODE_READ).
 * This is used by zdb for spacemaps verification.
 */
boolean_t	spa_mode_readable_spacemaps = B_FALSE;

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

/*
 * For debugging purposes: print out vdev tree during pool import.
 */
static int		spa_load_print_vdev_tree = B_FALSE;

/*
 * A non-zero value for zfs_max_missing_tvds means that we allow importing
 * pools with missing top-level vdevs. This is strictly intended for advanced
 * pool recovery cases since missing data is almost inevitable. Pools with
 * missing devices can only be imported read-only for safety reasons, and their
 * fail-mode will be automatically set to "continue".
 *
 * With 1 missing vdev we should be able to import the pool and mount all
 * datasets. User data that was not modified after the missing device has been
 * added should be recoverable. This means that snapshots created prior to the
 * addition of that device should be completely intact.
 *
 * With 2 missing vdevs, some datasets may fail to mount since there are
 * dataset statistics that are stored as regular metadata. Some data might be
 * recoverable if those vdevs were added recently.
 *
 * With 3 or more missing vdevs, the pool is severely damaged and MOS entries
 * may be missing entirely. Chances of data recovery are very low. Note that
 * there are also risks of performing an inadvertent rewind as we might be
 * missing all the vdevs with the latest uberblocks.
 */
uint64_t	zfs_max_missing_tvds = 0;

/*
 * The parameters below are similar to zfs_max_missing_tvds but are only
 * intended for a preliminary open of the pool with an untrusted config which
 * might be incomplete or out-dated.
 *
 * We are more tolerant for pools opened from a cachefile since we could have
 * an out-dated cachefile where a device removal was not registered.
 * We could have set the limit arbitrarily high but in the case where devices
 * are really missing we would want to return the proper error codes; we chose
 * SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
 * and we get a chance to retrieve the trusted config.
 */
uint64_t	zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;
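
/*
 * SPA_DVAS_PER_BP is 3 (a block pointer references at most three DVAs), so
 * the cachefile limit above tolerates up to two missing top-level vdevs,
 * which should still leave at least one copy of the MOS reachable.
 */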

/*
 * In the case where config was assembled by scanning device paths (/dev/dsks
 * by default) we are less tolerant since all the existing devices should have
 * been detected and we want spa_load to return the right error codes.
 */
uint64_t	zfs_max_missing_tvds_scan = 0;

/*
 * Debugging aid that pauses spa_sync() towards the end.
 */
static const boolean_t	zfs_pause_spa_sync = B_FALSE;

/*
 * Variables to indicate that the livelist condense zthr function should wait
 * at certain points for the livelist to be removed; used to test
 * condense/destroy races.
 */
static int zfs_livelist_condense_zthr_pause = 0;
static int zfs_livelist_condense_sync_pause = 0;

/*
 * Variables to track whether or not condense cancellation has been
 * triggered in testing.
 */
static int zfs_livelist_condense_sync_cancel = 0;
static int zfs_livelist_condense_zthr_cancel = 0;

/*
 * Variable to track whether or not extra ALLOC blkptrs were added to a
 * livelist entry while it was being condensed (caused by the way we track
 * remapped blkptrs in dbuf_remap_impl)
 */
static int zfs_livelist_condense_new_alloc = 0;

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, const char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	propval = fnvlist_alloc();
	fnvlist_add_uint64(propval, ZPROP_SOURCE, src);

	if (strval != NULL)
		fnvlist_add_string(propval, ZPROP_VALUE, strval);
	else
		fnvlist_add_uint64(propval, ZPROP_VALUE, intval);

	fnvlist_add_nvlist(nvl, propname, propval);
	nvlist_free(propval);
}
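
/*
 * Example usage (see spa_prop_get_config() below): numeric properties pass
 * NULL for strval, as in spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL,
 * size, src); string properties pass strval and an intval of 0.
 */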

/*
 * Add a user property (source=src, propname=propval) to an nvlist.
 */
static void
spa_prop_add_user(nvlist_t *nvl, const char *propname, char *strval,
    zprop_source_t src)
{
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
	VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}

/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	vdev_t *rvd = spa->spa_root_vdev;
	dsl_pool_t *pool = spa->spa_dsl_pool;
	uint64_t size, alloc, cap, version;
	const zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;
	metaslab_class_t *mc = spa_normal_class(spa);

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	if (rvd != NULL) {
		alloc = metaslab_class_get_alloc(mc);
		alloc += metaslab_class_get_alloc(spa_special_class(spa));
		alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
		alloc += metaslab_class_get_alloc(spa_embedded_log_class(spa));

		size = metaslab_class_get_space(mc);
		size += metaslab_class_get_space(spa_special_class(spa));
		size += metaslab_class_get_space(spa_dedup_class(spa));
		size += metaslab_class_get_space(spa_embedded_log_class(spa));

		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
		    size - alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CHECKPOINT, NULL,
		    spa->spa_checkpoint_info.sci_dspace, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
		    metaslab_class_fragmentation(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
		    metaslab_class_expandable_space(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
		    (spa_mode(spa) == SPA_MODE_READ), src);

		cap = (size == 0) ? 0 : (alloc * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
		    ddt_get_pool_dedup_ratio(spa), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_BCLONEUSED, NULL,
		    brt_get_used(spa), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_BCLONESAVED, NULL,
		    brt_get_saved(spa), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_BCLONERATIO, NULL,
		    brt_get_ratio(spa), src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    rvd->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
			    version, ZPROP_SRC_DEFAULT);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
			    version, ZPROP_SRC_LOCAL);
		}
		spa_prop_add_list(*nvp, ZPOOL_PROP_LOAD_GUID,
		    NULL, spa_load_guid(spa), src);
	}

	if (pool != NULL) {
		/*
		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
		 * when opening pools created before this version, freedir will
		 * be NULL.
		 */
		if (pool->dp_free_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
			    dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
			    src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
			    NULL, 0, src);
		}

		if (pool->dp_leak_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
			    dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
			    src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
			    NULL, 0, src);
		}
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_comment != NULL) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
		    0, ZPROP_SRC_LOCAL);
	}

	if (spa->spa_compatibility != NULL) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_COMPATIBILITY,
		    spa->spa_compatibility, 0, ZPROP_SRC_LOCAL);
	}

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
		    MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
	} else {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
		    SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
	}

	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
		    DNODE_MAX_SIZE, ZPROP_SRC_NONE);
	} else {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
		    DNODE_MIN_SIZE, ZPROP_SRC_NONE);
	}

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	objset_t *mos = spa->spa_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;
	dsl_pool_t *dp;
	int err;

	err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP);
	if (err)
		return (err);

	dp = spa_get_dsl(spa);
	dsl_pool_config_enter(dp, FTAG);
	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more prop to get. */
	if (mos == NULL || spa->spa_pool_props_object == 0)
		goto out;

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) ==
		    ZPOOL_PROP_INVAL && !zfs_prop_user(za.za_name))
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_dataset_t *ds = NULL;

				err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds);
				if (err != 0)
					break;

				strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			if (prop != ZPOOL_PROP_INVAL) {
				spa_prop_add_list(*nvp, prop, strval, 0, src);
			} else {
				src = ZPROP_SRC_LOCAL;
				spa_prop_add_user(*nvp, za.za_name, strval,
				    src);
			}
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
out:
	mutex_exit(&spa->spa_props_lock);
	dsl_pool_config_exit(dp, FTAG);
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum = 0;
	boolean_t has_feature = B_FALSE;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		uint64_t intval;
		const char *strval, *slash, *check, *fname;
		const char *propname = nvpair_name(elem);
		zpool_prop_t prop = zpool_name_to_prop(propname);

		switch (prop) {
		case ZPOOL_PROP_INVAL:
			/*
			 * Sanitize the input.
			 */
			if (zfs_prop_user(propname)) {
				if (strlen(propname) >= ZAP_MAXNAMELEN) {
					error = SET_ERROR(ENAMETOOLONG);
					break;
				}

				if (strlen(fnvpair_value_string(elem)) >=
				    ZAP_MAXVALUELEN) {
					error = SET_ERROR(E2BIG);
					break;
				}
			} else if (zpool_prop_feature(propname)) {
				if (nvpair_type(elem) != DATA_TYPE_UINT64) {
					error = SET_ERROR(EINVAL);
					break;
				}

				if (nvpair_value_uint64(elem, &intval) != 0) {
					error = SET_ERROR(EINVAL);
					break;
				}

				if (intval != 0) {
					error = SET_ERROR(EINVAL);
					break;
				}

				fname = strchr(propname, '@') + 1;
				if (zfeature_lookup_name(fname, NULL) != 0) {
					error = SET_ERROR(EINVAL);
					break;
				}

				has_feature = B_TRUE;
			} else {
				error = SET_ERROR(EINVAL);
				break;
			}
			break;

		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) ||
			    intval > SPA_VERSION_BEFORE_FEATURES ||
			    has_feature))
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
		case ZPOOL_PROP_AUTOEXPAND:
		case ZPOOL_PROP_AUTOTRIM:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_MULTIHOST:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);

			if (!error) {
				uint32_t hostid = zone_get_hostid(NULL);
				if (hostid)
					spa->spa_hostid = hostid;
				else
					error = SET_ERROR(ENOTSUP);
			}

			break;

		case ZPOOL_PROP_BOOTFS:
			/*
			 * If the pool version is less than SPA_VERSION_BOOTFS,
			 * or the pool is still being created (version == 0),
			 * the bootfs property cannot be set.
			 */
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				objset_t *os;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				error = dmu_objset_hold(strval, FTAG, &os);
				if (error != 0)
					break;

				/* Must be ZPL. */
				if (dmu_objset_type(os) != DMU_OST_ZFS) {
					error = SET_ERROR(ENOTSUP);
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_rele(os, FTAG);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > ZIO_FAILURE_MODE_PANIC)
				error = SET_ERROR(EINVAL);

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = SET_ERROR(EIO);
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = SET_ERROR(EINVAL);
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_COMMENT:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					error = SET_ERROR(EINVAL);
					break;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT)
				error = SET_ERROR(E2BIG);
			break;

		default:
			break;
		}

		if (error)
			break;
	}

	(void) nvlist_remove_all(props,
	    zpool_prop_to_name(ZPOOL_PROP_DEDUPDITTO));

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}

void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
	const char *cachefile;
	spa_config_dirent_t *dp;

	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
	    &cachefile) != 0)
		return;

	dp = kmem_alloc(sizeof (spa_config_dirent_t),
	    KM_SLEEP);

	if (cachefile[0] == '\0')
		dp->scd_path = spa_strdup(spa_config_path);
	else if (strcmp(cachefile, "none") == 0)
		dp->scd_path = NULL;
	else
		dp->scd_path = spa_strdup(cachefile);

	list_insert_head(&spa->spa_config_list, dp);
	if (need_sync)
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;
	nvpair_t *elem = NULL;
	boolean_t need_sync = B_FALSE;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));

		if (prop == ZPOOL_PROP_CACHEFILE ||
		    prop == ZPOOL_PROP_ALTROOT ||
		    prop == ZPOOL_PROP_READONLY)
			continue;

		if (prop == ZPOOL_PROP_INVAL &&
		    zfs_prop_user(nvpair_name(elem))) {
			need_sync = B_TRUE;
			break;
		}

		if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) {
			uint64_t ver = 0;

			if (prop == ZPOOL_PROP_VERSION) {
				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
			} else {
				ASSERT(zpool_prop_feature(nvpair_name(elem)));
				ver = SPA_VERSION_FEATURES;
				need_sync = B_TRUE;
			}

			/* Save time if the version is already set. */
			if (ver == spa_version(spa))
				continue;

			/*
			 * In addition to the pool directory object, we might
			 * create the pool properties object, the features for
			 * read object, the features for write object, or the
			 * feature descriptions object.
			 */
			error = dsl_sync_task(spa->spa_name, NULL,
			    spa_sync_version, &ver,
			    6, ZFS_SPACE_CHECK_RESERVED);
			if (error)
				return (error);
			continue;
		}

		need_sync = B_TRUE;
		break;
	}

	if (need_sync) {
		return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
		    nvp, 6, ZFS_SPACE_CHECK_RESERVED));
	}

	return (0);
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid __maybe_unused = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t vdev_state;

	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
		int error = (spa_has_checkpoint(spa)) ?
		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
		return (SET_ERROR(error));
	}

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	vdev_state = rvd->vdev_state;
	spa_config_exit(spa, SCL_STATE, FTAG);

	if (vdev_state != VDEV_STATE_HEALTHY)
		return (SET_ERROR(ENXIO));

	ASSERT3U(spa_guid(spa), !=, *newguid);

	return (0);
}

static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	uint64_t oldguid;
	vdev_t *rvd = spa->spa_root_vdev;

	oldguid = spa_guid(spa);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	rvd->vdev_guid = *newguid;
	rvd->vdev_guid_sum += (*newguid - oldguid);
	vdev_config_dirty(rvd);
	spa_config_exit(spa, SCL_STATE, FTAG);

	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
	    (u_longlong_t)oldguid, (u_longlong_t)*newguid);
}

/*
 * Change the GUID for the pool.  This is done so that we can later
 * re-import a pool built from a clone of our own vdevs.  We will modify
 * the root vdev's guid, our own pool guid, and then mark all of our
 * vdevs dirty.  Note that we must make sure that all our vdevs are
 * online when we do this, or else any vdevs that weren't present
 * would be orphaned from our pool.  We are also going to issue a
 * sysevent to update any watchers.
 */
int
spa_change_guid(spa_t *spa)
{
	int error;
	uint64_t guid;

	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	guid = spa_generate_guid(NULL);

	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
	    spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);

	if (error == 0) {
		/*
		 * Clear the kobj flag from all the vdevs to allow
		 * vdev_cache_process_kobj_evt() to post events to all the
		 * vdevs since GUID is updated.
		 */
		vdev_clear_kobj_evt(spa->spa_root_vdev);
		for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
			vdev_clear_kobj_evt(spa->spa_l2cache.sav_vdevs[i]);

		spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);
	}

	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	const spa_error_entry_t *sa = (const spa_error_entry_t *)a;
	const spa_error_entry_t *sb = (const spa_error_entry_t *)b;
	int ret;

	ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_phys_t));

	return (TREE_ISIGN(ret));
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	memcpy(last, &spa->spa_errlist_last, sizeof (avl_tree_t));
	memcpy(scrub, &spa->spa_errlist_scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
	enum zti_modes mode = ztip->zti_mode;
	uint_t value = ztip->zti_value;
	uint_t count = ztip->zti_count;
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	uint_t cpus, flags = TASKQ_DYNAMIC;

	switch (mode) {
	case ZTI_MODE_FIXED:
		ASSERT3U(value, >, 0);
		break;

	case ZTI_MODE_SYNC:

		/*
		 * Create one wr_iss taskq for every 'zio_taskq_write_tpq' CPUs,
		 * not to exceed the number of spa allocators, and align to it.
		 */
		cpus = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100);
		count = MAX(1, cpus / MAX(1, zio_taskq_write_tpq));
		count = MAX(count, (zio_taskq_batch_pct + 99) / 100);
		count = MIN(count, spa->spa_alloc_count);
		while (spa->spa_alloc_count % count != 0 &&
		    spa->spa_alloc_count < count * 2)
			count--;

		/*
		 * zio_taskq_batch_pct is unbounded and may exceed 100%, but no
		 * single taskq may have more threads than 100% of online cpus.
		 */
		value = (zio_taskq_batch_pct + count / 2) / count;
		value = MIN(value, 100);
		flags |= TASKQ_THREADS_CPU_PCT;
		break;
1089eda14cbcSMatt Macy 
1090eda14cbcSMatt Macy 	case ZTI_MODE_SCALE:
109116038816SMartin Matuska 		flags |= TASKQ_THREADS_CPU_PCT;
109216038816SMartin Matuska 		/*
109316038816SMartin Matuska 		 * We want more taskqs to reduce lock contention, but we want
109416038816SMartin Matuska 	 * fewer for better request ordering and CPU utilization.
109516038816SMartin Matuska 		 */
109616038816SMartin Matuska 		cpus = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100);
109716038816SMartin Matuska 		if (zio_taskq_batch_tpq > 0) {
109816038816SMartin Matuska 			count = MAX(1, (cpus + zio_taskq_batch_tpq / 2) /
109916038816SMartin Matuska 			    zio_taskq_batch_tpq);
110016038816SMartin Matuska 		} else {
110116038816SMartin Matuska 			/*
110216038816SMartin Matuska 			 * Prefer 6 threads per taskq, but no more taskqs
110316038816SMartin Matuska 			 * than threads in them on large systems. For 80%:
110416038816SMartin Matuska 			 *
110516038816SMartin Matuska 			 *                 taskq   taskq   total
110616038816SMartin Matuska 			 * cpus    taskqs  percent threads threads
110716038816SMartin Matuska 			 * ------- ------- ------- ------- -------
110816038816SMartin Matuska 			 * 1       1       80%     1       1
110916038816SMartin Matuska 			 * 2       1       80%     1       1
111016038816SMartin Matuska 			 * 4       1       80%     3       3
111116038816SMartin Matuska 			 * 8       2       40%     3       6
111216038816SMartin Matuska 			 * 16      3       27%     4       12
111316038816SMartin Matuska 			 * 32      5       16%     5       25
111416038816SMartin Matuska 			 * 64      7       11%     7       49
111516038816SMartin Matuska 			 * 128     10      8%      10      100
111616038816SMartin Matuska 			 * 256     14      6%      15      210
111716038816SMartin Matuska 			 */
111816038816SMartin Matuska 			count = 1 + cpus / 6;
111916038816SMartin Matuska 			while (count * count > cpus)
112016038816SMartin Matuska 				count--;
112116038816SMartin Matuska 		}
112216038816SMartin Matuska 		/* Limit each taskq to at most 100% of CPUs to avoid tripping the taskq assertion. */
112316038816SMartin Matuska 		count = MAX(count, (zio_taskq_batch_pct + 99) / 100);
112416038816SMartin Matuska 		value = (zio_taskq_batch_pct + count / 2) / count;
112516038816SMartin Matuska 		break;
112616038816SMartin Matuska 
112716038816SMartin Matuska 	case ZTI_MODE_NULL:
112816038816SMartin Matuska 		tqs->stqs_count = 0;
112916038816SMartin Matuska 		tqs->stqs_taskq = NULL;
113016038816SMartin Matuska 		return;
113116038816SMartin Matuska 
113216038816SMartin Matuska 	default:
1133eda14cbcSMatt Macy 		panic("unrecognized mode for %s_%s taskq (%u:%u) in "
1134eda14cbcSMatt Macy 		    "spa_taskqs_init()",
113514c2e0a0SMartin Matuska 		    zio_type_name[t], zio_taskq_types[q], mode, value);
1136eda14cbcSMatt Macy 		break;
1137eda14cbcSMatt Macy 	}
1138eda14cbcSMatt Macy 
1139eda14cbcSMatt Macy 	ASSERT3U(count, >, 0);
114016038816SMartin Matuska 	tqs->stqs_count = count;
114116038816SMartin Matuska 	tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
114216038816SMartin Matuska 
114316038816SMartin Matuska 	for (uint_t i = 0; i < count; i++) {
1144eda14cbcSMatt Macy 		taskq_t *tq;
1145eda14cbcSMatt Macy 		char name[32];
1146eda14cbcSMatt Macy 
1147eda14cbcSMatt Macy 		if (count > 1)
114816038816SMartin Matuska 			(void) snprintf(name, sizeof (name), "%s_%s_%u",
114916038816SMartin Matuska 			    zio_type_name[t], zio_taskq_types[q], i);
115016038816SMartin Matuska 		else
115116038816SMartin Matuska 			(void) snprintf(name, sizeof (name), "%s_%s",
1152eda14cbcSMatt Macy 			    zio_type_name[t], zio_taskq_types[q]);
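		/*
		 * Resulting taskq names look like, e.g., "write_issue_0" or
		 * "read_intr": zio type name, queue type, plus an index when
		 * more than one taskq is created.
		 */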
1153eda14cbcSMatt Macy 
1154eda14cbcSMatt Macy #ifdef HAVE_SYSDC
11556c1e79dfSMartin Matuska 		if (zio_taskq_sysdc && spa->spa_proc != &p0) {
1156eda14cbcSMatt Macy 			(void) zio_taskq_basedc;
1157e92ffd9bSMartin Matuska 			tq = taskq_create_sysdc(name, value, 50, INT_MAX,
1158eda14cbcSMatt Macy 			    spa->spa_proc, zio_taskq_basedc, flags);
1159eda14cbcSMatt Macy 		} else {
1160eda14cbcSMatt Macy #endif
11616c1e79dfSMartin Matuska 			pri_t pri = maxclsyspri;
1162eda14cbcSMatt Macy 			/*
1163eda14cbcSMatt Macy 			 * The write issue taskq can be extremely CPU
1164eda14cbcSMatt Macy 			 * intensive.  Run it at slightly less important
1165eda14cbcSMatt Macy 			 * priority than the other taskqs.
11662c48331dSMatt Macy 			 *
11672c48331dSMatt Macy 			 * Under Linux and FreeBSD this means incrementing
11682c48331dSMatt Macy 			 * the priority value as opposed to platforms like
11692c48331dSMatt Macy 			 * illumos where it should be decremented.
11702c48331dSMatt Macy 			 *
11712c48331dSMatt Macy 			 * On FreeBSD, if priorities divided by four (RQ_PPQ)
11722c48331dSMatt Macy 			 * are equal then a difference between them is
11732c48331dSMatt Macy 			 * insignificant.
11742c48331dSMatt Macy 			 */
1175eda14cbcSMatt Macy 			if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE) {
11762c48331dSMatt Macy #if defined(__linux__)
11772c48331dSMatt Macy 				pri++;
1178eda14cbcSMatt Macy #elif defined(__FreeBSD__)
11792c48331dSMatt Macy 				pri += 4;
11802c48331dSMatt Macy #else
11812c48331dSMatt Macy #error "unknown OS"
11822c48331dSMatt Macy #endif
11832c48331dSMatt Macy 			}
11842c48331dSMatt Macy 			tq = taskq_create_proc(name, value, pri, 50,
1185eda14cbcSMatt Macy 			    INT_MAX, spa->spa_proc, flags);
1186eda14cbcSMatt Macy #ifdef HAVE_SYSDC
11876c1e79dfSMartin Matuska 		}
1188eda14cbcSMatt Macy #endif
11896c1e79dfSMartin Matuska 
1190eda14cbcSMatt Macy 		tqs->stqs_taskq[i] = tq;
1191eda14cbcSMatt Macy 	}
1192eda14cbcSMatt Macy }
1193eda14cbcSMatt Macy 
1194eda14cbcSMatt Macy static void
1195eda14cbcSMatt Macy spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
1196eda14cbcSMatt Macy {
1197eda14cbcSMatt Macy 	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
1198eda14cbcSMatt Macy 
1199eda14cbcSMatt Macy 	if (tqs->stqs_taskq == NULL) {
1200eda14cbcSMatt Macy 		ASSERT3U(tqs->stqs_count, ==, 0);
1201eda14cbcSMatt Macy 		return;
1202eda14cbcSMatt Macy 	}
1203eda14cbcSMatt Macy 
1204eda14cbcSMatt Macy 	for (uint_t i = 0; i < tqs->stqs_count; i++) {
1205eda14cbcSMatt Macy 		ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
1206eda14cbcSMatt Macy 		taskq_destroy(tqs->stqs_taskq[i]);
1207eda14cbcSMatt Macy 	}
1208eda14cbcSMatt Macy 
1209eda14cbcSMatt Macy 	kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
1210eda14cbcSMatt Macy 	tqs->stqs_taskq = NULL;
1211eda14cbcSMatt Macy }
1212eda14cbcSMatt Macy 
1213eda14cbcSMatt Macy #ifdef _KERNEL
1214b356da80SMartin Matuska /*
1215b356da80SMartin Matuska  * The READ and WRITE rows of zio_taskqs are configurable at module load time
1216b356da80SMartin Matuska  * by setting zio_taskq_read or zio_taskq_write.
1217b356da80SMartin Matuska  *
1218b356da80SMartin Matuska  * Example (the defaults for READ and WRITE):
1219b356da80SMartin Matuska  *   zio_taskq_read='fixed,1,8 null scale null'
1220b356da80SMartin Matuska  *   zio_taskq_write='sync fixed,1,5 scale fixed,1,5'
1221b356da80SMartin Matuska  *
1222b356da80SMartin Matuska  * Each sets the entire row at a time.
1223b356da80SMartin Matuska  *
1224b356da80SMartin Matuska  * 'fixed' is parameterised: fixed,Q,T where Q is number of taskqs, T is number
1225b356da80SMartin Matuska  * of threads per taskq.
1226b356da80SMartin Matuska  *
1227b356da80SMartin Matuska  * 'null' can only be set on the high-priority queues (queue selection for
1228b356da80SMartin Matuska  * high-priority queues will fall back to the regular queue if the high-pri
1229b356da80SMartin Matuska  * is NULL).
1230b356da80SMartin Matuska  */
1231b356da80SMartin Matuska static const char *const modes[ZTI_NMODES] = {
1232b356da80SMartin Matuska 	"fixed", "scale", "sync", "null"
1233b356da80SMartin Matuska };
1234b356da80SMartin Matuska 
1235b356da80SMartin Matuska /* Parse the incoming config string. Modifies cfg */
1236b356da80SMartin Matuska static int
1237b356da80SMartin Matuska spa_taskq_param_set(zio_type_t t, char *cfg)
1238b356da80SMartin Matuska {
1239b356da80SMartin Matuska 	int err = 0;
1240b356da80SMartin Matuska 
1241b356da80SMartin Matuska 	zio_taskq_info_t row[ZIO_TASKQ_TYPES] = {{0}};
1242b356da80SMartin Matuska 
1243b356da80SMartin Matuska 	char *next = cfg, *tok, *c;
1244b356da80SMartin Matuska 
1245b356da80SMartin Matuska 	/*
1246b356da80SMartin Matuska 	 * Parse out each element from the string and fill `row`. The entire
1247b356da80SMartin Matuska 	 * row has to be set at once, so any errors are flagged by just
1248b356da80SMartin Matuska 	 * breaking out of this loop early.
1249b356da80SMartin Matuska 	 */
1250b356da80SMartin Matuska 	uint_t q;
1251b356da80SMartin Matuska 	for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
1252b356da80SMartin Matuska 		/* `next` is the start of the config */
1253b356da80SMartin Matuska 		if (next == NULL)
1254b356da80SMartin Matuska 			break;
1255b356da80SMartin Matuska 
1256b356da80SMartin Matuska 		/* Eat up leading space */
1257b356da80SMartin Matuska 		while (isspace(*next))
1258b356da80SMartin Matuska 			next++;
1259b356da80SMartin Matuska 		if (*next == '\0')
1260b356da80SMartin Matuska 			break;
1261b356da80SMartin Matuska 
1262b356da80SMartin Matuska 		/* Mode ends at space or end of string */
1263b356da80SMartin Matuska 		tok = next;
1264b356da80SMartin Matuska 		next = strchr(tok, ' ');
1265b356da80SMartin Matuska 		if (next != NULL) *next++ = '\0';
1266b356da80SMartin Matuska 
1267b356da80SMartin Matuska 		/* Parameters start after a comma */
1268b356da80SMartin Matuska 		c = strchr(tok, ',');
1269b356da80SMartin Matuska 		if (c != NULL) *c++ = '\0';
1270b356da80SMartin Matuska 
1271b356da80SMartin Matuska 		/* Match mode string */
1272b356da80SMartin Matuska 		uint_t mode;
1273b356da80SMartin Matuska 		for (mode = 0; mode < ZTI_NMODES; mode++)
1274b356da80SMartin Matuska 			if (strcmp(tok, modes[mode]) == 0)
1275b356da80SMartin Matuska 				break;
1276b356da80SMartin Matuska 		if (mode == ZTI_NMODES)
1277b356da80SMartin Matuska 			break;
1278b356da80SMartin Matuska 
1279b356da80SMartin Matuska 		/* Invalid canary */
1280b356da80SMartin Matuska 		row[q].zti_mode = ZTI_NMODES;
1281b356da80SMartin Matuska 
1282b356da80SMartin Matuska 		/* Per-mode setup */
1283b356da80SMartin Matuska 		switch (mode) {
1284b356da80SMartin Matuska 
1285b356da80SMartin Matuska 		/*
1286b356da80SMartin Matuska 		 * FIXED is parameterised: number of queues, and number of
1287b356da80SMartin Matuska 		 * threads per queue.
1288b356da80SMartin Matuska 		 */
1289b356da80SMartin Matuska 		case ZTI_MODE_FIXED: {
1290b356da80SMartin Matuska 			/* No parameters? */
1291b356da80SMartin Matuska 			if (c == NULL || *c == '\0')
1292b356da80SMartin Matuska 				break;
1293b356da80SMartin Matuska 
1294b356da80SMartin Matuska 			/* Find next parameter */
1295b356da80SMartin Matuska 			tok = c;
1296b356da80SMartin Matuska 			c = strchr(tok, ',');
1297b356da80SMartin Matuska 			if (c == NULL)
1298b356da80SMartin Matuska 				break;
1299b356da80SMartin Matuska 
1300b356da80SMartin Matuska 			/* Take digits and convert */
1301b356da80SMartin Matuska 			unsigned long long nq;
1302b356da80SMartin Matuska 			if (!(isdigit(*tok)))
1303b356da80SMartin Matuska 				break;
1304b356da80SMartin Matuska 			err = ddi_strtoull(tok, &tok, 10, &nq);
1305b356da80SMartin Matuska 			/* Must succeed and also end at the next param sep */
1306b356da80SMartin Matuska 			if (err != 0 || tok != c)
1307b356da80SMartin Matuska 				break;
1308b356da80SMartin Matuska 
1309b356da80SMartin Matuska 			/* Move past the comma */
1310b356da80SMartin Matuska 			tok++;
1311b356da80SMartin Matuska 			/* Need another number */
1312b356da80SMartin Matuska 			if (!(isdigit(*tok)))
1313b356da80SMartin Matuska 				break;
1314b356da80SMartin Matuska 			/* Remember start to make sure we moved */
1315b356da80SMartin Matuska 			c = tok;
1316b356da80SMartin Matuska 
1317b356da80SMartin Matuska 			/* Take digits */
1318b356da80SMartin Matuska 			unsigned long long ntpq;
1319b356da80SMartin Matuska 			err = ddi_strtoull(tok, &tok, 10, &ntpq);
1320b356da80SMartin Matuska 			/* Must succeed, and moved forward */
1321b356da80SMartin Matuska 			if (err != 0 || tok == c || *tok != '\0')
1322b356da80SMartin Matuska 				break;
1323b356da80SMartin Matuska 
1324b356da80SMartin Matuska 			/*
1325b356da80SMartin Matuska 			 * sanity; zero queues/threads make no sense, and
1326b356da80SMartin Matuska 			 * 16K is almost certainly more than anyone will ever
1327b356da80SMartin Matuska 			 * need and avoids silly numbers like UINT32_MAX
1328b356da80SMartin Matuska 			 */
1329b356da80SMartin Matuska 			if (nq == 0 || nq >= 16384 ||
1330b356da80SMartin Matuska 			    ntpq == 0 || ntpq >= 16384)
1331b356da80SMartin Matuska 				break;
1332b356da80SMartin Matuska 
1333b356da80SMartin Matuska 			const zio_taskq_info_t zti = ZTI_P(ntpq, nq);
1334b356da80SMartin Matuska 			row[q] = zti;
1335b356da80SMartin Matuska 			break;
1336b356da80SMartin Matuska 		}
1337b356da80SMartin Matuska 
1338b356da80SMartin Matuska 		case ZTI_MODE_SCALE: {
1339b356da80SMartin Matuska 			const zio_taskq_info_t zti = ZTI_SCALE;
1340b356da80SMartin Matuska 			row[q] = zti;
1341b356da80SMartin Matuska 			break;
1342b356da80SMartin Matuska 		}
1343b356da80SMartin Matuska 
1344b356da80SMartin Matuska 		case ZTI_MODE_SYNC: {
1345b356da80SMartin Matuska 			const zio_taskq_info_t zti = ZTI_SYNC;
1346b356da80SMartin Matuska 			row[q] = zti;
1347b356da80SMartin Matuska 			break;
1348b356da80SMartin Matuska 		}
1349b356da80SMartin Matuska 
1350b356da80SMartin Matuska 		case ZTI_MODE_NULL: {
1351b356da80SMartin Matuska 			/*
1352b356da80SMartin Matuska 			 * Can only null the high-priority queues; the general-
1353b356da80SMartin Matuska 			 * purpose ones have to exist.
1354b356da80SMartin Matuska 			 */
1355b356da80SMartin Matuska 			if (q != ZIO_TASKQ_ISSUE_HIGH &&
1356b356da80SMartin Matuska 			    q != ZIO_TASKQ_INTERRUPT_HIGH)
1357b356da80SMartin Matuska 				break;
1358b356da80SMartin Matuska 
1359b356da80SMartin Matuska 			const zio_taskq_info_t zti = ZTI_NULL;
1360b356da80SMartin Matuska 			row[q] = zti;
1361b356da80SMartin Matuska 			break;
1362b356da80SMartin Matuska 		}
1363b356da80SMartin Matuska 
1364b356da80SMartin Matuska 		default:
1365b356da80SMartin Matuska 			break;
1366b356da80SMartin Matuska 		}
1367b356da80SMartin Matuska 
1368b356da80SMartin Matuska 		/* Ensure we set a mode */
1369b356da80SMartin Matuska 		if (row[q].zti_mode == ZTI_NMODES)
1370b356da80SMartin Matuska 			break;
1371b356da80SMartin Matuska 	}
1372b356da80SMartin Matuska 
1373b356da80SMartin Matuska 	/* Didn't get a full row, fail */
1374b356da80SMartin Matuska 	if (q < ZIO_TASKQ_TYPES)
1375b356da80SMartin Matuska 		return (SET_ERROR(EINVAL));
1376b356da80SMartin Matuska 
1377b356da80SMartin Matuska 	/* Eat trailing space */
1378b356da80SMartin Matuska 	if (next != NULL)
1379b356da80SMartin Matuska 		while (isspace(*next))
1380b356da80SMartin Matuska 			next++;
1381b356da80SMartin Matuska 
1382b356da80SMartin Matuska 	/* If there's anything left over then fail */
1383b356da80SMartin Matuska 	if (next != NULL && *next != '\0')
1384b356da80SMartin Matuska 		return (SET_ERROR(EINVAL));
1385b356da80SMartin Matuska 
1386b356da80SMartin Matuska 	/* Success! Copy it into the real config */
1387b356da80SMartin Matuska 	for (q = 0; q < ZIO_TASKQ_TYPES; q++)
1388b356da80SMartin Matuska 		zio_taskqs[t][q] = row[q];
1389b356da80SMartin Matuska 
1390b356da80SMartin Matuska 	return (0);
1391b356da80SMartin Matuska }
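
/*
 * Illustrative note (not part of the original source): parsing the
 * documented default write row 'sync fixed,1,5 scale fixed,1,5' fills
 * the row as ISSUE = sync, ISSUE_HIGH = ZTI_P(5, 1) (one taskq of five
 * threads), INTERRUPT = scale, and INTERRUPT_HIGH = ZTI_P(5, 1).
 */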
1392b356da80SMartin Matuska 
1393b356da80SMartin Matuska static int
1394b356da80SMartin Matuska spa_taskq_param_get(zio_type_t t, char *buf, boolean_t add_newline)
1395b356da80SMartin Matuska {
1396b356da80SMartin Matuska 	int pos = 0;
1397b356da80SMartin Matuska 
1398b356da80SMartin Matuska 	/* Build parameter string from live config */
1399b356da80SMartin Matuska 	const char *sep = "";
1400b356da80SMartin Matuska 	for (uint_t q = 0; q < ZIO_TASKQ_TYPES; q++) {
1401b356da80SMartin Matuska 		const zio_taskq_info_t *zti = &zio_taskqs[t][q];
1402b356da80SMartin Matuska 		if (zti->zti_mode == ZTI_MODE_FIXED)
1403b356da80SMartin Matuska 			pos += sprintf(&buf[pos], "%s%s,%u,%u", sep,
1404b356da80SMartin Matuska 			    modes[zti->zti_mode], zti->zti_count,
1405b356da80SMartin Matuska 			    zti->zti_value);
1406b356da80SMartin Matuska 		else
1407b356da80SMartin Matuska 			pos += sprintf(&buf[pos], "%s%s", sep,
1408b356da80SMartin Matuska 			    modes[zti->zti_mode]);
1409b356da80SMartin Matuska 		sep = " ";
1410b356da80SMartin Matuska 	}
1411b356da80SMartin Matuska 
1412b356da80SMartin Matuska 	if (add_newline)
1413b356da80SMartin Matuska 		buf[pos++] = '\n';
1414b356da80SMartin Matuska 	buf[pos] = '\0';
1415b356da80SMartin Matuska 
1416b356da80SMartin Matuska 	return (pos);
1417b356da80SMartin Matuska }
1418b356da80SMartin Matuska 
1419b356da80SMartin Matuska #ifdef __linux__
1420b356da80SMartin Matuska static int
1421b356da80SMartin Matuska spa_taskq_read_param_set(const char *val, zfs_kernel_param_t *kp)
1422b356da80SMartin Matuska {
1423b356da80SMartin Matuska 	char *cfg = kmem_strdup(val);
1424b356da80SMartin Matuska 	int err = spa_taskq_param_set(ZIO_TYPE_READ, cfg);
1425b356da80SMartin Matuska 	kmem_free(cfg, strlen(val)+1);
1426b356da80SMartin Matuska 	return (-err);
1427b356da80SMartin Matuska }
1428b356da80SMartin Matuska static int
1429b356da80SMartin Matuska spa_taskq_read_param_get(char *buf, zfs_kernel_param_t *kp)
1430b356da80SMartin Matuska {
1431b356da80SMartin Matuska 	return (spa_taskq_param_get(ZIO_TYPE_READ, buf, TRUE));
1432b356da80SMartin Matuska }
1433b356da80SMartin Matuska 
1434b356da80SMartin Matuska static int
1435b356da80SMartin Matuska spa_taskq_write_param_set(const char *val, zfs_kernel_param_t *kp)
1436b356da80SMartin Matuska {
1437b356da80SMartin Matuska 	char *cfg = kmem_strdup(val);
1438b356da80SMartin Matuska 	int err = spa_taskq_param_set(ZIO_TYPE_WRITE, cfg);
1439b356da80SMartin Matuska 	kmem_free(cfg, strlen(val)+1);
1440b356da80SMartin Matuska 	return (-err);
1441b356da80SMartin Matuska }
1442b356da80SMartin Matuska static int
1443b356da80SMartin Matuska spa_taskq_write_param_get(char *buf, zfs_kernel_param_t *kp)
1444b356da80SMartin Matuska {
1445b356da80SMartin Matuska 	return (spa_taskq_param_get(ZIO_TYPE_WRITE, buf, TRUE));
1446b356da80SMartin Matuska }
1447b356da80SMartin Matuska #else
1448b356da80SMartin Matuska /*
1449b356da80SMartin Matuska  * On FreeBSD, load-time parameters can be set up before malloc() is available,
1450b356da80SMartin Matuska  * so we have to do all the parsing work on the stack.
1451b356da80SMartin Matuska  */
1452b356da80SMartin Matuska #define	SPA_TASKQ_PARAM_MAX	(128)
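
/*
 * Sizing note (illustrative): 128 bytes comfortably covers the longest
 * legal setting; even four maximal "fixed,16383,16383" entries plus
 * separating spaces come to only about 71 bytes.
 */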
1453b356da80SMartin Matuska 
1454b356da80SMartin Matuska static int
1455b356da80SMartin Matuska spa_taskq_read_param(ZFS_MODULE_PARAM_ARGS)
1456b356da80SMartin Matuska {
1457b356da80SMartin Matuska 	char buf[SPA_TASKQ_PARAM_MAX];
145809af4bf2SMark Johnston 	int err;
1459b356da80SMartin Matuska 
146009af4bf2SMark Johnston 	(void) spa_taskq_param_get(ZIO_TYPE_READ, buf, FALSE);
1461b356da80SMartin Matuska 	err = sysctl_handle_string(oidp, buf, sizeof (buf), req);
146209af4bf2SMark Johnston 	if (err || req->newptr == NULL)
1463b356da80SMartin Matuska 		return (err);
1464b356da80SMartin Matuska 	return (spa_taskq_param_set(ZIO_TYPE_READ, buf));
1465b356da80SMartin Matuska }
1466b356da80SMartin Matuska 
1467b356da80SMartin Matuska static int
1468b356da80SMartin Matuska spa_taskq_write_param(ZFS_MODULE_PARAM_ARGS)
1469b356da80SMartin Matuska {
1470b356da80SMartin Matuska 	char buf[SPA_TASKQ_PARAM_MAX];
147109af4bf2SMark Johnston 	int err;
1472b356da80SMartin Matuska 
147309af4bf2SMark Johnston 	(void) spa_taskq_param_get(ZIO_TYPE_WRITE, buf, FALSE);
1474b356da80SMartin Matuska 	err = sysctl_handle_string(oidp, buf, sizeof (buf), req);
147509af4bf2SMark Johnston 	if (err || req->newptr == NULL)
1476b356da80SMartin Matuska 		return (err);
1477b356da80SMartin Matuska 	return (spa_taskq_param_set(ZIO_TYPE_WRITE, buf));
1478b356da80SMartin Matuska }
1479b356da80SMartin Matuska #endif
1480b356da80SMartin Matuska #endif /* _KERNEL */
1481b356da80SMartin Matuska 
1482eda14cbcSMatt Macy /*
1483eda14cbcSMatt Macy  * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
1484eda14cbcSMatt Macy  * Note that a type may have multiple discrete taskqs to avoid lock contention
148514c2e0a0SMartin Matuska  * on the taskq itself.
1486eda14cbcSMatt Macy  */
148714c2e0a0SMartin Matuska static taskq_t *
148814c2e0a0SMartin Matuska spa_taskq_dispatch_select(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
148914c2e0a0SMartin Matuska     zio_t *zio)
1490eda14cbcSMatt Macy {
1491eda14cbcSMatt Macy 	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
1492eda14cbcSMatt Macy 	taskq_t *tq;
1493eda14cbcSMatt Macy 
1494eda14cbcSMatt Macy 	ASSERT3P(tqs->stqs_taskq, !=, NULL);
1495eda14cbcSMatt Macy 	ASSERT3U(tqs->stqs_count, !=, 0);
1496eda14cbcSMatt Macy 
149714c2e0a0SMartin Matuska 	if (tqs->stqs_count == 1) {
149814c2e0a0SMartin Matuska 		tq = tqs->stqs_taskq[0];
149914c2e0a0SMartin Matuska 	} else if ((t == ZIO_TYPE_WRITE) && (q == ZIO_TASKQ_ISSUE) &&
150014c2e0a0SMartin Matuska 	    (zio != NULL) && ZIO_HAS_ALLOCATOR(zio)) {
150114c2e0a0SMartin Matuska 		tq = tqs->stqs_taskq[zio->io_allocator % tqs->stqs_count];
150214c2e0a0SMartin Matuska 	} else {
150314c2e0a0SMartin Matuska 		tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
1504eda14cbcSMatt Macy 	}
1505eda14cbcSMatt Macy 	return (tq);
1506eda14cbcSMatt Macy }
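
/*
 * Illustrative note (not part of the original source): for a write zio
 * in the issue queue with io_allocator = 5 and stqs_count = 4, the
 * logic above selects taskq 5 % 4 = 1, keeping each allocator pinned
 * to a single taskq; all other cases spread work using the current
 * time as a cheap pseudo-random index.
 */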
1507eda14cbcSMatt Macy 
1508eda14cbcSMatt Macy void
150914c2e0a0SMartin Matuska spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
151014c2e0a0SMartin Matuska     task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent,
1511eda14cbcSMatt Macy     zio_t *zio)
151214c2e0a0SMartin Matuska {
151314c2e0a0SMartin Matuska 	taskq_t *tq = spa_taskq_dispatch_select(spa, t, q, zio);
151414c2e0a0SMartin Matuska 	taskq_dispatch_ent(tq, func, arg, flags, ent);
151514c2e0a0SMartin Matuska }
151614c2e0a0SMartin Matuska 
151714c2e0a0SMartin Matuska /*
1518eda14cbcSMatt Macy  * Same as spa_taskq_dispatch_ent() but block on the task until completion.
1519eda14cbcSMatt Macy  */
1520eda14cbcSMatt Macy void
1521eda14cbcSMatt Macy spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
1522eda14cbcSMatt Macy     task_func_t *func, void *arg, uint_t flags)
1523eda14cbcSMatt Macy {
1524eda14cbcSMatt Macy 	taskq_t *tq = spa_taskq_dispatch_select(spa, t, q, NULL);
1525eda14cbcSMatt Macy 	taskqid_t id = taskq_dispatch(tq, func, arg, flags);
1526eda14cbcSMatt Macy 	if (id)
1527eda14cbcSMatt Macy 		taskq_wait_id(tq, id);
152814c2e0a0SMartin Matuska }
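
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * source): dispatch a function onto the pool's read-interrupt taskq
 * and block until it has completed.
 */
static void
example_dispatch_and_wait(spa_t *spa, task_func_t *fn, void *arg)
{
	spa_taskq_dispatch_sync(spa, ZIO_TYPE_READ, ZIO_TASKQ_INTERRUPT,
	    fn, arg, TQ_SLEEP);
}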
152914c2e0a0SMartin Matuska 
1530eda14cbcSMatt Macy static void
1531eda14cbcSMatt Macy spa_create_zio_taskqs(spa_t *spa)
1532eda14cbcSMatt Macy {
1533eda14cbcSMatt Macy 	for (int t = 0; t < ZIO_TYPES; t++) {
1534eda14cbcSMatt Macy 		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1535eda14cbcSMatt Macy 			spa_taskqs_init(spa, t, q);
1536eda14cbcSMatt Macy 		}
1537eda14cbcSMatt Macy 	}
1538eda14cbcSMatt Macy }
1539eda14cbcSMatt Macy 
1540eda14cbcSMatt Macy #if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
1541eda14cbcSMatt Macy static void
1542eda14cbcSMatt Macy spa_thread(void *arg)
1543eda14cbcSMatt Macy {
1544eda14cbcSMatt Macy 	psetid_t zio_taskq_psrset_bind = PS_NONE;
1545eda14cbcSMatt Macy 	callb_cpr_t cprinfo;
1546eda14cbcSMatt Macy 
1547eda14cbcSMatt Macy 	spa_t *spa = arg;
1548eda14cbcSMatt Macy 	user_t *pu = PTOU(curproc);
1549eda14cbcSMatt Macy 
1550eda14cbcSMatt Macy 	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
1551eda14cbcSMatt Macy 	    spa->spa_name);
1552eda14cbcSMatt Macy 
1553eda14cbcSMatt Macy 	ASSERT(curproc != &p0);
1554eda14cbcSMatt Macy 	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
1555eda14cbcSMatt Macy 	    "zpool-%s", spa->spa_name);
1556eda14cbcSMatt Macy 	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
1557eda14cbcSMatt Macy 
1558eda14cbcSMatt Macy 	/* bind this thread to the requested psrset */
1559eda14cbcSMatt Macy 	if (zio_taskq_psrset_bind != PS_NONE) {
1560eda14cbcSMatt Macy 		pool_lock();
1561eda14cbcSMatt Macy 		mutex_enter(&cpu_lock);
1562eda14cbcSMatt Macy 		mutex_enter(&pidlock);
1563eda14cbcSMatt Macy 		mutex_enter(&curproc->p_lock);
1564eda14cbcSMatt Macy 
1565eda14cbcSMatt Macy 		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
1566eda14cbcSMatt Macy 		    0, NULL, NULL) == 0)  {
1567eda14cbcSMatt Macy 			curthread->t_bind_pset = zio_taskq_psrset_bind;
1568eda14cbcSMatt Macy 		} else {
1569eda14cbcSMatt Macy 			cmn_err(CE_WARN,
1570eda14cbcSMatt Macy 			    "Couldn't bind process for zfs pool \"%s\" to "
1571eda14cbcSMatt Macy 			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
1572eda14cbcSMatt Macy 		}
1573eda14cbcSMatt Macy 
1574eda14cbcSMatt Macy 		mutex_exit(&curproc->p_lock);
1575eda14cbcSMatt Macy 		mutex_exit(&pidlock);
1576eda14cbcSMatt Macy 		mutex_exit(&cpu_lock);
1577eda14cbcSMatt Macy 		pool_unlock();
1578eda14cbcSMatt Macy 	}
1579eda14cbcSMatt Macy 
1580eda14cbcSMatt Macy #ifdef HAVE_SYSDC
1581eda14cbcSMatt Macy 	if (zio_taskq_sysdc) {
1582eda14cbcSMatt Macy 		sysdc_thread_enter(curthread, 100, 0);
1583eda14cbcSMatt Macy 	}
15846c1e79dfSMartin Matuska #endif
1585eda14cbcSMatt Macy 
1586eda14cbcSMatt Macy 	spa->spa_proc = curproc;
1587eda14cbcSMatt Macy 	spa->spa_did = curthread->t_did;
15886c1e79dfSMartin Matuska 
1589eda14cbcSMatt Macy 	spa_create_zio_taskqs(spa);
1590eda14cbcSMatt Macy 
1591eda14cbcSMatt Macy 	mutex_enter(&spa->spa_proc_lock);
1592eda14cbcSMatt Macy 	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
1593eda14cbcSMatt Macy 
1594eda14cbcSMatt Macy 	spa->spa_proc_state = SPA_PROC_ACTIVE;
1595eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_proc_cv);
1596eda14cbcSMatt Macy 
1597eda14cbcSMatt Macy 	CALLB_CPR_SAFE_BEGIN(&cprinfo);
1598eda14cbcSMatt Macy 	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
1599eda14cbcSMatt Macy 		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1600eda14cbcSMatt Macy 	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
1601eda14cbcSMatt Macy 
1602eda14cbcSMatt Macy 	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
1603eda14cbcSMatt Macy 	spa->spa_proc_state = SPA_PROC_GONE;
1604eda14cbcSMatt Macy 	spa->spa_proc = &p0;
1605eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_proc_cv);
1606eda14cbcSMatt Macy 	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */
1607eda14cbcSMatt Macy 
1608eda14cbcSMatt Macy 	mutex_enter(&curproc->p_lock);
1609eda14cbcSMatt Macy 	lwp_exit();
1610eda14cbcSMatt Macy }
1611eda14cbcSMatt Macy #endif
1612eda14cbcSMatt Macy 
1613eda14cbcSMatt Macy extern metaslab_ops_t *metaslab_allocator(spa_t *spa);
1614eda14cbcSMatt Macy 
1615eda14cbcSMatt Macy /*
1616eda14cbcSMatt Macy  * Activate an uninitialized pool.
16172ad756a6SMartin Matuska  */
16182ad756a6SMartin Matuska static void
1619eda14cbcSMatt Macy spa_activate(spa_t *spa, spa_mode_t mode)
1620eda14cbcSMatt Macy {
1621eda14cbcSMatt Macy 	metaslab_ops_t *msp = metaslab_allocator(spa);
1622eda14cbcSMatt Macy 	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
1623eda14cbcSMatt Macy 
1624eda14cbcSMatt Macy 	spa->spa_state = POOL_STATE_ACTIVE;
16252ad756a6SMartin Matuska 	spa->spa_mode = mode;
1626eda14cbcSMatt Macy 	spa->spa_read_spacemaps = spa_mode_readable_spacemaps;
1627eda14cbcSMatt Macy 
1628eda14cbcSMatt Macy 	spa->spa_normal_class = metaslab_class_create(spa, msp);
1629eda14cbcSMatt Macy 	spa->spa_log_class = metaslab_class_create(spa, msp);
163081b22a98SMartin Matuska 	spa->spa_embedded_log_class = metaslab_class_create(spa, msp);
1631eda14cbcSMatt Macy 	spa->spa_special_class = metaslab_class_create(spa, msp);
16322ad756a6SMartin Matuska 	spa->spa_dedup_class = metaslab_class_create(spa, msp);
16332ad756a6SMartin Matuska 
16342ad756a6SMartin Matuska 	/* Try to create a covering process */
16352ad756a6SMartin Matuska 	mutex_enter(&spa->spa_proc_lock);
16362ad756a6SMartin Matuska 	ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
1637eda14cbcSMatt Macy 	ASSERT(spa->spa_proc == &p0);
1638eda14cbcSMatt Macy 	spa->spa_did = 0;
1639eda14cbcSMatt Macy 
1640eda14cbcSMatt Macy #ifdef HAVE_SPA_THREAD
1641eda14cbcSMatt Macy 	/* Only create a process if we're going to be around a while. */
1642eda14cbcSMatt Macy 	if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
1643eda14cbcSMatt Macy 		if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
1644eda14cbcSMatt Macy 		    NULL, 0) == 0) {
1645eda14cbcSMatt Macy 			spa->spa_proc_state = SPA_PROC_CREATED;
1646eda14cbcSMatt Macy 			while (spa->spa_proc_state == SPA_PROC_CREATED) {
1647eda14cbcSMatt Macy 				cv_wait(&spa->spa_proc_cv,
1648eda14cbcSMatt Macy 				    &spa->spa_proc_lock);
1649eda14cbcSMatt Macy 			}
1650eda14cbcSMatt Macy 			ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1651eda14cbcSMatt Macy 			ASSERT(spa->spa_proc != &p0);
1652eda14cbcSMatt Macy 			ASSERT(spa->spa_did != 0);
1653eda14cbcSMatt Macy 		} else {
1654eda14cbcSMatt Macy #ifdef _KERNEL
1655eda14cbcSMatt Macy 			cmn_err(CE_WARN,
1656eda14cbcSMatt Macy 			    "Couldn't create process for zfs pool \"%s\"\n",
1657eda14cbcSMatt Macy 			    spa->spa_name);
1658eda14cbcSMatt Macy #endif
1659eda14cbcSMatt Macy 		}
1660eda14cbcSMatt Macy 	}
1661eda14cbcSMatt Macy #endif /* HAVE_SPA_THREAD */
1662eda14cbcSMatt Macy 	mutex_exit(&spa->spa_proc_lock);
1663eda14cbcSMatt Macy 
1664eda14cbcSMatt Macy 	/* If we didn't create a process, we need to create our taskqs. */
1665eda14cbcSMatt Macy 	if (spa->spa_proc == &p0) {
1666eda14cbcSMatt Macy 		spa_create_zio_taskqs(spa);
1667eda14cbcSMatt Macy 	}
1668eda14cbcSMatt Macy 
1669eda14cbcSMatt Macy 	for (size_t i = 0; i < TXG_SIZE; i++) {
1670eda14cbcSMatt Macy 		spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL,
1671eda14cbcSMatt Macy 		    ZIO_FLAG_CANFAIL);
1672eda14cbcSMatt Macy 	}
1673eda14cbcSMatt Macy 
1674eda14cbcSMatt Macy 	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
1675eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_config_dirty_node));
1676eda14cbcSMatt Macy 	list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
1677eda14cbcSMatt Macy 	    offsetof(objset_t, os_evicting_node));
1678eda14cbcSMatt Macy 	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
1679eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_state_dirty_node));
1680eda14cbcSMatt Macy 
1681eda14cbcSMatt Macy 	txg_list_create(&spa->spa_vdev_txg_list, spa,
1682eda14cbcSMatt Macy 	    offsetof(struct vdev, vdev_txg_node));
1683eda14cbcSMatt Macy 
1684eda14cbcSMatt Macy 	avl_create(&spa->spa_errlist_scrub,
1685eda14cbcSMatt Macy 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
1686eda14cbcSMatt Macy 	    offsetof(spa_error_entry_t, se_avl));
1687eda14cbcSMatt Macy 	avl_create(&spa->spa_errlist_last,
1688eda14cbcSMatt Macy 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
1689eda14cbcSMatt Macy 	    offsetof(spa_error_entry_t, se_avl));
1690eda14cbcSMatt Macy 	avl_create(&spa->spa_errlist_healed,
1691eda14cbcSMatt Macy 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
1692eda14cbcSMatt Macy 	    offsetof(spa_error_entry_t, se_avl));
1693eda14cbcSMatt Macy 
1694271171e0SMartin Matuska 	spa_activate_os(spa);
1695271171e0SMartin Matuska 
1696271171e0SMartin Matuska 	spa_keystore_init(&spa->spa_keystore);
1697eda14cbcSMatt Macy 
1698c03c5b1cSMartin Matuska 	/*
1699c03c5b1cSMartin Matuska 	 * This taskq is used to perform zvol-minor-related tasks
1700eda14cbcSMatt Macy 	 * asynchronously. This has several advantages, including easy
1701eda14cbcSMatt Macy 	 * resolution of various deadlocks.
1702eda14cbcSMatt Macy 	 *
1703eda14cbcSMatt Macy 	 * The taskq must be single threaded to ensure tasks are always
1704eda14cbcSMatt Macy 	 * processed in the order in which they were dispatched.
1705180f8225SMatt Macy 	 *
1706eda14cbcSMatt Macy 	 * A taskq per pool allows one to keep the pools independent.
1707eda14cbcSMatt Macy 	 * This way if one pool is suspended, it will not impact another.
1708eda14cbcSMatt Macy 	 *
1709eda14cbcSMatt Macy 	 * The preferred location to dispatch a zvol minor task is a sync
1710eda14cbcSMatt Macy 	 * task. In this context, there is easy access to the spa_t and minimal
1711eda14cbcSMatt Macy 	 * error handling is required because the sync task must succeed.
1712eda14cbcSMatt Macy 	 */
1713eda14cbcSMatt Macy 	spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri,
1714eda14cbcSMatt Macy 	    1, INT_MAX, 0);
1715eda14cbcSMatt Macy 
1716eda14cbcSMatt Macy 	/*
1717eda14cbcSMatt Macy 	 * The taskq to preload metaslabs.
1718eda14cbcSMatt Macy 	 */
1719eda14cbcSMatt Macy 	spa->spa_metaslab_taskq = taskq_create("z_metaslab",
1720eda14cbcSMatt Macy 	    metaslab_preload_pct, maxclsyspri, 1, INT_MAX,
1721b2526e8bSMartin Matuska 	    TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
1722b2526e8bSMartin Matuska 
1723b2526e8bSMartin Matuska 	/*
1724b2526e8bSMartin Matuska 	 * Taskq dedicated to prefetcher threads: this is used to prevent the
1725b2526e8bSMartin Matuska 	 * pool traverse code from monopolizing the global (and limited)
1726b2526e8bSMartin Matuska 	 * system_taskq by inappropriately scheduling long running tasks on it.
1727b2526e8bSMartin Matuska 	 */
1728eda14cbcSMatt Macy 	spa->spa_prefetch_taskq = taskq_create("z_prefetch", 100,
1729eda14cbcSMatt Macy 	    defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
1730eda14cbcSMatt Macy 
1731eda14cbcSMatt Macy 	/*
17327877fdebSMatt Macy 	 * The taskq to upgrade datasets in this pool. Currently used by
17337877fdebSMatt Macy 	 * feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA.
1734eda14cbcSMatt Macy 	 */
1735eda14cbcSMatt Macy 	spa->spa_upgrade_taskq = taskq_create("z_upgrade", 100,
1736eda14cbcSMatt Macy 	    defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
1737eda14cbcSMatt Macy }
1738eda14cbcSMatt Macy 
17397877fdebSMatt Macy /*
17407877fdebSMatt Macy  * Opposite of spa_activate().
1741eda14cbcSMatt Macy  */
1742eda14cbcSMatt Macy static void
1743eda14cbcSMatt Macy spa_deactivate(spa_t *spa)
1744eda14cbcSMatt Macy {
1745eda14cbcSMatt Macy 	ASSERT(spa->spa_sync_on == B_FALSE);
1746eda14cbcSMatt Macy 	ASSERT(spa->spa_dsl_pool == NULL);
1747eda14cbcSMatt Macy 	ASSERT(spa->spa_root_vdev == NULL);
1748eda14cbcSMatt Macy 	ASSERT(spa->spa_async_zio_root == NULL);
1749eda14cbcSMatt Macy 	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
1750eda14cbcSMatt Macy 
1751eda14cbcSMatt Macy 	spa_evicting_os_wait(spa);
1752eda14cbcSMatt Macy 
1753eda14cbcSMatt Macy 	if (spa->spa_zvol_taskq) {
1754eda14cbcSMatt Macy 		taskq_destroy(spa->spa_zvol_taskq);
1755eda14cbcSMatt Macy 		spa->spa_zvol_taskq = NULL;
1756eda14cbcSMatt Macy 	}
1757eda14cbcSMatt Macy 
1758eda14cbcSMatt Macy 	if (spa->spa_metaslab_taskq) {
1759eda14cbcSMatt Macy 		taskq_destroy(spa->spa_metaslab_taskq);
1760eda14cbcSMatt Macy 		spa->spa_metaslab_taskq = NULL;
1761eda14cbcSMatt Macy 	}
1762b2526e8bSMartin Matuska 
1763b2526e8bSMartin Matuska 	if (spa->spa_prefetch_taskq) {
1764b2526e8bSMartin Matuska 		taskq_destroy(spa->spa_prefetch_taskq);
1765b2526e8bSMartin Matuska 		spa->spa_prefetch_taskq = NULL;
1766b2526e8bSMartin Matuska 	}
1767eda14cbcSMatt Macy 
1768eda14cbcSMatt Macy 	if (spa->spa_upgrade_taskq) {
1769eda14cbcSMatt Macy 		taskq_destroy(spa->spa_upgrade_taskq);
1770eda14cbcSMatt Macy 		spa->spa_upgrade_taskq = NULL;
1771eda14cbcSMatt Macy 	}
1772eda14cbcSMatt Macy 
1773eda14cbcSMatt Macy 	txg_list_destroy(&spa->spa_vdev_txg_list);
1774eda14cbcSMatt Macy 
1775eda14cbcSMatt Macy 	list_destroy(&spa->spa_config_dirty_list);
1776eda14cbcSMatt Macy 	list_destroy(&spa->spa_evicting_os_list);
1777eda14cbcSMatt Macy 	list_destroy(&spa->spa_state_dirty_list);
1778eda14cbcSMatt Macy 
1779eda14cbcSMatt Macy 	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
1780eda14cbcSMatt Macy 
1781eda14cbcSMatt Macy 	for (int t = 0; t < ZIO_TYPES; t++) {
1782eda14cbcSMatt Macy 		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1783eda14cbcSMatt Macy 			spa_taskqs_fini(spa, t, q);
1784eda14cbcSMatt Macy 		}
1785eda14cbcSMatt Macy 	}
1786eda14cbcSMatt Macy 
1787eda14cbcSMatt Macy 	for (size_t i = 0; i < TXG_SIZE; i++) {
1788eda14cbcSMatt Macy 		ASSERT3P(spa->spa_txg_zio[i], !=, NULL);
1789eda14cbcSMatt Macy 		VERIFY0(zio_wait(spa->spa_txg_zio[i]));
1790eda14cbcSMatt Macy 		spa->spa_txg_zio[i] = NULL;
1791eda14cbcSMatt Macy 	}
1792eda14cbcSMatt Macy 
1793eda14cbcSMatt Macy 	metaslab_class_destroy(spa->spa_normal_class);
1794eda14cbcSMatt Macy 	spa->spa_normal_class = NULL;
1795eda14cbcSMatt Macy 
1796eda14cbcSMatt Macy 	metaslab_class_destroy(spa->spa_log_class);
1797eda14cbcSMatt Macy 	spa->spa_log_class = NULL;
1798eda14cbcSMatt Macy 
1799eda14cbcSMatt Macy 	metaslab_class_destroy(spa->spa_embedded_log_class);
1800eda14cbcSMatt Macy 	spa->spa_embedded_log_class = NULL;
1801eda14cbcSMatt Macy 
1802eda14cbcSMatt Macy 	metaslab_class_destroy(spa->spa_special_class);
1803184c1b94SMartin Matuska 	spa->spa_special_class = NULL;
1804184c1b94SMartin Matuska 
1805184c1b94SMartin Matuska 	metaslab_class_destroy(spa->spa_dedup_class);
1806eda14cbcSMatt Macy 	spa->spa_dedup_class = NULL;
1807eda14cbcSMatt Macy 
1808eda14cbcSMatt Macy 	/*
1809eda14cbcSMatt Macy 	 * If this was part of an import or the open otherwise failed, we may
1810eda14cbcSMatt Macy 	 * still have errors left in the queues.  Empty them just in case.
1811eda14cbcSMatt Macy 	 */
1812eda14cbcSMatt Macy 	spa_errlog_drain(spa);
1813eda14cbcSMatt Macy 	avl_destroy(&spa->spa_errlist_scrub);
1814eda14cbcSMatt Macy 	avl_destroy(&spa->spa_errlist_last);
1815eda14cbcSMatt Macy 	avl_destroy(&spa->spa_errlist_healed);
1816eda14cbcSMatt Macy 
1817eda14cbcSMatt Macy 	spa_keystore_fini(&spa->spa_keystore);
1818eda14cbcSMatt Macy 
1819271171e0SMartin Matuska 	spa->spa_state = POOL_STATE_UNINITIALIZED;
1820eda14cbcSMatt Macy 
1821eda14cbcSMatt Macy 	mutex_enter(&spa->spa_proc_lock);
1822eda14cbcSMatt Macy 	if (spa->spa_proc_state != SPA_PROC_NONE) {
1823eda14cbcSMatt Macy 		ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1824eda14cbcSMatt Macy 		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
1825eda14cbcSMatt Macy 		cv_broadcast(&spa->spa_proc_cv);
1826eda14cbcSMatt Macy 		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
1827eda14cbcSMatt Macy 			ASSERT(spa->spa_proc != &p0);
1828eda14cbcSMatt Macy 			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1829eda14cbcSMatt Macy 		}
1830eda14cbcSMatt Macy 		ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
1831eda14cbcSMatt Macy 		spa->spa_proc_state = SPA_PROC_NONE;
1832eda14cbcSMatt Macy 	}
1833eda14cbcSMatt Macy 	ASSERT(spa->spa_proc == &p0);
1834eda14cbcSMatt Macy 	mutex_exit(&spa->spa_proc_lock);
1835eda14cbcSMatt Macy 
1836eda14cbcSMatt Macy 	/*
1837eda14cbcSMatt Macy 	 * We want to make sure spa_thread() has actually exited the ZFS
1838eda14cbcSMatt Macy 	 * module, so that the module can't be unloaded out from underneath
1839eda14cbcSMatt Macy 	 * it.
1840eda14cbcSMatt Macy 	 */
1841eda14cbcSMatt Macy 	if (spa->spa_did != 0) {
1842eda14cbcSMatt Macy 		thread_join(spa->spa_did);
1843eda14cbcSMatt Macy 		spa->spa_did = 0;
1844eda14cbcSMatt Macy 	}
1845eda14cbcSMatt Macy 
1846eda14cbcSMatt Macy 	spa_deactivate_os(spa);
1847eda14cbcSMatt Macy 
1848eda14cbcSMatt Macy }
1849c03c5b1cSMartin Matuska 
1850c03c5b1cSMartin Matuska /*
1851c03c5b1cSMartin Matuska  * Verify a pool configuration, and construct the vdev tree appropriately.  This
1852eda14cbcSMatt Macy  * will create all the necessary vdevs in the appropriate layout, with each vdev
1853eda14cbcSMatt Macy  * in the CLOSED state.  This will prep the pool before open/creation/import.
1854eda14cbcSMatt Macy  * All vdev validation is done by the vdev_alloc() routine.
1855eda14cbcSMatt Macy  */
1856eda14cbcSMatt Macy int
1857eda14cbcSMatt Macy spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
1858eda14cbcSMatt Macy     uint_t id, int atype)
1859eda14cbcSMatt Macy {
1860eda14cbcSMatt Macy 	nvlist_t **child;
1861eda14cbcSMatt Macy 	uint_t children;
1862eda14cbcSMatt Macy 	int error;
1863eda14cbcSMatt Macy 
1864eda14cbcSMatt Macy 	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
1865eda14cbcSMatt Macy 		return (error);
1866eda14cbcSMatt Macy 
1867eda14cbcSMatt Macy 	if ((*vdp)->vdev_ops->vdev_op_leaf)
1868eda14cbcSMatt Macy 		return (0);
1869eda14cbcSMatt Macy 
1870eda14cbcSMatt Macy 	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1871eda14cbcSMatt Macy 	    &child, &children);
1872eda14cbcSMatt Macy 
1873eda14cbcSMatt Macy 	if (error == ENOENT)
1874eda14cbcSMatt Macy 		return (0);
1875eda14cbcSMatt Macy 
1876eda14cbcSMatt Macy 	if (error) {
1877eda14cbcSMatt Macy 		vdev_free(*vdp);
1878eda14cbcSMatt Macy 		*vdp = NULL;
1879eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
1880eda14cbcSMatt Macy 	}
1881eda14cbcSMatt Macy 
1882eda14cbcSMatt Macy 	for (int c = 0; c < children; c++) {
1883eda14cbcSMatt Macy 		vdev_t *vd;
1884eda14cbcSMatt Macy 		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
1885eda14cbcSMatt Macy 		    atype)) != 0) {
1886eda14cbcSMatt Macy 			vdev_free(*vdp);
1887eda14cbcSMatt Macy 			*vdp = NULL;
1888eda14cbcSMatt Macy 			return (error);
1889eda14cbcSMatt Macy 		}
1890eda14cbcSMatt Macy 	}
1891eda14cbcSMatt Macy 
1892eda14cbcSMatt Macy 	ASSERT(*vdp != NULL);
1893eda14cbcSMatt Macy 
1894eda14cbcSMatt Macy 	return (0);
1895eda14cbcSMatt Macy }
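
/*
 * Illustrative sketch (hypothetical, not part of the original source):
 * a minimal nvlist of the shape spa_config_parse() walks -- a root
 * vdev with a single disk child -- built with the usual fnvlist
 * helpers.  The exact constness of fnvlist_add_nvlist_array() varies
 * between releases.
 */
static nvlist_t *
example_build_root_config(const char *path)
{
	nvlist_t *disk = fnvlist_alloc();
	fnvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
	fnvlist_add_string(disk, ZPOOL_CONFIG_PATH, path);

	nvlist_t *root = fnvlist_alloc();
	fnvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
	fnvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
	    (const nvlist_t * const *)&disk, 1);
	fnvlist_free(disk);

	return (root);
}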
1896eda14cbcSMatt Macy 
1897eda14cbcSMatt Macy static boolean_t
1898eda14cbcSMatt Macy spa_should_flush_logs_on_unload(spa_t *spa)
1899eda14cbcSMatt Macy {
1900eda14cbcSMatt Macy 	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
1901eda14cbcSMatt Macy 		return (B_FALSE);
1902eda14cbcSMatt Macy 
1903eda14cbcSMatt Macy 	if (!spa_writeable(spa))
1904eda14cbcSMatt Macy 		return (B_FALSE);
1905eda14cbcSMatt Macy 
1906eda14cbcSMatt Macy 	if (!spa->spa_sync_on)
1907eda14cbcSMatt Macy 		return (B_FALSE);
1908eda14cbcSMatt Macy 
1909eda14cbcSMatt Macy 	if (spa_state(spa) != POOL_STATE_EXPORTED)
1910eda14cbcSMatt Macy 		return (B_FALSE);
1911eda14cbcSMatt Macy 
1912eda14cbcSMatt Macy 	if (zfs_keep_log_spacemaps_at_export)
1913eda14cbcSMatt Macy 		return (B_FALSE);
1914eda14cbcSMatt Macy 
1915eda14cbcSMatt Macy 	return (B_TRUE);
1916eda14cbcSMatt Macy }
1917eda14cbcSMatt Macy 
1918eda14cbcSMatt Macy /*
1919eda14cbcSMatt Macy  * Opens a transaction that will set the flag that will instruct
1920eda14cbcSMatt Macy  * spa_sync to attempt to flush all the metaslabs for that txg.
1921eda14cbcSMatt Macy  */
1922eda14cbcSMatt Macy static void
1923eda14cbcSMatt Macy spa_unload_log_sm_flush_all(spa_t *spa)
1924eda14cbcSMatt Macy {
1925eda14cbcSMatt Macy 	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
1926eda14cbcSMatt Macy 	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
1927eda14cbcSMatt Macy 
1928eda14cbcSMatt Macy 	ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
1929eda14cbcSMatt Macy 	spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);
1930eda14cbcSMatt Macy 
1931eda14cbcSMatt Macy 	dmu_tx_commit(tx);
1932eda14cbcSMatt Macy 	txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg);
1933eda14cbcSMatt Macy }
1934eda14cbcSMatt Macy 
1935eda14cbcSMatt Macy static void
1936eda14cbcSMatt Macy spa_unload_log_sm_metadata(spa_t *spa)
1937eda14cbcSMatt Macy {
1938eda14cbcSMatt Macy 	void *cookie = NULL;
1939eda14cbcSMatt Macy 	spa_log_sm_t *sls;
1940eda14cbcSMatt Macy 	log_summary_entry_t *e;
1941eda14cbcSMatt Macy 
1942eda14cbcSMatt Macy 	while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg,
1943eda14cbcSMatt Macy 	    &cookie)) != NULL) {
19444e8d558cSMartin Matuska 		VERIFY0(sls->sls_mscount);
19454e8d558cSMartin Matuska 		kmem_free(sls, sizeof (spa_log_sm_t));
1946eda14cbcSMatt Macy 	}
1947eda14cbcSMatt Macy 
1948eda14cbcSMatt Macy 	while ((e = list_remove_head(&spa->spa_log_summary)) != NULL) {
1949eda14cbcSMatt Macy 		VERIFY0(e->lse_mscount);
1950eda14cbcSMatt Macy 		kmem_free(e, sizeof (log_summary_entry_t));
1951eda14cbcSMatt Macy 	}
19524e8d558cSMartin Matuska 
1953eda14cbcSMatt Macy 	spa->spa_unflushed_stats.sus_nblocks = 0;
1954eda14cbcSMatt Macy 	spa->spa_unflushed_stats.sus_memused = 0;
1955eda14cbcSMatt Macy 	spa->spa_unflushed_stats.sus_blocklimit = 0;
1956eda14cbcSMatt Macy }
1957eda14cbcSMatt Macy 
1958eda14cbcSMatt Macy static void
1959eda14cbcSMatt Macy spa_destroy_aux_threads(spa_t *spa)
1960eda14cbcSMatt Macy {
1961eda14cbcSMatt Macy 	if (spa->spa_condense_zthr != NULL) {
1962eda14cbcSMatt Macy 		zthr_destroy(spa->spa_condense_zthr);
1963eda14cbcSMatt Macy 		spa->spa_condense_zthr = NULL;
1964eda14cbcSMatt Macy 	}
1965eda14cbcSMatt Macy 	if (spa->spa_checkpoint_discard_zthr != NULL) {
1966eda14cbcSMatt Macy 		zthr_destroy(spa->spa_checkpoint_discard_zthr);
1967eda14cbcSMatt Macy 		spa->spa_checkpoint_discard_zthr = NULL;
1968eda14cbcSMatt Macy 	}
1969eda14cbcSMatt Macy 	if (spa->spa_livelist_delete_zthr != NULL) {
1970eda14cbcSMatt Macy 		zthr_destroy(spa->spa_livelist_delete_zthr);
1971eda14cbcSMatt Macy 		spa->spa_livelist_delete_zthr = NULL;
1972eda14cbcSMatt Macy 	}
1973eda14cbcSMatt Macy 	if (spa->spa_livelist_condense_zthr != NULL) {
1974eda14cbcSMatt Macy 		zthr_destroy(spa->spa_livelist_condense_zthr);
1975eda14cbcSMatt Macy 		spa->spa_livelist_condense_zthr = NULL;
1976eda14cbcSMatt Macy 	}
1977eda14cbcSMatt Macy 	if (spa->spa_raidz_expand_zthr != NULL) {
1978eda14cbcSMatt Macy 		zthr_destroy(spa->spa_raidz_expand_zthr);
1979eda14cbcSMatt Macy 		spa->spa_raidz_expand_zthr = NULL;
1980eda14cbcSMatt Macy 	}
1981e716630dSMartin Matuska }
1982e716630dSMartin Matuska 
1983e716630dSMartin Matuska /*
1984e716630dSMartin Matuska  * Opposite of spa_load().
1985eda14cbcSMatt Macy  */
1986eda14cbcSMatt Macy static void
1987eda14cbcSMatt Macy spa_unload(spa_t *spa)
1988eda14cbcSMatt Macy {
1989eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1990eda14cbcSMatt Macy 	ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED);
1991eda14cbcSMatt Macy 
1992eda14cbcSMatt Macy 	spa_import_progress_remove(spa_guid(spa));
1993eda14cbcSMatt Macy 	spa_load_note(spa, "UNLOADING");
1994eda14cbcSMatt Macy 
1995eda14cbcSMatt Macy 	spa_wake_waiters(spa);
1996eda14cbcSMatt Macy 
1997eda14cbcSMatt Macy 	/*
1998eda14cbcSMatt Macy 	 * If we have set the spa_final_txg, we have already performed the
1999eda14cbcSMatt Macy 	 * tasks below in spa_export_common(). We should not redo them here since
2000eda14cbcSMatt Macy 	 * we delay the final TXGs beyond what spa_final_txg is set at.
2001eda14cbcSMatt Macy 	 */
2002c03c5b1cSMartin Matuska 	if (spa->spa_final_txg == UINT64_MAX) {
2003c03c5b1cSMartin Matuska 		/*
2004c03c5b1cSMartin Matuska 		 * If the log space map feature is enabled and the pool is
2005c03c5b1cSMartin Matuska 		 * getting exported (but not destroyed), we want to spend some
2006c03c5b1cSMartin Matuska 		 * time flushing as many metaslabs as we can in an attempt to
2007c03c5b1cSMartin Matuska 		 * destroy log space maps and save import time.
2008c03c5b1cSMartin Matuska 		 */
2009c03c5b1cSMartin Matuska 		if (spa_should_flush_logs_on_unload(spa))
2010c03c5b1cSMartin Matuska 			spa_unload_log_sm_flush_all(spa);
2011c03c5b1cSMartin Matuska 
2012eda14cbcSMatt Macy 		/*
2013eda14cbcSMatt Macy 		 * Stop async tasks.
2014eda14cbcSMatt Macy 		 */
2015eda14cbcSMatt Macy 		spa_async_suspend(spa);
2016eda14cbcSMatt Macy 
2017eda14cbcSMatt Macy 		if (spa->spa_root_vdev) {
2018eda14cbcSMatt Macy 			vdev_t *root_vdev = spa->spa_root_vdev;
2019eda14cbcSMatt Macy 			vdev_initialize_stop_all(root_vdev,
2020eda14cbcSMatt Macy 			    VDEV_INITIALIZE_ACTIVE);
2021eda14cbcSMatt Macy 			vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE);
2022eda14cbcSMatt Macy 			vdev_autotrim_stop_all(spa);
2023c03c5b1cSMartin Matuska 			vdev_rebuild_stop_all(spa);
2024c03c5b1cSMartin Matuska 		}
2025eda14cbcSMatt Macy 	}
2026eda14cbcSMatt Macy 
2027eda14cbcSMatt Macy 	/*
2028eda14cbcSMatt Macy 	 * Stop syncing.
2029c03c5b1cSMartin Matuska 	 */
2030eda14cbcSMatt Macy 	if (spa->spa_sync_on) {
2031eda14cbcSMatt Macy 		txg_sync_stop(spa->spa_dsl_pool);
2032eda14cbcSMatt Macy 		spa->spa_sync_on = B_FALSE;
2033eda14cbcSMatt Macy 	}
2034eda14cbcSMatt Macy 
2035eda14cbcSMatt Macy 	/*
2036eda14cbcSMatt Macy 	 * This ensures that there is no async metaslab prefetching
2037eda14cbcSMatt Macy 	 * while we attempt to unload the spa.
2038eda14cbcSMatt Macy 	 */
2039eda14cbcSMatt Macy 	taskq_wait(spa->spa_metaslab_taskq);
2040eda14cbcSMatt Macy 
2041eda14cbcSMatt Macy 	if (spa->spa_mmp.mmp_thread)
2042eda14cbcSMatt Macy 		mmp_thread_stop(spa);
2043b2526e8bSMartin Matuska 
2044eda14cbcSMatt Macy 	/*
2045eda14cbcSMatt Macy 	 * Wait for any outstanding async I/O to complete.
2046eda14cbcSMatt Macy 	 */
2047eda14cbcSMatt Macy 	if (spa->spa_async_zio_root != NULL) {
2048eda14cbcSMatt Macy 		for (int i = 0; i < max_ncpus; i++)
2049eda14cbcSMatt Macy 			(void) zio_wait(spa->spa_async_zio_root[i]);
2050eda14cbcSMatt Macy 		kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
2051eda14cbcSMatt Macy 		spa->spa_async_zio_root = NULL;
2052eda14cbcSMatt Macy 	}
2053eda14cbcSMatt Macy 
2054eda14cbcSMatt Macy 	if (spa->spa_vdev_removal != NULL) {
2055eda14cbcSMatt Macy 		spa_vdev_removal_destroy(spa->spa_vdev_removal);
2056eda14cbcSMatt Macy 		spa->spa_vdev_removal = NULL;
2057eda14cbcSMatt Macy 	}
2058eda14cbcSMatt Macy 
2059eda14cbcSMatt Macy 	spa_destroy_aux_threads(spa);
2060eda14cbcSMatt Macy 
2061eda14cbcSMatt Macy 	spa_condense_fini(spa);
2062eda14cbcSMatt Macy 
2063eda14cbcSMatt Macy 	bpobj_close(&spa->spa_deferred_bpobj);
2064eda14cbcSMatt Macy 
2065eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
2066eda14cbcSMatt Macy 
2067eda14cbcSMatt Macy 	/*
2068eda14cbcSMatt Macy 	 * Close all vdevs.
2069eda14cbcSMatt Macy 	 */
2070eda14cbcSMatt Macy 	if (spa->spa_root_vdev)
2071eda14cbcSMatt Macy 		vdev_free(spa->spa_root_vdev);
2072eda14cbcSMatt Macy 	ASSERT(spa->spa_root_vdev == NULL);
2073eda14cbcSMatt Macy 
2074eda14cbcSMatt Macy 	/*
2075eda14cbcSMatt Macy 	 * Close the dsl pool.
2076eda14cbcSMatt Macy 	 */
2077eda14cbcSMatt Macy 	if (spa->spa_dsl_pool) {
2078eda14cbcSMatt Macy 		dsl_pool_close(spa->spa_dsl_pool);
2079eda14cbcSMatt Macy 		spa->spa_dsl_pool = NULL;
2080eda14cbcSMatt Macy 		spa->spa_meta_objset = NULL;
2081eda14cbcSMatt Macy 	}
2082eda14cbcSMatt Macy 
2083eda14cbcSMatt Macy 	ddt_unload(spa);
2084eda14cbcSMatt Macy 	brt_unload(spa);
2085eda14cbcSMatt Macy 	spa_unload_log_sm_metadata(spa);
2086eda14cbcSMatt Macy 
2087eda14cbcSMatt Macy 	/*
20882a58b312SMartin Matuska 	 * Drop and purge level 2 cache
2089eda14cbcSMatt Macy 	 */
2090eda14cbcSMatt Macy 	spa_l2cache_drop(spa);
2091eda14cbcSMatt Macy 
2092eda14cbcSMatt Macy 	if (spa->spa_spares.sav_vdevs) {
2093eda14cbcSMatt Macy 		for (int i = 0; i < spa->spa_spares.sav_count; i++)
2094eda14cbcSMatt Macy 			vdev_free(spa->spa_spares.sav_vdevs[i]);
2095eda14cbcSMatt Macy 		kmem_free(spa->spa_spares.sav_vdevs,
2096c9539b89SMartin Matuska 		    spa->spa_spares.sav_count * sizeof (void *));
2097eda14cbcSMatt Macy 		spa->spa_spares.sav_vdevs = NULL;
2098eda14cbcSMatt Macy 	}
2099eda14cbcSMatt Macy 	if (spa->spa_spares.sav_config) {
2100eda14cbcSMatt Macy 		nvlist_free(spa->spa_spares.sav_config);
2101eda14cbcSMatt Macy 		spa->spa_spares.sav_config = NULL;
2102eda14cbcSMatt Macy 	}
2103eda14cbcSMatt Macy 	spa->spa_spares.sav_count = 0;
2104eda14cbcSMatt Macy 
2105eda14cbcSMatt Macy 	if (spa->spa_l2cache.sav_vdevs) {
2106eda14cbcSMatt Macy 		for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
2107eda14cbcSMatt Macy 			vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
2108eda14cbcSMatt Macy 			vdev_free(spa->spa_l2cache.sav_vdevs[i]);
2109c9539b89SMartin Matuska 		}
2110eda14cbcSMatt Macy 		kmem_free(spa->spa_l2cache.sav_vdevs,
2111eda14cbcSMatt Macy 		    spa->spa_l2cache.sav_count * sizeof (void *));
2112eda14cbcSMatt Macy 		spa->spa_l2cache.sav_vdevs = NULL;
2113eda14cbcSMatt Macy 	}
2114eda14cbcSMatt Macy 	if (spa->spa_l2cache.sav_config) {
2115eda14cbcSMatt Macy 		nvlist_free(spa->spa_l2cache.sav_config);
2116eda14cbcSMatt Macy 		spa->spa_l2cache.sav_config = NULL;
2117eda14cbcSMatt Macy 	}
2118eda14cbcSMatt Macy 	spa->spa_l2cache.sav_count = 0;
2119eda14cbcSMatt Macy 
2120eda14cbcSMatt Macy 	spa->spa_async_suspended = 0;
2121eda14cbcSMatt Macy 
2122eda14cbcSMatt Macy 	spa->spa_indirect_vdevs_loaded = B_FALSE;
2123eda14cbcSMatt Macy 
2124eda14cbcSMatt Macy 	if (spa->spa_comment != NULL) {
2125eda14cbcSMatt Macy 		spa_strfree(spa->spa_comment);
2126eda14cbcSMatt Macy 		spa->spa_comment = NULL;
2127eda14cbcSMatt Macy 	}
2128eda14cbcSMatt Macy 	if (spa->spa_compatibility != NULL) {
2129eda14cbcSMatt Macy 		spa_strfree(spa->spa_compatibility);
2130eda14cbcSMatt Macy 		spa->spa_compatibility = NULL;
2131eda14cbcSMatt Macy 	}
2132ee36e25aSMartin Matuska 
2133ee36e25aSMartin Matuska 	spa->spa_raidz_expand = NULL;
2134ee36e25aSMartin Matuska 
2135ee36e25aSMartin Matuska 	spa_config_exit(spa, SCL_ALL, spa);
2136eda14cbcSMatt Macy }
2137e716630dSMartin Matuska 
2138e716630dSMartin Matuska /*
2139eda14cbcSMatt Macy  * Load (or re-load) the current list of vdevs describing the active spares for
2140eda14cbcSMatt Macy  * this pool.  When this is called, we have some form of basic information in
2141eda14cbcSMatt Macy  * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
2142eda14cbcSMatt Macy  * then re-generate a more complete list including status information.
2143eda14cbcSMatt Macy  */
2144eda14cbcSMatt Macy void
2145eda14cbcSMatt Macy spa_load_spares(spa_t *spa)
2146eda14cbcSMatt Macy {
2147eda14cbcSMatt Macy 	nvlist_t **spares;
2148eda14cbcSMatt Macy 	uint_t nspares;
2149eda14cbcSMatt Macy 	int i;
2150eda14cbcSMatt Macy 	vdev_t *vd, *tvd;
2151eda14cbcSMatt Macy 
2152eda14cbcSMatt Macy #ifndef _KERNEL
2153eda14cbcSMatt Macy 	/*
2154eda14cbcSMatt Macy 	 * zdb opens both the current state of the pool and the
2155eda14cbcSMatt Macy 	 * checkpointed state (if present), with a different spa_t.
2156eda14cbcSMatt Macy 	 *
2157eda14cbcSMatt Macy 	 * As spare vdevs are shared among open pools, we skip loading
2158eda14cbcSMatt Macy 	 * them when we load the checkpointed state of the pool.
2159eda14cbcSMatt Macy 	 */
2160eda14cbcSMatt Macy 	if (!spa_writeable(spa))
2161eda14cbcSMatt Macy 		return;
2162eda14cbcSMatt Macy #endif
2163eda14cbcSMatt Macy 
2164eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
2165eda14cbcSMatt Macy 
2166eda14cbcSMatt Macy 	/*
2167eda14cbcSMatt Macy 	 * First, close and free any existing spare vdevs.
2168eda14cbcSMatt Macy 	 */
2169eda14cbcSMatt Macy 	if (spa->spa_spares.sav_vdevs) {
2170eda14cbcSMatt Macy 		for (i = 0; i < spa->spa_spares.sav_count; i++) {
2171eda14cbcSMatt Macy 			vd = spa->spa_spares.sav_vdevs[i];
2172eda14cbcSMatt Macy 
2173c9539b89SMartin Matuska 			/* Undo the call to spa_activate() below */
2174eda14cbcSMatt Macy 			if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
2175eda14cbcSMatt Macy 			    B_FALSE)) != NULL && tvd->vdev_isspare)
2176eda14cbcSMatt Macy 				spa_spare_remove(tvd);
2177eda14cbcSMatt Macy 			vdev_close(vd);
2178eda14cbcSMatt Macy 			vdev_free(vd);
2179eda14cbcSMatt Macy 		}
2180eda14cbcSMatt Macy 
2181eda14cbcSMatt Macy 		kmem_free(spa->spa_spares.sav_vdevs,
2182eda14cbcSMatt Macy 		    spa->spa_spares.sav_count * sizeof (void *));
2183eda14cbcSMatt Macy 	}
2184eda14cbcSMatt Macy 
2185eda14cbcSMatt Macy 	if (spa->spa_spares.sav_config == NULL)
2186eda14cbcSMatt Macy 		nspares = 0;
2187c9539b89SMartin Matuska 	else
2188eda14cbcSMatt Macy 		VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
2189eda14cbcSMatt Macy 		    ZPOOL_CONFIG_SPARES, &spares, &nspares));
2190eda14cbcSMatt Macy 
2191eda14cbcSMatt Macy 	spa->spa_spares.sav_count = (int)nspares;
219281b22a98SMartin Matuska 	spa->spa_spares.sav_vdevs = NULL;
219381b22a98SMartin Matuska 
2194eda14cbcSMatt Macy 	if (nspares == 0)
2195eda14cbcSMatt Macy 		return;
2196eda14cbcSMatt Macy 
2197eda14cbcSMatt Macy 	/*
2198eda14cbcSMatt Macy 	 * Construct the array of vdevs, opening them to get status in the
2199eda14cbcSMatt Macy 	 * process.  For each spare, there are potentially two different vdev_t
2200eda14cbcSMatt Macy 	 * structures associated with it: one in the list of spares (used only
2201eda14cbcSMatt Macy 	 * for basic validation purposes) and one in the active vdev
2202eda14cbcSMatt Macy 	 * configuration (if it's spared in).  During this phase we open and
2203eda14cbcSMatt Macy 	 * validate each vdev on the spare list.  If the vdev also exists in the
2204eda14cbcSMatt Macy 	 * active configuration, then we also mark this vdev as an active spare.
2205eda14cbcSMatt Macy 	 */
2206eda14cbcSMatt Macy 	spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
2207eda14cbcSMatt Macy 	    KM_SLEEP);
2208eda14cbcSMatt Macy 	for (i = 0; i < spa->spa_spares.sav_count; i++) {
2209eda14cbcSMatt Macy 		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
2210eda14cbcSMatt Macy 		    VDEV_ALLOC_SPARE) == 0);
2211eda14cbcSMatt Macy 		ASSERT(vd != NULL);
2212eda14cbcSMatt Macy 
2213eda14cbcSMatt Macy 		spa->spa_spares.sav_vdevs[i] = vd;
2214eda14cbcSMatt Macy 
2215eda14cbcSMatt Macy 		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
2216eda14cbcSMatt Macy 		    B_FALSE)) != NULL) {
2217eda14cbcSMatt Macy 			if (!tvd->vdev_isspare)
2218eda14cbcSMatt Macy 				spa_spare_add(tvd);
2219eda14cbcSMatt Macy 
2220eda14cbcSMatt Macy 			/*
2221eda14cbcSMatt Macy 			 * We only mark the spare active if we were successfully
2222eda14cbcSMatt Macy 			 * able to load the vdev.  Otherwise, importing a pool
2223eda14cbcSMatt Macy 			 * with a bad active spare would result in strange
2224eda14cbcSMatt Macy 			 * behavior, because multiple pools would think the spare
2225eda14cbcSMatt Macy 			 * is actively in use.
2226eda14cbcSMatt Macy 			 *
2227eda14cbcSMatt Macy 			 * There is a vulnerability here to an equally bizarre
2228eda14cbcSMatt Macy 			 * circumstance, where a dead active spare is later
2229eda14cbcSMatt Macy 			 * brought back to life (onlined or otherwise).  Given
2230eda14cbcSMatt Macy 			 * the rarity of this scenario, and the extra complexity
2231eda14cbcSMatt Macy 			 * it adds, we ignore the possibility.
2232eda14cbcSMatt Macy 			 */
2233eda14cbcSMatt Macy 			if (!vdev_is_dead(tvd))
2234eda14cbcSMatt Macy 				spa_spare_activate(tvd);
2235eda14cbcSMatt Macy 		}
2236eda14cbcSMatt Macy 
2237eda14cbcSMatt Macy 		vd->vdev_top = vd;
2238eda14cbcSMatt Macy 		vd->vdev_aux = &spa->spa_spares;
2239eda14cbcSMatt Macy 
2240eda14cbcSMatt Macy 		if (vdev_open(vd) != 0)
2241eda14cbcSMatt Macy 			continue;
2242eda14cbcSMatt Macy 
2243eda14cbcSMatt Macy 		if (vdev_validate_aux(vd) == 0)
2244eda14cbcSMatt Macy 			spa_spare_add(vd);
2245eda14cbcSMatt Macy 	}
2246eda14cbcSMatt Macy 
2247eda14cbcSMatt Macy 	/*
2248eda14cbcSMatt Macy 	 * Recompute the stashed list of spares, with status information
2249eda14cbcSMatt Macy 	 * this time.
2250eda14cbcSMatt Macy 	 */
2251eda14cbcSMatt Macy 	fnvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES);
2252eda14cbcSMatt Macy 
2253eda14cbcSMatt Macy 	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
2254eda14cbcSMatt Macy 	    KM_SLEEP);
225581b22a98SMartin Matuska 	for (i = 0; i < spa->spa_spares.sav_count; i++)
2256eda14cbcSMatt Macy 		spares[i] = vdev_config_generate(spa,
2257eda14cbcSMatt Macy 		    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
2258eda14cbcSMatt Macy 	fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
2259eda14cbcSMatt Macy 	    ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
2260eda14cbcSMatt Macy 	    spa->spa_spares.sav_count);
2261eda14cbcSMatt Macy 	for (i = 0; i < spa->spa_spares.sav_count; i++)
226281b22a98SMartin Matuska 		nvlist_free(spares[i]);
2263681ce946SMartin Matuska 	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
2264681ce946SMartin Matuska }
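
/*
 * Editorial sketch (not part of the original source): the "recompute
 * the stashed list" step above is an instance of a general pattern --
 * replace an nvlist array under a fixed key with freshly generated
 * elements.  A minimal, hypothetical helper showing the round-trip;
 * 'make_entry' stands in for a generator such as vdev_config_generate().
 */
#if 0	/* illustrative only */
static void
rebuild_nvlist_array(nvlist_t *config, const char *key,
    nvlist_t *(*make_entry)(int), int count)
{
	nvlist_t **entries;
	int i;

	fnvlist_remove(config, key);		/* drop the stale array */
	entries = kmem_alloc(count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < count; i++)
		entries[i] = make_entry(i);	/* fresh element w/ status */
	fnvlist_add_nvlist_array(config, key,
	    (const nvlist_t * const *)entries, count);
	/* The array contents were copied in, so free our working set. */
	for (i = 0; i < count; i++)
		nvlist_free(entries[i]);
	kmem_free(entries, count * sizeof (void *));
}
#endif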
2265eda14cbcSMatt Macy 
2266eda14cbcSMatt Macy /*
2267eda14cbcSMatt Macy  * Load (or re-load) the current list of vdevs describing the active l2cache for
2268eda14cbcSMatt Macy  * this pool.  When this is called, we have some form of basic information in
2269eda14cbcSMatt Macy  * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
2270eda14cbcSMatt Macy  * then re-generate a more complete list including status information.
2271eda14cbcSMatt Macy  * Devices which are already active have their details maintained, and are
2272eda14cbcSMatt Macy  * not re-opened.
2273eda14cbcSMatt Macy  */
2274eda14cbcSMatt Macy void
2275eda14cbcSMatt Macy spa_load_l2cache(spa_t *spa)
2276eda14cbcSMatt Macy {
2277eda14cbcSMatt Macy 	nvlist_t **l2cache = NULL;
2278eda14cbcSMatt Macy 	uint_t nl2cache;
2279eda14cbcSMatt Macy 	int i, j, oldnvdevs;
2280eda14cbcSMatt Macy 	uint64_t guid;
2281eda14cbcSMatt Macy 	vdev_t *vd, **oldvdevs, **newvdevs;
2282eda14cbcSMatt Macy 	spa_aux_vdev_t *sav = &spa->spa_l2cache;
2283eda14cbcSMatt Macy 
2284eda14cbcSMatt Macy #ifndef _KERNEL
2285eda14cbcSMatt Macy 	/*
2286eda14cbcSMatt Macy 	 * zdb opens both the current state of the pool and the
2287eda14cbcSMatt Macy 	 * checkpointed state (if present), with a different spa_t.
2288eda14cbcSMatt Macy 	 *
2289eda14cbcSMatt Macy 	 * As L2 caches are part of the ARC which is shared among open
2290eda14cbcSMatt Macy 	 * pools, we skip loading them when we load the checkpointed
2291eda14cbcSMatt Macy 	 * state of the pool.
2292eda14cbcSMatt Macy 	 */
2293eda14cbcSMatt Macy 	if (!spa_writeable(spa))
2294eda14cbcSMatt Macy 		return;
2295eda14cbcSMatt Macy #endif
2296eda14cbcSMatt Macy 
2297eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
2298eda14cbcSMatt Macy 
2299eda14cbcSMatt Macy 	oldvdevs = sav->sav_vdevs;
2300eda14cbcSMatt Macy 	oldnvdevs = sav->sav_count;
2301eda14cbcSMatt Macy 	sav->sav_vdevs = NULL;
2302eda14cbcSMatt Macy 	sav->sav_count = 0;
2303eda14cbcSMatt Macy 
2304eda14cbcSMatt Macy 	if (sav->sav_config == NULL) {
2305eda14cbcSMatt Macy 		nl2cache = 0;
2306eda14cbcSMatt Macy 		newvdevs = NULL;
2307eda14cbcSMatt Macy 		goto out;
2308eda14cbcSMatt Macy 	}
2309eda14cbcSMatt Macy 
2310eda14cbcSMatt Macy 	VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config,
2311eda14cbcSMatt Macy 	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache));
2312eda14cbcSMatt Macy 	newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
2313eda14cbcSMatt Macy 
231481b22a98SMartin Matuska 	/*
231581b22a98SMartin Matuska 	 * Process new nvlist of vdevs.
2316eda14cbcSMatt Macy 	 */
2317eda14cbcSMatt Macy 	for (i = 0; i < nl2cache; i++) {
2318eda14cbcSMatt Macy 		guid = fnvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID);
2319eda14cbcSMatt Macy 
2320eda14cbcSMatt Macy 		newvdevs[i] = NULL;
2321eda14cbcSMatt Macy 		for (j = 0; j < oldnvdevs; j++) {
232281b22a98SMartin Matuska 			vd = oldvdevs[j];
2323eda14cbcSMatt Macy 			if (vd != NULL && guid == vd->vdev_guid) {
2324eda14cbcSMatt Macy 				/*
2325eda14cbcSMatt Macy 				 * Retain previous vdev for add/remove ops.
2326eda14cbcSMatt Macy 				 */
2327eda14cbcSMatt Macy 				newvdevs[i] = vd;
2328eda14cbcSMatt Macy 				oldvdevs[j] = NULL;
2329eda14cbcSMatt Macy 				break;
2330eda14cbcSMatt Macy 			}
2331eda14cbcSMatt Macy 		}
2332eda14cbcSMatt Macy 
2333eda14cbcSMatt Macy 		if (newvdevs[i] == NULL) {
2334eda14cbcSMatt Macy 			/*
2335eda14cbcSMatt Macy 			 * Create new vdev
2336eda14cbcSMatt Macy 			 */
2337eda14cbcSMatt Macy 			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
2338eda14cbcSMatt Macy 			    VDEV_ALLOC_L2CACHE) == 0);
2339eda14cbcSMatt Macy 			ASSERT(vd != NULL);
2340eda14cbcSMatt Macy 			newvdevs[i] = vd;
2341eda14cbcSMatt Macy 
2342eda14cbcSMatt Macy 			/*
2343eda14cbcSMatt Macy 			 * Commit this vdev as an l2cache device,
2344eda14cbcSMatt Macy 			 * even if it fails to open.
2345eda14cbcSMatt Macy 			 */
2346eda14cbcSMatt Macy 			spa_l2cache_add(vd);
2347eda14cbcSMatt Macy 
2348eda14cbcSMatt Macy 			vd->vdev_top = vd;
2349eda14cbcSMatt Macy 			vd->vdev_aux = sav;
2350eda14cbcSMatt Macy 
2351eda14cbcSMatt Macy 			spa_l2cache_activate(vd);
2352eda14cbcSMatt Macy 
2353eda14cbcSMatt Macy 			if (vdev_open(vd) != 0)
2354eda14cbcSMatt Macy 				continue;
2355eda14cbcSMatt Macy 
2356eda14cbcSMatt Macy 			(void) vdev_validate_aux(vd);
2357eda14cbcSMatt Macy 
2358eda14cbcSMatt Macy 			if (!vdev_is_dead(vd))
2359eda14cbcSMatt Macy 				l2arc_add_vdev(spa, vd);
2360eda14cbcSMatt Macy 
2361eda14cbcSMatt Macy 			/*
2362eda14cbcSMatt Macy 			 * Upon cache device addition to a pool, pool
2363eda14cbcSMatt Macy 			 * creation with a cache device, or when the
2364eda14cbcSMatt Macy 			 * header of the device is invalid, we issue an
2365eda14cbcSMatt Macy 			 * async TRIM command for the whole device; it
2366eda14cbcSMatt Macy 			 * will execute only if l2arc_trim_ahead > 0.
2367eda14cbcSMatt Macy 			 */
2368eda14cbcSMatt Macy 			spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
2369eda14cbcSMatt Macy 		}
2370eda14cbcSMatt Macy 	}
2371eda14cbcSMatt Macy 
2372eda14cbcSMatt Macy 	sav->sav_vdevs = newvdevs;
2373eda14cbcSMatt Macy 	sav->sav_count = (int)nl2cache;
2374eda14cbcSMatt Macy 
2375eda14cbcSMatt Macy 	/*
2376eda14cbcSMatt Macy 	 * Recompute the stashed list of l2cache devices, with status
2377eda14cbcSMatt Macy 	 * information this time.
2378eda14cbcSMatt Macy 	 */
2379eda14cbcSMatt Macy 	fnvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE);
2380eda14cbcSMatt Macy 
2381eda14cbcSMatt Macy 	if (sav->sav_count > 0)
2382eda14cbcSMatt Macy 		l2cache = kmem_alloc(sav->sav_count * sizeof (void *),
238381b22a98SMartin Matuska 		    KM_SLEEP);
2384eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_count; i++)
2385eda14cbcSMatt Macy 		l2cache[i] = vdev_config_generate(spa,
2386eda14cbcSMatt Macy 		    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
2387eda14cbcSMatt Macy 	fnvlist_add_nvlist_array(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
2388eda14cbcSMatt Macy 	    (const nvlist_t * const *)l2cache, sav->sav_count);
2389eda14cbcSMatt Macy 
2390eda14cbcSMatt Macy out:
2391681ce946SMartin Matuska 	/*
2392681ce946SMartin Matuska 	 * Purge vdevs that were dropped
2393eda14cbcSMatt Macy 	 */
2394eda14cbcSMatt Macy 	if (oldvdevs) {
2395eda14cbcSMatt Macy 		for (i = 0; i < oldnvdevs; i++) {
2396eda14cbcSMatt Macy 			uint64_t pool;
2397eda14cbcSMatt Macy 
2398c9539b89SMartin Matuska 			vd = oldvdevs[i];
2399eda14cbcSMatt Macy 			if (vd != NULL) {
2400eda14cbcSMatt Macy 				ASSERT(vd->vdev_isl2cache);
2401eda14cbcSMatt Macy 
2402eda14cbcSMatt Macy 				if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
2403eda14cbcSMatt Macy 				    pool != 0ULL && l2arc_vdev_present(vd))
2404eda14cbcSMatt Macy 					l2arc_remove_vdev(vd);
2405eda14cbcSMatt Macy 				vdev_clear_stats(vd);
2406eda14cbcSMatt Macy 				vdev_free(vd);
2407eda14cbcSMatt Macy 			}
2408eda14cbcSMatt Macy 		}
2409eda14cbcSMatt Macy 
2410eda14cbcSMatt Macy 		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
2411eda14cbcSMatt Macy 	}
2412eda14cbcSMatt Macy 
2413eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_count; i++)
2414eda14cbcSMatt Macy 		nvlist_free(l2cache[i]);
2415c9539b89SMartin Matuska 	if (sav->sav_count)
2416eda14cbcSMatt Macy 		kmem_free(l2cache, sav->sav_count * sizeof (void *));
2417eda14cbcSMatt Macy }
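
/*
 * Editorial sketch (not part of the original source): the l2cache
 * reload above is a "retain by guid" merge -- reuse the previously
 * instantiated vdev_t when its guid still appears in the new config,
 * and NULL out its old slot so the purge pass at 'out:' skips it.
 * The matching step in isolation, as a hypothetical helper:
 */
#if 0	/* illustrative only */
static vdev_t *
retain_by_guid(vdev_t **oldvdevs, int oldnvdevs, uint64_t guid)
{
	for (int j = 0; j < oldnvdevs; j++) {
		vdev_t *vd = oldvdevs[j];
		if (vd != NULL && vd->vdev_guid == guid) {
			oldvdevs[j] = NULL;	/* keep it off the purge list */
			return (vd);
		}
	}
	return (NULL);		/* caller must create a fresh vdev */
}
#endif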
2418eda14cbcSMatt Macy 
2419eda14cbcSMatt Macy static int
2420eda14cbcSMatt Macy load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
2421eda14cbcSMatt Macy {
2422eda14cbcSMatt Macy 	dmu_buf_t *db;
2423eda14cbcSMatt Macy 	char *packed = NULL;
2424eda14cbcSMatt Macy 	size_t nvsize = 0;
2425eda14cbcSMatt Macy 	int error;
2426eda14cbcSMatt Macy 	*value = NULL;
2427eda14cbcSMatt Macy 
2428eda14cbcSMatt Macy 	error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
2429eda14cbcSMatt Macy 	if (error)
2430eda14cbcSMatt Macy 		return (error);
2431eda14cbcSMatt Macy 
2432eda14cbcSMatt Macy 	nvsize = *(uint64_t *)db->db_data;
2433eda14cbcSMatt Macy 	dmu_buf_rele(db, FTAG);
2434eda14cbcSMatt Macy 
2435eda14cbcSMatt Macy 	packed = vmem_alloc(nvsize, KM_SLEEP);
2436eda14cbcSMatt Macy 	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
2437eda14cbcSMatt Macy 	    DMU_READ_PREFETCH);
2438eda14cbcSMatt Macy 	if (error == 0)
2439eda14cbcSMatt Macy 		error = nvlist_unpack(packed, nvsize, value, 0);
2440eda14cbcSMatt Macy 	vmem_free(packed, nvsize);
2441eda14cbcSMatt Macy 
2442eda14cbcSMatt Macy 	return (error);
2443eda14cbcSMatt Macy }
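
/*
 * Editorial sketch (not part of the original source): load_nvlist()
 * reads a packed nvlist stored in a DMU object, with the packed size
 * kept in the object's bonus buffer.  A hedged sketch of the inverse
 * operation in syncing context; 'save_nvlist' is a hypothetical name
 * and error handling is elided.
 */
#if 0	/* illustrative only */
static void
save_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
{
	size_t nvsize;
	char *packed = fnvlist_pack(nv, &nvsize);
	dmu_buf_t *db;

	/* Object body: the packed bytes themselves. */
	dmu_write(spa->spa_meta_objset, obj, 0, nvsize, packed, tx);
	fnvlist_pack_free(packed, nvsize);

	/* Bonus buffer: the packed size, as read back by load_nvlist(). */
	VERIFY0(dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	*(uint64_t *)db->db_data = (uint64_t)nvsize;
	dmu_buf_rele(db, FTAG);
}
#endif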
2444eda14cbcSMatt Macy 
2445eda14cbcSMatt Macy /*
2446eda14cbcSMatt Macy  * Concrete top-level vdevs that are not missing and are not logs. At every
2447eda14cbcSMatt Macy  * spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds.
2448eda14cbcSMatt Macy  */
2449eda14cbcSMatt Macy static uint64_t
2450eda14cbcSMatt Macy spa_healthy_core_tvds(spa_t *spa)
2451eda14cbcSMatt Macy {
2452eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
2453eda14cbcSMatt Macy 	uint64_t tvds = 0;
2454eda14cbcSMatt Macy 
2455eda14cbcSMatt Macy 	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
2456eda14cbcSMatt Macy 		vdev_t *vd = rvd->vdev_child[i];
2457eda14cbcSMatt Macy 		if (vd->vdev_islog)
2458eda14cbcSMatt Macy 			continue;
2459eda14cbcSMatt Macy 		if (vdev_is_concrete(vd) && !vdev_is_dead(vd))
2460eda14cbcSMatt Macy 			tvds++;
2461eda14cbcSMatt Macy 	}
2462eda14cbcSMatt Macy 
2463eda14cbcSMatt Macy 	return (tvds);
2464eda14cbcSMatt Macy }
2465eda14cbcSMatt Macy 
2466eda14cbcSMatt Macy /*
2467eda14cbcSMatt Macy  * Checks to see if the given vdev could not be opened, in which case we post a
2468eda14cbcSMatt Macy  * sysevent to notify the autoreplace code that the device has been removed.
2469eda14cbcSMatt Macy  */
2470eda14cbcSMatt Macy static void
2471eda14cbcSMatt Macy spa_check_removed(vdev_t *vd)
2472eda14cbcSMatt Macy {
2473eda14cbcSMatt Macy 	for (uint64_t c = 0; c < vd->vdev_children; c++)
2474eda14cbcSMatt Macy 		spa_check_removed(vd->vdev_child[c]);
2475eda14cbcSMatt Macy 
2476eda14cbcSMatt Macy 	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
2477eda14cbcSMatt Macy 	    vdev_is_concrete(vd)) {
2478eda14cbcSMatt Macy 		zfs_post_autoreplace(vd->vdev_spa, vd);
2479eda14cbcSMatt Macy 		spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK);
2480eda14cbcSMatt Macy 	}
2481eda14cbcSMatt Macy }
2482eda14cbcSMatt Macy 
2483eda14cbcSMatt Macy static int
2484eda14cbcSMatt Macy spa_check_for_missing_logs(spa_t *spa)
2485eda14cbcSMatt Macy {
2486eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
2487eda14cbcSMatt Macy 
2488eda14cbcSMatt Macy 	/*
2489eda14cbcSMatt Macy 	 * If we're doing a normal import, then build up any additional
2490eda14cbcSMatt Macy 	 * diagnostic information about missing log devices.
2491eda14cbcSMatt Macy 	 * We'll pass this up to the user for further processing.
2492eda14cbcSMatt Macy 	 */
2493eda14cbcSMatt Macy 	if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
2494eda14cbcSMatt Macy 		nvlist_t **child, *nv;
2495eda14cbcSMatt Macy 		uint64_t idx = 0;
2496eda14cbcSMatt Macy 
2497eda14cbcSMatt Macy 		child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *),
2498eda14cbcSMatt Macy 		    KM_SLEEP);
2499eda14cbcSMatt Macy 		nv = fnvlist_alloc();
2500eda14cbcSMatt Macy 
2501eda14cbcSMatt Macy 		for (uint64_t c = 0; c < rvd->vdev_children; c++) {
2502eda14cbcSMatt Macy 			vdev_t *tvd = rvd->vdev_child[c];
250381b22a98SMartin Matuska 
2504eda14cbcSMatt Macy 			/*
2505eda14cbcSMatt Macy 			 * We consider a device missing only if it failed
2506eda14cbcSMatt Macy 			 * to open (i.e. offline or faulted devices are not
2507eda14cbcSMatt Macy 			 * considered missing).
2508eda14cbcSMatt Macy 			 */
2509eda14cbcSMatt Macy 			if (tvd->vdev_islog &&
2510eda14cbcSMatt Macy 			    tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
2511eda14cbcSMatt Macy 				child[idx++] = vdev_config_generate(spa, tvd,
2512eda14cbcSMatt Macy 				    B_FALSE, VDEV_CONFIG_MISSING);
2513eda14cbcSMatt Macy 			}
2514eda14cbcSMatt Macy 		}
2515eda14cbcSMatt Macy 
2516eda14cbcSMatt Macy 		if (idx > 0) {
2517eda14cbcSMatt Macy 			fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2518eda14cbcSMatt Macy 			    (const nvlist_t * const *)child, idx);
2519eda14cbcSMatt Macy 			fnvlist_add_nvlist(spa->spa_load_info,
2520eda14cbcSMatt Macy 			    ZPOOL_CONFIG_MISSING_DEVICES, nv);
2521681ce946SMartin Matuska 
2522681ce946SMartin Matuska 			for (uint64_t i = 0; i < idx; i++)
2523eda14cbcSMatt Macy 				nvlist_free(child[i]);
2524eda14cbcSMatt Macy 		}
2525eda14cbcSMatt Macy 		nvlist_free(nv);
2526eda14cbcSMatt Macy 		kmem_free(child, rvd->vdev_children * sizeof (char **));
2527eda14cbcSMatt Macy 
2528eda14cbcSMatt Macy 		if (idx > 0) {
2529eda14cbcSMatt Macy 			spa_load_failed(spa, "some log devices are missing");
2530eda14cbcSMatt Macy 			vdev_dbgmsg_print_tree(rvd, 2);
2531eda14cbcSMatt Macy 			return (SET_ERROR(ENXIO));
2532eda14cbcSMatt Macy 		}
2533eda14cbcSMatt Macy 	} else {
2534eda14cbcSMatt Macy 		for (uint64_t c = 0; c < rvd->vdev_children; c++) {
2535eda14cbcSMatt Macy 			vdev_t *tvd = rvd->vdev_child[c];
2536eda14cbcSMatt Macy 
2537eda14cbcSMatt Macy 			if (tvd->vdev_islog &&
2538eda14cbcSMatt Macy 			    tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
2539eda14cbcSMatt Macy 				spa_set_log_state(spa, SPA_LOG_CLEAR);
2540eda14cbcSMatt Macy 				spa_load_note(spa, "some log devices are "
2541eda14cbcSMatt Macy 				    "missing, ZIL is dropped.");
2542eda14cbcSMatt Macy 				vdev_dbgmsg_print_tree(rvd, 2);
2543eda14cbcSMatt Macy 				break;
2544eda14cbcSMatt Macy 			}
2545eda14cbcSMatt Macy 		}
2546eda14cbcSMatt Macy 	}
2547eda14cbcSMatt Macy 
2548eda14cbcSMatt Macy 	return (0);
2549eda14cbcSMatt Macy }
2550eda14cbcSMatt Macy 
2551eda14cbcSMatt Macy /*
2552eda14cbcSMatt Macy  * Check for missing log devices
2553eda14cbcSMatt Macy  */
2554eda14cbcSMatt Macy static boolean_t
2555eda14cbcSMatt Macy spa_check_logs(spa_t *spa)
2556eda14cbcSMatt Macy {
2557eda14cbcSMatt Macy 	boolean_t rv = B_FALSE;
2558eda14cbcSMatt Macy 	dsl_pool_t *dp = spa_get_dsl(spa);
2559eda14cbcSMatt Macy 
2560eda14cbcSMatt Macy 	switch (spa->spa_log_state) {
2561eda14cbcSMatt Macy 	default:
2562eda14cbcSMatt Macy 		break;
2563eda14cbcSMatt Macy 	case SPA_LOG_MISSING:
2564eda14cbcSMatt Macy 		/* need to recheck in case slog has been restored */
2565eda14cbcSMatt Macy 	case SPA_LOG_UNKNOWN:
2566eda14cbcSMatt Macy 		rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
2567eda14cbcSMatt Macy 		    zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
2568eda14cbcSMatt Macy 		if (rv)
2569eda14cbcSMatt Macy 			spa_set_log_state(spa, SPA_LOG_MISSING);
2570eda14cbcSMatt Macy 		break;
2571eda14cbcSMatt Macy 	}
2572eda14cbcSMatt Macy 	return (rv);
2573eda14cbcSMatt Macy }
2574eda14cbcSMatt Macy 
2575eda14cbcSMatt Macy /*
2576eda14cbcSMatt Macy  * Passivate any log vdevs (note, does not apply to embedded log metaslabs).
2577eda14cbcSMatt Macy  */
2578eda14cbcSMatt Macy static boolean_t
2579184c1b94SMartin Matuska spa_passivate_log(spa_t *spa)
2580184c1b94SMartin Matuska {
2581184c1b94SMartin Matuska 	vdev_t *rvd = spa->spa_root_vdev;
2582eda14cbcSMatt Macy 	boolean_t slog_found = B_FALSE;
2583eda14cbcSMatt Macy 
2584eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
2585eda14cbcSMatt Macy 
2586eda14cbcSMatt Macy 	for (int c = 0; c < rvd->vdev_children; c++) {
2587eda14cbcSMatt Macy 		vdev_t *tvd = rvd->vdev_child[c];
2588eda14cbcSMatt Macy 
2589eda14cbcSMatt Macy 		if (tvd->vdev_islog) {
2590eda14cbcSMatt Macy 			ASSERT3P(tvd->vdev_log_mg, ==, NULL);
2591eda14cbcSMatt Macy 			metaslab_group_passivate(tvd->vdev_mg);
2592eda14cbcSMatt Macy 			slog_found = B_TRUE;
2593eda14cbcSMatt Macy 		}
2594184c1b94SMartin Matuska 	}
2595184c1b94SMartin Matuska 
2596eda14cbcSMatt Macy 	return (slog_found);
2597eda14cbcSMatt Macy }
2598eda14cbcSMatt Macy 
2599eda14cbcSMatt Macy /*
2600eda14cbcSMatt Macy  * Activate any log vdevs (note, does not apply to embedded log metaslabs).
2601eda14cbcSMatt Macy  */
2602eda14cbcSMatt Macy static void
2603184c1b94SMartin Matuska spa_activate_log(spa_t *spa)
2604184c1b94SMartin Matuska {
2605184c1b94SMartin Matuska 	vdev_t *rvd = spa->spa_root_vdev;
2606eda14cbcSMatt Macy 
2607eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
2608eda14cbcSMatt Macy 
2609eda14cbcSMatt Macy 	for (int c = 0; c < rvd->vdev_children; c++) {
2610eda14cbcSMatt Macy 		vdev_t *tvd = rvd->vdev_child[c];
2611eda14cbcSMatt Macy 
2612eda14cbcSMatt Macy 		if (tvd->vdev_islog) {
2613eda14cbcSMatt Macy 			ASSERT3P(tvd->vdev_log_mg, ==, NULL);
2614eda14cbcSMatt Macy 			metaslab_group_activate(tvd->vdev_mg);
2615eda14cbcSMatt Macy 		}
2616184c1b94SMartin Matuska 	}
2617184c1b94SMartin Matuska }
2618184c1b94SMartin Matuska 
2619184c1b94SMartin Matuska int
2620eda14cbcSMatt Macy spa_reset_logs(spa_t *spa)
2621eda14cbcSMatt Macy {
2622eda14cbcSMatt Macy 	int error;
2623eda14cbcSMatt Macy 
2624eda14cbcSMatt Macy 	error = dmu_objset_find(spa_name(spa), zil_reset,
2625eda14cbcSMatt Macy 	    NULL, DS_FIND_CHILDREN);
2626eda14cbcSMatt Macy 	if (error == 0) {
2627eda14cbcSMatt Macy 		/*
2628eda14cbcSMatt Macy 		 * We successfully offlined the log device, sync out the
2629eda14cbcSMatt Macy 		 * current txg so that the "stubby" block can be removed
2630eda14cbcSMatt Macy 		 * by zil_sync().
2631eda14cbcSMatt Macy 		 */
2632eda14cbcSMatt Macy 		txg_wait_synced(spa->spa_dsl_pool, 0);
2633eda14cbcSMatt Macy 	}
2634eda14cbcSMatt Macy 	return (error);
2635eda14cbcSMatt Macy }
2636eda14cbcSMatt Macy 
2637eda14cbcSMatt Macy static void
2638eda14cbcSMatt Macy spa_aux_check_removed(spa_aux_vdev_t *sav)
2639eda14cbcSMatt Macy {
2640eda14cbcSMatt Macy 	for (int i = 0; i < sav->sav_count; i++)
2641eda14cbcSMatt Macy 		spa_check_removed(sav->sav_vdevs[i]);
2642eda14cbcSMatt Macy }
2643eda14cbcSMatt Macy 
2644eda14cbcSMatt Macy void
2645eda14cbcSMatt Macy spa_claim_notify(zio_t *zio)
2646eda14cbcSMatt Macy {
2647eda14cbcSMatt Macy 	spa_t *spa = zio->io_spa;
2648eda14cbcSMatt Macy 
2649eda14cbcSMatt Macy 	if (zio->io_error)
2650eda14cbcSMatt Macy 		return;
2651eda14cbcSMatt Macy 
2652eda14cbcSMatt Macy 	mutex_enter(&spa->spa_props_lock);	/* any mutex will do */
2653eda14cbcSMatt Macy 	if (spa->spa_claim_max_txg < BP_GET_LOGICAL_BIRTH(zio->io_bp))
2654eda14cbcSMatt Macy 		spa->spa_claim_max_txg = BP_GET_LOGICAL_BIRTH(zio->io_bp);
2655eda14cbcSMatt Macy 	mutex_exit(&spa->spa_props_lock);
2656eda14cbcSMatt Macy }
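
/*
 * Editorial sketch (not part of the original source): spa_claim_notify()
 * is a mutex-protected "running maximum" update; the props lock is
 * borrowed purely for mutual exclusion ("any mutex will do"), not
 * because the value is a pool property.  The bare idiom, as a
 * hypothetical helper:
 */
#if 0	/* illustrative only */
static void
update_max_locked(kmutex_t *lock, uint64_t *maxp, uint64_t val)
{
	mutex_enter(lock);
	if (*maxp < val)
		*maxp = val;	/* the maximum only ever moves forward */
	mutex_exit(lock);
}
#endif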
2657eda14cbcSMatt Macy 
2658eda14cbcSMatt Macy typedef struct spa_load_error {
2659eda14cbcSMatt Macy 	boolean_t	sle_verify_data;
2660eda14cbcSMatt Macy 	uint64_t	sle_meta_count;
2661eda14cbcSMatt Macy 	uint64_t	sle_data_count;
2662eda14cbcSMatt Macy } spa_load_error_t;
2663c03c5b1cSMartin Matuska 
2664eda14cbcSMatt Macy static void
2665eda14cbcSMatt Macy spa_load_verify_done(zio_t *zio)
2666eda14cbcSMatt Macy {
2667eda14cbcSMatt Macy 	blkptr_t *bp = zio->io_bp;
2668eda14cbcSMatt Macy 	spa_load_error_t *sle = zio->io_private;
2669eda14cbcSMatt Macy 	dmu_object_type_t type = BP_GET_TYPE(bp);
2670eda14cbcSMatt Macy 	int error = zio->io_error;
2671eda14cbcSMatt Macy 	spa_t *spa = zio->io_spa;
2672eda14cbcSMatt Macy 
2673eda14cbcSMatt Macy 	abd_free(zio->io_abd);
2674eda14cbcSMatt Macy 	if (error) {
2675eda14cbcSMatt Macy 		if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
2676eda14cbcSMatt Macy 		    type != DMU_OT_INTENT_LOG)
2677eda14cbcSMatt Macy 			atomic_inc_64(&sle->sle_meta_count);
2678eda14cbcSMatt Macy 		else
2679eda14cbcSMatt Macy 			atomic_inc_64(&sle->sle_data_count);
2680eda14cbcSMatt Macy 	}
2681eda14cbcSMatt Macy 
2682eda14cbcSMatt Macy 	mutex_enter(&spa->spa_scrub_lock);
2683eda14cbcSMatt Macy 	spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
2684eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_scrub_io_cv);
2685eda14cbcSMatt Macy 	mutex_exit(&spa->spa_scrub_lock);
2686eda14cbcSMatt Macy }
2687eda14cbcSMatt Macy 
2688eda14cbcSMatt Macy /*
2689eda14cbcSMatt Macy  * Maximum number of inflight bytes is a log2 fraction of the ARC size.
2690eda14cbcSMatt Macy  * By default, we set it to 1/16th of the ARC.
2691eda14cbcSMatt Macy  */
2692eda14cbcSMatt Macy static uint_t spa_load_verify_shift = 4;
2693eda14cbcSMatt Macy static int spa_load_verify_metadata = B_TRUE;
2694eda14cbcSMatt Macy static int spa_load_verify_data = B_TRUE;
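
/*
 * Editorial example (not part of the original source): with the default
 * spa_load_verify_shift of 4 and, say, a 16 GiB ARC target,
 * arc_target_bytes() >> spa_load_verify_shift caps the outstanding
 * verification I/O at 16 GiB >> 4 = 1 GiB.
 */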
2695eda14cbcSMatt Macy 
2696be181ee2SMartin Matuska static int
2697e92ffd9bSMartin Matuska spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
2698e92ffd9bSMartin Matuska     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
2699eda14cbcSMatt Macy {
2700eda14cbcSMatt Macy 	zio_t *rio = arg;
2701eda14cbcSMatt Macy 	spa_load_error_t *sle = rio->io_private;
2702eda14cbcSMatt Macy 
2703eda14cbcSMatt Macy 	(void) zilog, (void) dnp;
2704c03c5b1cSMartin Matuska 
2705c03c5b1cSMartin Matuska 	/*
2706c03c5b1cSMartin Matuska 	 * Note: normally this routine will not be called if
2707e92ffd9bSMartin Matuska 	 * spa_load_verify_metadata is not set.  However, it may be useful
2708e92ffd9bSMartin Matuska 	 * to manually set the flag after the traversal has begun.
2709eda14cbcSMatt Macy 	 */
2710eda14cbcSMatt Macy 	if (!spa_load_verify_metadata)
2711eda14cbcSMatt Macy 		return (0);
2712eda14cbcSMatt Macy 
2713eda14cbcSMatt Macy 	/*
2714eda14cbcSMatt Macy 	 * Sanity check the block pointer in order to detect obvious damage
2715eda14cbcSMatt Macy 	 * before using the contents in subsequent checks or in zio_read().
2716e3aa18adSMartin Matuska 	 * When damaged consider it to be a metadata error since we cannot
2717e3aa18adSMartin Matuska 	 * trust the BP_GET_TYPE and BP_GET_LEVEL values.
2718e3aa18adSMartin Matuska 	 */
2719e3aa18adSMartin Matuska 	if (!zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
2720e3aa18adSMartin Matuska 		atomic_inc_64(&sle->sle_meta_count);
2721e3aa18adSMartin Matuska 		return (0);
2722e3aa18adSMartin Matuska 	}
2723e639e0d2SMartin Matuska 
2724e3aa18adSMartin Matuska 	if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
2725e3aa18adSMartin Matuska 	    BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
2726e3aa18adSMartin Matuska 		return (0);
2727e3aa18adSMartin Matuska 
2728e3aa18adSMartin Matuska 	if (!BP_IS_METADATA(bp) &&
2729e3aa18adSMartin Matuska 	    (!spa_load_verify_data || !sle->sle_verify_data))
2730e3aa18adSMartin Matuska 		return (0);
2731e3aa18adSMartin Matuska 
2732c03c5b1cSMartin Matuska 	uint64_t maxinflight_bytes =
2733c03c5b1cSMartin Matuska 	    arc_target_bytes() >> spa_load_verify_shift;
2734eda14cbcSMatt Macy 	size_t size = BP_GET_PSIZE(bp);
2735eda14cbcSMatt Macy 
2736eda14cbcSMatt Macy 	mutex_enter(&spa->spa_scrub_lock);
2737eda14cbcSMatt Macy 	while (spa->spa_load_verify_bytes >= maxinflight_bytes)
2738eda14cbcSMatt Macy 		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
2739eda14cbcSMatt Macy 	spa->spa_load_verify_bytes += size;
2740eda14cbcSMatt Macy 	mutex_exit(&spa->spa_scrub_lock);
2741eda14cbcSMatt Macy 
2742eda14cbcSMatt Macy 	zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
2743eda14cbcSMatt Macy 	    spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
2744eda14cbcSMatt Macy 	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
2745eda14cbcSMatt Macy 	    ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
2746eda14cbcSMatt Macy 	return (0);
2747eda14cbcSMatt Macy }
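
/*
 * Editorial sketch (not part of the original source): the scrub
 * lock/cv pair above implements a byte-counted inflight throttle --
 * issuers block on the cv while the count is at the cap, and the
 * completion path (spa_load_verify_done()) decrements the count and
 * broadcasts.  The issuer-side idiom, as a hypothetical helper:
 */
#if 0	/* illustrative only */
static void
inflight_throttle_enter(kmutex_t *lock, kcondvar_t *cv,
    uint64_t *inflight, uint64_t cap, uint64_t size)
{
	mutex_enter(lock);
	while (*inflight >= cap)
		cv_wait(cv, lock);	/* woken by the completion path */
	*inflight += size;		/* charge this I/O before issuing */
	mutex_exit(lock);
}
#endif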
2748eda14cbcSMatt Macy 
2749eda14cbcSMatt Macy static int
2750eda14cbcSMatt Macy verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
2751eda14cbcSMatt Macy {
2752eda14cbcSMatt Macy 	(void) dp, (void) arg;
2753eda14cbcSMatt Macy 
2754eda14cbcSMatt Macy 	if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
2755eda14cbcSMatt Macy 		return (SET_ERROR(ENAMETOOLONG));
2756e92ffd9bSMartin Matuska 
2757e92ffd9bSMartin Matuska 	return (0);
2758eda14cbcSMatt Macy }
2759eda14cbcSMatt Macy 
2760eda14cbcSMatt Macy static int
2761eda14cbcSMatt Macy spa_load_verify(spa_t *spa)
2762eda14cbcSMatt Macy {
2763eda14cbcSMatt Macy 	zio_t *rio;
2764eda14cbcSMatt Macy 	spa_load_error_t sle = { 0 };
2765eda14cbcSMatt Macy 	zpool_load_policy_t policy;
2766eda14cbcSMatt Macy 	boolean_t verify_ok = B_FALSE;
2767eda14cbcSMatt Macy 	int error = 0;
2768eda14cbcSMatt Macy 
2769eda14cbcSMatt Macy 	zpool_get_load_policy(spa->spa_config, &policy);
2770eda14cbcSMatt Macy 
2771eda14cbcSMatt Macy 	if (policy.zlp_rewind & ZPOOL_NEVER_REWIND ||
2772eda14cbcSMatt Macy 	    policy.zlp_maxmeta == UINT64_MAX)
2773eda14cbcSMatt Macy 		return (0);
2774eda14cbcSMatt Macy 
2775c03c5b1cSMartin Matuska 	dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
2776c03c5b1cSMartin Matuska 	error = dmu_objset_find_dp(spa->spa_dsl_pool,
2777eda14cbcSMatt Macy 	    spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
2778eda14cbcSMatt Macy 	    DS_FIND_CHILDREN);
2779eda14cbcSMatt Macy 	dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
2780eda14cbcSMatt Macy 	if (error != 0)
2781eda14cbcSMatt Macy 		return (error);
2782eda14cbcSMatt Macy 
2783eda14cbcSMatt Macy 	/*
2784eda14cbcSMatt Macy 	 * Verify data only if we are rewinding or error limit was set.
2785eda14cbcSMatt Macy 	 * Otherwise nothing except dbgmsg cares about it; don't waste the time.
2786eda14cbcSMatt Macy 	 */
2787c03c5b1cSMartin Matuska 	sle.sle_verify_data = (policy.zlp_rewind & ZPOOL_REWIND_MASK) ||
2788c03c5b1cSMartin Matuska 	    (policy.zlp_maxdata < UINT64_MAX);
2789c03c5b1cSMartin Matuska 
2790c03c5b1cSMartin Matuska 	rio = zio_root(spa, NULL, &sle,
2791c03c5b1cSMartin Matuska 	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
2792c03c5b1cSMartin Matuska 
2793c03c5b1cSMartin Matuska 	if (spa_load_verify_metadata) {
2794eda14cbcSMatt Macy 		if (spa->spa_extreme_rewind) {
2795eda14cbcSMatt Macy 			spa_load_note(spa, "performing a complete scan of the "
2796eda14cbcSMatt Macy 			    "pool since extreme rewind is on. This may take "
2797eda14cbcSMatt Macy 			    "a very long time.\n  (spa_load_verify_data=%u, "
2798eda14cbcSMatt Macy 			    "spa_load_verify_metadata=%u)",
2799eda14cbcSMatt Macy 			    spa_load_verify_data, spa_load_verify_metadata);
2800eda14cbcSMatt Macy 		}
2801eda14cbcSMatt Macy 
2802eda14cbcSMatt Macy 		error = traverse_pool(spa, spa->spa_verify_min_txg,
2803eda14cbcSMatt Macy 		    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
2804eda14cbcSMatt Macy 		    TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio);
2805eda14cbcSMatt Macy 	}
2806eda14cbcSMatt Macy 
2807eda14cbcSMatt Macy 	(void) zio_wait(rio);
2808eda14cbcSMatt Macy 	ASSERT0(spa->spa_load_verify_bytes);
2809eda14cbcSMatt Macy 
2810eda14cbcSMatt Macy 	spa->spa_load_meta_errors = sle.sle_meta_count;
2811eda14cbcSMatt Macy 	spa->spa_load_data_errors = sle.sle_data_count;
2812eda14cbcSMatt Macy 
2813eda14cbcSMatt Macy 	if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) {
2814eda14cbcSMatt Macy 		spa_load_note(spa, "spa_load_verify found %llu metadata errors "
2815eda14cbcSMatt Macy 		    "and %llu data errors", (u_longlong_t)sle.sle_meta_count,
2816eda14cbcSMatt Macy 		    (u_longlong_t)sle.sle_data_count);
2817eda14cbcSMatt Macy 	}
2818eda14cbcSMatt Macy 
2819eda14cbcSMatt Macy 	if (spa_load_verify_dryrun ||
2820eda14cbcSMatt Macy 	    (!error && sle.sle_meta_count <= policy.zlp_maxmeta &&
2821eda14cbcSMatt Macy 	    sle.sle_data_count <= policy.zlp_maxdata)) {
2822eda14cbcSMatt Macy 		int64_t loss = 0;
2823eda14cbcSMatt Macy 
2824eda14cbcSMatt Macy 		verify_ok = B_TRUE;
2825eda14cbcSMatt Macy 		spa->spa_load_txg = spa->spa_uberblock.ub_txg;
2826eda14cbcSMatt Macy 		spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
2827eda14cbcSMatt Macy 
2828eda14cbcSMatt Macy 		loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
2829eda14cbcSMatt Macy 		fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_LOAD_TIME,
2830eda14cbcSMatt Macy 		    spa->spa_load_txg_ts);
2831eda14cbcSMatt Macy 		fnvlist_add_int64(spa->spa_load_info, ZPOOL_CONFIG_REWIND_TIME,
2832eda14cbcSMatt Macy 		    loss);
283381b22a98SMartin Matuska 		fnvlist_add_uint64(spa->spa_load_info,
283481b22a98SMartin Matuska 		    ZPOOL_CONFIG_LOAD_META_ERRORS, sle.sle_meta_count);
283581b22a98SMartin Matuska 		fnvlist_add_uint64(spa->spa_load_info,
283681b22a98SMartin Matuska 		    ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count);
283781b22a98SMartin Matuska 	} else {
2838c03c5b1cSMartin Matuska 		spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
2839c03c5b1cSMartin Matuska 	}
284081b22a98SMartin Matuska 
2841eda14cbcSMatt Macy 	if (spa_load_verify_dryrun)
2842eda14cbcSMatt Macy 		return (0);
2843eda14cbcSMatt Macy 
2844eda14cbcSMatt Macy 	if (error) {
2845eda14cbcSMatt Macy 		if (error != ENXIO && error != EIO)
2846eda14cbcSMatt Macy 			error = SET_ERROR(EIO);
2847eda14cbcSMatt Macy 		return (error);
2848eda14cbcSMatt Macy 	}
2849eda14cbcSMatt Macy 
2850eda14cbcSMatt Macy 	return (verify_ok ? 0 : EIO);
2851eda14cbcSMatt Macy }
2852eda14cbcSMatt Macy 
2853eda14cbcSMatt Macy /*
2854eda14cbcSMatt Macy  * Find a value in the pool props object.
2855eda14cbcSMatt Macy  */
2856eda14cbcSMatt Macy static void
2857eda14cbcSMatt Macy spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
2858eda14cbcSMatt Macy {
2859eda14cbcSMatt Macy 	(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
2860eda14cbcSMatt Macy 	    zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
2861eda14cbcSMatt Macy }
2862eda14cbcSMatt Macy 
2863eda14cbcSMatt Macy /*
2864eda14cbcSMatt Macy  * Find a value in the pool directory object.
2865eda14cbcSMatt Macy  */
2866eda14cbcSMatt Macy static int
2867eda14cbcSMatt Macy spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent)
2868eda14cbcSMatt Macy {
2869eda14cbcSMatt Macy 	int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2870eda14cbcSMatt Macy 	    name, sizeof (uint64_t), 1, val);
2871eda14cbcSMatt Macy 
2872eda14cbcSMatt Macy 	if (error != 0 && (error != ENOENT || log_enoent)) {
2873eda14cbcSMatt Macy 		spa_load_failed(spa, "couldn't get '%s' value in MOS directory "
2874eda14cbcSMatt Macy 		    "[error=%d]", name, error);
2875eda14cbcSMatt Macy 	}
2876eda14cbcSMatt Macy 
2877eda14cbcSMatt Macy 	return (error);
2878eda14cbcSMatt Macy }
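
/*
 * Editorial usage note (not part of the original source): a typical
 * spa_dir_prop() caller fetches a MOS object number by its directory
 * name and tolerates ENOENT for optional entries.  Hypothetical
 * fragment (DMU_POOL_ERRLOG_SCRUB is a real directory key; the
 * surrounding logic is illustrative):
 */
#if 0	/* illustrative only */
	uint64_t errlog_obj = 0;
	int err = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
	    &errlog_obj, B_FALSE);
	if (err != 0 && err != ENOENT)
		return (err);	/* a real failure, not merely "absent" */
#endif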
2879eda14cbcSMatt Macy 
2880eda14cbcSMatt Macy static int
2881eda14cbcSMatt Macy spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
2882eda14cbcSMatt Macy {
2883eda14cbcSMatt Macy 	vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
2884eda14cbcSMatt Macy 	return (SET_ERROR(err));
2885eda14cbcSMatt Macy }
2886eda14cbcSMatt Macy 
2887eda14cbcSMatt Macy boolean_t
2888eda14cbcSMatt Macy spa_livelist_delete_check(spa_t *spa)
2889eda14cbcSMatt Macy {
2890eda14cbcSMatt Macy 	return (spa->spa_livelists_to_delete != 0);
2891eda14cbcSMatt Macy }
2892eda14cbcSMatt Macy 
2893eda14cbcSMatt Macy static boolean_t
2894eda14cbcSMatt Macy spa_livelist_delete_cb_check(void *arg, zthr_t *z)
2895eda14cbcSMatt Macy {
2896eda14cbcSMatt Macy 	(void) z;
2897eda14cbcSMatt Macy 	spa_t *spa = arg;
2898eda14cbcSMatt Macy 	return (spa_livelist_delete_check(spa));
2899eda14cbcSMatt Macy }
2900e92ffd9bSMartin Matuska 
2901eda14cbcSMatt Macy static int
2902eda14cbcSMatt Macy delete_blkptr_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
2903eda14cbcSMatt Macy {
2904eda14cbcSMatt Macy 	spa_t *spa = arg;
2905eda14cbcSMatt Macy 	zio_free(spa, tx->tx_txg, bp);
2906eda14cbcSMatt Macy 	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
2907eda14cbcSMatt Macy 	    -bp_get_dsize_sync(spa, bp),
2908eda14cbcSMatt Macy 	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
2909eda14cbcSMatt Macy 	return (0);
2910eda14cbcSMatt Macy }
2911eda14cbcSMatt Macy 
2912eda14cbcSMatt Macy static int
2913eda14cbcSMatt Macy dsl_get_next_livelist_obj(objset_t *os, uint64_t zap_obj, uint64_t *llp)
2914eda14cbcSMatt Macy {
2915eda14cbcSMatt Macy 	int err;
2916eda14cbcSMatt Macy 	zap_cursor_t zc;
2917eda14cbcSMatt Macy 	zap_attribute_t za;
2918eda14cbcSMatt Macy 	zap_cursor_init(&zc, os, zap_obj);
2919eda14cbcSMatt Macy 	err = zap_cursor_retrieve(&zc, &za);
2920eda14cbcSMatt Macy 	zap_cursor_fini(&zc);
2921eda14cbcSMatt Macy 	if (err == 0)
2922eda14cbcSMatt Macy 		*llp = za.za_first_integer;
2923eda14cbcSMatt Macy 	return (err);
2924eda14cbcSMatt Macy }
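
/*
 * Editorial sketch (not part of the original source):
 * dsl_get_next_livelist_obj() grabs only the first ZAP entry.  For
 * contrast, the general ZAP iteration idiom advances the cursor until
 * retrieval fails; 'visit' is a hypothetical callback.
 */
#if 0	/* illustrative only */
static void
zap_walk_all(objset_t *os, uint64_t zap_obj,
    void (*visit)(const char *name, uint64_t value))
{
	zap_cursor_t zc;
	zap_attribute_t za;

	for (zap_cursor_init(&zc, os, zap_obj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc))
		visit(za.za_name, za.za_first_integer);
	zap_cursor_fini(&zc);
}
#endif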
2925eda14cbcSMatt Macy 
2926eda14cbcSMatt Macy /*
2927eda14cbcSMatt Macy  * Components of livelist deletion that must be performed in syncing
2928eda14cbcSMatt Macy  * context: freeing block pointers and updating the pool-wide data
2929eda14cbcSMatt Macy  * structures to indicate how much work is left to do
2930eda14cbcSMatt Macy  */
2931eda14cbcSMatt Macy typedef struct sublist_delete_arg {
2932eda14cbcSMatt Macy 	spa_t *spa;
2933eda14cbcSMatt Macy 	dsl_deadlist_t *ll;
2934eda14cbcSMatt Macy 	uint64_t key;
2935eda14cbcSMatt Macy 	bplist_t *to_free;
2936eda14cbcSMatt Macy } sublist_delete_arg_t;
2937eda14cbcSMatt Macy 
2938eda14cbcSMatt Macy static void
2939eda14cbcSMatt Macy sublist_delete_sync(void *arg, dmu_tx_t *tx)
2940eda14cbcSMatt Macy {
2941eda14cbcSMatt Macy 	sublist_delete_arg_t *sda = arg;
2942eda14cbcSMatt Macy 	spa_t *spa = sda->spa;
2943eda14cbcSMatt Macy 	dsl_deadlist_t *ll = sda->ll;
2944eda14cbcSMatt Macy 	uint64_t key = sda->key;
2945eda14cbcSMatt Macy 	bplist_t *to_free = sda->to_free;
2946eda14cbcSMatt Macy 
2947eda14cbcSMatt Macy 	bplist_iterate(to_free, delete_blkptr_cb, spa, tx);
2948eda14cbcSMatt Macy 	dsl_deadlist_remove_entry(ll, key, tx);
2949eda14cbcSMatt Macy }
2950eda14cbcSMatt Macy 
2951eda14cbcSMatt Macy typedef struct livelist_delete_arg {
2952eda14cbcSMatt Macy 	spa_t *spa;
2953eda14cbcSMatt Macy 	uint64_t ll_obj;
2954eda14cbcSMatt Macy 	uint64_t zap_obj;
2955eda14cbcSMatt Macy } livelist_delete_arg_t;
2956eda14cbcSMatt Macy 
2957eda14cbcSMatt Macy static void
2958eda14cbcSMatt Macy livelist_delete_sync(void *arg, dmu_tx_t *tx)
2959eda14cbcSMatt Macy {
2960eda14cbcSMatt Macy 	livelist_delete_arg_t *lda = arg;
2961eda14cbcSMatt Macy 	spa_t *spa = lda->spa;
2962eda14cbcSMatt Macy 	uint64_t ll_obj = lda->ll_obj;
2963eda14cbcSMatt Macy 	uint64_t zap_obj = lda->zap_obj;
2964eda14cbcSMatt Macy 	objset_t *mos = spa->spa_meta_objset;
2965eda14cbcSMatt Macy 	uint64_t count;
2966eda14cbcSMatt Macy 
2967eda14cbcSMatt Macy 	/* free the livelist and decrement the feature count */
2968eda14cbcSMatt Macy 	VERIFY0(zap_remove_int(mos, zap_obj, ll_obj, tx));
2969eda14cbcSMatt Macy 	dsl_deadlist_free(mos, ll_obj, tx);
2970eda14cbcSMatt Macy 	spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx);
2971eda14cbcSMatt Macy 	VERIFY0(zap_count(mos, zap_obj, &count));
2972eda14cbcSMatt Macy 	if (count == 0) {
2973eda14cbcSMatt Macy 		/* no more livelists to delete */
2974eda14cbcSMatt Macy 		VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
2975eda14cbcSMatt Macy 		    DMU_POOL_DELETED_CLONES, tx));
2976eda14cbcSMatt Macy 		VERIFY0(zap_destroy(mos, zap_obj, tx));
2977eda14cbcSMatt Macy 		spa->spa_livelists_to_delete = 0;
2978eda14cbcSMatt Macy 		spa_notify_waiters(spa);
2979eda14cbcSMatt Macy 	}
2980eda14cbcSMatt Macy }
2981eda14cbcSMatt Macy 
2982eda14cbcSMatt Macy /*
2983eda14cbcSMatt Macy  * Load in the value for the livelist to be removed and open it. Then,
2984eda14cbcSMatt Macy  * load its first sublist and determine which block pointers should actually
2985eda14cbcSMatt Macy  * be freed. Finally, call a synctask that performs the actual frees and
2986eda14cbcSMatt Macy  * updates the pool-wide livelist data.
2987eda14cbcSMatt Macy  */
2988eda14cbcSMatt Macy static void
2989eda14cbcSMatt Macy spa_livelist_delete_cb(void *arg, zthr_t *z)
2990eda14cbcSMatt Macy {
2991eda14cbcSMatt Macy 	spa_t *spa = arg;
2992eda14cbcSMatt Macy 	uint64_t ll_obj = 0, count;
2993eda14cbcSMatt Macy 	objset_t *mos = spa->spa_meta_objset;
2994eda14cbcSMatt Macy 	uint64_t zap_obj = spa->spa_livelists_to_delete;
2995eda14cbcSMatt Macy 	/*
2996eda14cbcSMatt Macy 	 * Determine the next livelist to delete. This function should only
2997eda14cbcSMatt Macy 	 * be called if there is at least one deleted clone.
2998eda14cbcSMatt Macy 	 */
2999eda14cbcSMatt Macy 	VERIFY0(dsl_get_next_livelist_obj(mos, zap_obj, &ll_obj));
3000eda14cbcSMatt Macy 	VERIFY0(zap_count(mos, ll_obj, &count));
3001eda14cbcSMatt Macy 	if (count > 0) {
3002eda14cbcSMatt Macy 		dsl_deadlist_t *ll;
3003eda14cbcSMatt Macy 		dsl_deadlist_entry_t *dle;
3004eda14cbcSMatt Macy 		bplist_t to_free;
3005eda14cbcSMatt Macy 		ll = kmem_zalloc(sizeof (dsl_deadlist_t), KM_SLEEP);
30062c48331dSMatt Macy 		dsl_deadlist_open(ll, mos, ll_obj);
3007eda14cbcSMatt Macy 		dle = dsl_deadlist_first(ll);
3008eda14cbcSMatt Macy 		ASSERT3P(dle, !=, NULL);
30092c48331dSMatt Macy 		bplist_create(&to_free);
30102c48331dSMatt Macy 		int err = dsl_process_sub_livelist(&dle->dle_bpobj, &to_free,
30112c48331dSMatt Macy 		    z, NULL);
3012eda14cbcSMatt Macy 		if (err == 0) {
3013eda14cbcSMatt Macy 			sublist_delete_arg_t sync_arg = {
3014eda14cbcSMatt Macy 			    .spa = spa,
3015eda14cbcSMatt Macy 			    .ll = ll,
3016eda14cbcSMatt Macy 			    .key = dle->dle_mintxg,
3017eda14cbcSMatt Macy 			    .to_free = &to_free
3018eda14cbcSMatt Macy 			};
30192c48331dSMatt Macy 			zfs_dbgmsg("deleting sublist (id %llu) from"
3020eda14cbcSMatt Macy 			    " livelist %llu, %lld remaining",
3021eda14cbcSMatt Macy 			    (u_longlong_t)dle->dle_bpobj.bpo_object,
3022eda14cbcSMatt Macy 			    (u_longlong_t)ll_obj, (longlong_t)count - 1);
3023eda14cbcSMatt Macy 			VERIFY0(dsl_sync_task(spa_name(spa), NULL,
302433b8c039SMartin Matuska 			    sublist_delete_sync, &sync_arg, 0,
302533b8c039SMartin Matuska 			    ZFS_SPACE_CHECK_DESTROY));
302633b8c039SMartin Matuska 		} else {
3027eda14cbcSMatt Macy 			VERIFY3U(err, ==, EINTR);
3028eda14cbcSMatt Macy 		}
3029eda14cbcSMatt Macy 		bplist_clear(&to_free);
3030eda14cbcSMatt Macy 		bplist_destroy(&to_free);
3031eda14cbcSMatt Macy 		dsl_deadlist_close(ll);
3032eda14cbcSMatt Macy 		kmem_free(ll, sizeof (dsl_deadlist_t));
3033eda14cbcSMatt Macy 	} else {
3034eda14cbcSMatt Macy 		livelist_delete_arg_t sync_arg = {
30352c48331dSMatt Macy 		    .spa = spa,
30362c48331dSMatt Macy 		    .ll_obj = ll_obj,
3037eda14cbcSMatt Macy 		    .zap_obj = zap_obj
3038eda14cbcSMatt Macy 		};
3039eda14cbcSMatt Macy 		zfs_dbgmsg("deletion of livelist %llu completed",
3040eda14cbcSMatt Macy 		    (u_longlong_t)ll_obj);
3041eda14cbcSMatt Macy 		VERIFY0(dsl_sync_task(spa_name(spa), NULL, livelist_delete_sync,
3042eda14cbcSMatt Macy 		    &sync_arg, 0, ZFS_SPACE_CHECK_DESTROY));
304333b8c039SMartin Matuska 	}
304433b8c039SMartin Matuska }
3045eda14cbcSMatt Macy 
3046eda14cbcSMatt Macy static void
3047eda14cbcSMatt Macy spa_start_livelist_destroy_thread(spa_t *spa)
3048eda14cbcSMatt Macy {
3049eda14cbcSMatt Macy 	ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL);
3050eda14cbcSMatt Macy 	spa->spa_livelist_delete_zthr =
3051eda14cbcSMatt Macy 	    zthr_create("z_livelist_destroy",
3052eda14cbcSMatt Macy 	    spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa,
3053eda14cbcSMatt Macy 	    minclsyspri);
3054eda14cbcSMatt Macy }
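
/*
 * Editorial sketch (not part of the original source): the zthr pattern
 * used throughout this file pairs a cheap predicate (the "_check"
 * callback, polled by the zthr framework to decide whether to run)
 * with a worker that must poll zthr_iscancelled() so it can be torn
 * down promptly.  A minimal hypothetical pair:
 */
#if 0	/* illustrative only */
static boolean_t
example_cb_check(void *arg, zthr_t *z)
{
	(void) z;
	spa_t *spa = arg;
	return (spa->spa_livelists_to_delete != 0);	/* work pending? */
}

static void
example_cb(void *arg, zthr_t *z)
{
	spa_t *spa = arg;
	(void) spa;
	while (!zthr_iscancelled(z)) {
		/* ... perform one unit of work against 'spa' ... */
		break;	/* return when done; zthr re-runs if check fires */
	}
}
#endif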
3055eda14cbcSMatt Macy 
30562faf504dSMartin Matuska typedef struct livelist_new_arg {
30572faf504dSMartin Matuska 	bplist_t *allocs;
3058eda14cbcSMatt Macy 	bplist_t *frees;
3059eda14cbcSMatt Macy } livelist_new_arg_t;
3060eda14cbcSMatt Macy 
3061eda14cbcSMatt Macy static int
3062eda14cbcSMatt Macy livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
3063eda14cbcSMatt Macy     dmu_tx_t *tx)
3064eda14cbcSMatt Macy {
3065eda14cbcSMatt Macy 	ASSERT(tx == NULL);
3066eda14cbcSMatt Macy 	livelist_new_arg_t *lna = arg;
3067eda14cbcSMatt Macy 	if (bp_freed) {
3068eda14cbcSMatt Macy 		bplist_append(lna->frees, bp);
3069eda14cbcSMatt Macy 	} else {
3070eda14cbcSMatt Macy 		bplist_append(lna->allocs, bp);
3071eda14cbcSMatt Macy 		zfs_livelist_condense_new_alloc++;
3072eda14cbcSMatt Macy 	}
3073eda14cbcSMatt Macy 	return (0);
3074eda14cbcSMatt Macy }
3075eda14cbcSMatt Macy 
3076eda14cbcSMatt Macy typedef struct livelist_condense_arg {
3077eda14cbcSMatt Macy 	spa_t *spa;
3078eda14cbcSMatt Macy 	bplist_t to_keep;
3079eda14cbcSMatt Macy 	uint64_t first_size;
3080eda14cbcSMatt Macy 	uint64_t next_size;
3081eda14cbcSMatt Macy } livelist_condense_arg_t;
3082eda14cbcSMatt Macy 
3083eda14cbcSMatt Macy static void
3084eda14cbcSMatt Macy spa_livelist_condense_sync(void *arg, dmu_tx_t *tx)
3085eda14cbcSMatt Macy {
3086eda14cbcSMatt Macy 	livelist_condense_arg_t *lca = arg;
3087eda14cbcSMatt Macy 	spa_t *spa = lca->spa;
3088eda14cbcSMatt Macy 	bplist_t new_frees;
3089eda14cbcSMatt Macy 	dsl_dataset_t *ds = spa->spa_to_condense.ds;
3090eda14cbcSMatt Macy 
3091eda14cbcSMatt Macy 	/* Have we been cancelled? */
3092eda14cbcSMatt Macy 	if (spa->spa_to_condense.cancelled) {
3093eda14cbcSMatt Macy 		zfs_livelist_condense_sync_cancel++;
3094eda14cbcSMatt Macy 		goto out;
3095eda14cbcSMatt Macy 	}
3096eda14cbcSMatt Macy 
3097eda14cbcSMatt Macy 	dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
3098eda14cbcSMatt Macy 	dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
3099eda14cbcSMatt Macy 	dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist;
3100eda14cbcSMatt Macy 
3101eda14cbcSMatt Macy 	/*
3102eda14cbcSMatt Macy 	 * It's possible that the livelist was changed while the zthr was
3103eda14cbcSMatt Macy 	 * running. Therefore, we need to check for new blkptrs in the two
3104eda14cbcSMatt Macy 	 * entries being condensed and continue to track them in the livelist.
3105eda14cbcSMatt Macy 	 * Because of the way we handle remapped blkptrs (see dbuf_remap_impl),
3106eda14cbcSMatt Macy 	 * it's possible that the newly added blkptrs are FREEs or ALLOCs so
3107eda14cbcSMatt Macy 	 * we need to sort them into two different bplists.
3108eda14cbcSMatt Macy 	 */
3109eda14cbcSMatt Macy 	uint64_t first_obj = first->dle_bpobj.bpo_object;
3110eda14cbcSMatt Macy 	uint64_t next_obj = next->dle_bpobj.bpo_object;
3111eda14cbcSMatt Macy 	uint64_t cur_first_size = first->dle_bpobj.bpo_phys->bpo_num_blkptrs;
3112eda14cbcSMatt Macy 	uint64_t cur_next_size = next->dle_bpobj.bpo_phys->bpo_num_blkptrs;
3113eda14cbcSMatt Macy 
3114eda14cbcSMatt Macy 	bplist_create(&new_frees);
3115eda14cbcSMatt Macy 	livelist_new_arg_t new_bps = {
3116eda14cbcSMatt Macy 	    .allocs = &lca->to_keep,
3117eda14cbcSMatt Macy 	    .frees = &new_frees,
3118eda14cbcSMatt Macy 	};
3119eda14cbcSMatt Macy 
3120eda14cbcSMatt Macy 	if (cur_first_size > lca->first_size) {
3121eda14cbcSMatt Macy 		VERIFY0(livelist_bpobj_iterate_from_nofree(&first->dle_bpobj,
3122eda14cbcSMatt Macy 		    livelist_track_new_cb, &new_bps, lca->first_size));
3123eda14cbcSMatt Macy 	}
3124eda14cbcSMatt Macy 	if (cur_next_size > lca->next_size) {
3125eda14cbcSMatt Macy 		VERIFY0(livelist_bpobj_iterate_from_nofree(&next->dle_bpobj,
3126eda14cbcSMatt Macy 		    livelist_track_new_cb, &new_bps, lca->next_size));
3127eda14cbcSMatt Macy 	}
3128eda14cbcSMatt Macy 
3129eda14cbcSMatt Macy 	dsl_deadlist_clear_entry(first, ll, tx);
3130eda14cbcSMatt Macy 	ASSERT(bpobj_is_empty(&first->dle_bpobj));
3131eda14cbcSMatt Macy 	dsl_deadlist_remove_entry(ll, next->dle_mintxg, tx);
3132eda14cbcSMatt Macy 
3133eda14cbcSMatt Macy 	bplist_iterate(&lca->to_keep, dsl_deadlist_insert_alloc_cb, ll, tx);
3134eda14cbcSMatt Macy 	bplist_iterate(&new_frees, dsl_deadlist_insert_free_cb, ll, tx);
3135eda14cbcSMatt Macy 	bplist_destroy(&new_frees);
3136eda14cbcSMatt Macy 
3137eda14cbcSMatt Macy 	char dsname[ZFS_MAX_DATASET_NAME_LEN];
3138eda14cbcSMatt Macy 	dsl_dataset_name(ds, dsname);
3139eda14cbcSMatt Macy 	zfs_dbgmsg("txg %llu condensing livelist of %s (id %llu), bpobj %llu "
3140eda14cbcSMatt Macy 	    "(%llu blkptrs) and bpobj %llu (%llu blkptrs) -> bpobj %llu "
3141eda14cbcSMatt Macy 	    "(%llu blkptrs)", (u_longlong_t)tx->tx_txg, dsname,
3142eda14cbcSMatt Macy 	    (u_longlong_t)ds->ds_object, (u_longlong_t)first_obj,
3143eda14cbcSMatt Macy 	    (u_longlong_t)cur_first_size, (u_longlong_t)next_obj,
3144eda14cbcSMatt Macy 	    (u_longlong_t)cur_next_size,
314533b8c039SMartin Matuska 	    (u_longlong_t)first->dle_bpobj.bpo_object,
314633b8c039SMartin Matuska 	    (u_longlong_t)first->dle_bpobj.bpo_phys->bpo_num_blkptrs);
314733b8c039SMartin Matuska out:
314833b8c039SMartin Matuska 	dmu_buf_rele(ds->ds_dbuf, spa);
314933b8c039SMartin Matuska 	spa->spa_to_condense.ds = NULL;
315033b8c039SMartin Matuska 	bplist_clear(&lca->to_keep);
3151eda14cbcSMatt Macy 	bplist_destroy(&lca->to_keep);
3152eda14cbcSMatt Macy 	kmem_free(lca, sizeof (livelist_condense_arg_t));
3153eda14cbcSMatt Macy 	spa->spa_to_condense.syncing = B_FALSE;
3154eda14cbcSMatt Macy }
3155eda14cbcSMatt Macy 
3156eda14cbcSMatt Macy static void
3157eda14cbcSMatt Macy spa_livelist_condense_cb(void *arg, zthr_t *t)
3158eda14cbcSMatt Macy {
3159eda14cbcSMatt Macy 	while (zfs_livelist_condense_zthr_pause &&
3160eda14cbcSMatt Macy 	    !(zthr_has_waiters(t) || zthr_iscancelled(t)))
3161eda14cbcSMatt Macy 		delay(1);
3162eda14cbcSMatt Macy 
3163eda14cbcSMatt Macy 	spa_t *spa = arg;
3164eda14cbcSMatt Macy 	dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
3165eda14cbcSMatt Macy 	dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
3166eda14cbcSMatt Macy 	uint64_t first_size, next_size;
3167eda14cbcSMatt Macy 
3168eda14cbcSMatt Macy 	livelist_condense_arg_t *lca =
3169eda14cbcSMatt Macy 	    kmem_alloc(sizeof (livelist_condense_arg_t), KM_SLEEP);
3170eda14cbcSMatt Macy 	bplist_create(&lca->to_keep);
3171eda14cbcSMatt Macy 
3172eda14cbcSMatt Macy 	/*
3173eda14cbcSMatt Macy 	 * Process the livelists (matching FREEs and ALLOCs) in open context
3174eda14cbcSMatt Macy 	 * so we have minimal work in syncing context to condense.
3175eda14cbcSMatt Macy 	 *
3176eda14cbcSMatt Macy 	 * We save bpobj sizes (first_size and next_size) to use later in
3177eda14cbcSMatt Macy 	 * syncing context to determine if entries were added to these sublists
3178eda14cbcSMatt Macy 	 * while in open context. This is possible because the clone is still
3179eda14cbcSMatt Macy 	 * active and open for normal writes and we want to make sure the new,
3180eda14cbcSMatt Macy 	 * unprocessed blockpointers are inserted into the livelist normally.
3181eda14cbcSMatt Macy 	 *
3182eda14cbcSMatt Macy 	 * Note that dsl_process_sub_livelist() both stores the number of
3183eda14cbcSMatt Macy 	 * blockpointers and iterates over them while the bpobj's lock is
3184eda14cbcSMatt Macy 	 * held, so the sizes returned to us are consistent with what was
3185eda14cbcSMatt Macy 	 * actually processed.
3186eda14cbcSMatt Macy 	 */
3187eda14cbcSMatt Macy 	int err = dsl_process_sub_livelist(&first->dle_bpobj, &lca->to_keep, t,
3188eda14cbcSMatt Macy 	    &first_size);
3189eda14cbcSMatt Macy 	if (err == 0)
3190eda14cbcSMatt Macy 		err = dsl_process_sub_livelist(&next->dle_bpobj, &lca->to_keep,
3191eda14cbcSMatt Macy 		    t, &next_size);
3192eda14cbcSMatt Macy 
3193eda14cbcSMatt Macy 	if (err == 0) {
3194eda14cbcSMatt Macy 		while (zfs_livelist_condense_sync_pause &&
3195eda14cbcSMatt Macy 		    !(zthr_has_waiters(t) || zthr_iscancelled(t)))
3196eda14cbcSMatt Macy 			delay(1);
3197eda14cbcSMatt Macy 
3198eda14cbcSMatt Macy 		dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
3199eda14cbcSMatt Macy 		dmu_tx_mark_netfree(tx);
3200eda14cbcSMatt Macy 		dmu_tx_hold_space(tx, 1);
3201eda14cbcSMatt Macy 		err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE);
3202eda14cbcSMatt Macy 		if (err == 0) {
3203eda14cbcSMatt Macy 			/*
3204eda14cbcSMatt Macy 			 * Prevent the condense zthr restarting before
3205eda14cbcSMatt Macy 			 * the synctask completes.
3206eda14cbcSMatt Macy 			 */
3207eda14cbcSMatt Macy 			spa->spa_to_condense.syncing = B_TRUE;
3208eda14cbcSMatt Macy 			lca->spa = spa;
3209eda14cbcSMatt Macy 			lca->first_size = first_size;
3210eda14cbcSMatt Macy 			lca->next_size = next_size;
3211eda14cbcSMatt Macy 			dsl_sync_task_nowait(spa_get_dsl(spa),
3212eda14cbcSMatt Macy 			    spa_livelist_condense_sync, lca, tx);
3213eda14cbcSMatt Macy 			dmu_tx_commit(tx);
3214eda14cbcSMatt Macy 			return;
3215eda14cbcSMatt Macy 		}
32162c48331dSMatt Macy 	}
3217eda14cbcSMatt Macy 	/*
3218eda14cbcSMatt Macy 	 * Condensing cannot continue: either it was externally stopped or
3219eda14cbcSMatt Macy 	 * we were unable to assign to a tx because the pool has run out of
3220eda14cbcSMatt Macy 	 * space. In the second case, we'll just end up trying to condense
3221eda14cbcSMatt Macy 	 * again in a later txg.
3222eda14cbcSMatt Macy 	 */
3223eda14cbcSMatt Macy 	ASSERT(err != 0);
3224eda14cbcSMatt Macy 	bplist_clear(&lca->to_keep);
3225eda14cbcSMatt Macy 	bplist_destroy(&lca->to_keep);
3226eda14cbcSMatt Macy 	kmem_free(lca, sizeof (livelist_condense_arg_t));
3227eda14cbcSMatt Macy 	dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf, spa);
3228eda14cbcSMatt Macy 	spa->spa_to_condense.ds = NULL;
3229eda14cbcSMatt Macy 	if (err == EINTR)
3230eda14cbcSMatt Macy 		zfs_livelist_condense_zthr_cancel++;
3231eda14cbcSMatt Macy }
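
/*
 * Editorial sketch (not part of the original source): the dmu_tx dance
 * in spa_livelist_condense_cb() is the standard open-context handoff to
 * a synctask -- assign with TXG_NOWAIT | TXG_NOTHROTTLE so a full pool
 * fails fast instead of blocking the zthr, then queue the sync work and
 * commit.  Skeleton of the idiom; 'my_sync_func' and 'arg' are
 * placeholders.
 */
#if 0	/* illustrative only */
	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	dmu_tx_mark_netfree(tx);	/* the net effect frees space */
	dmu_tx_hold_space(tx, 1);
	int err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE);
	if (err == 0) {
		dsl_sync_task_nowait(spa_get_dsl(spa), my_sync_func, arg, tx);
		dmu_tx_commit(tx);	/* sync func runs when the txg syncs */
	} else {
		dmu_tx_abort(tx);	/* e.g. ENOSPC; retry in a later txg */
	}
#endif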
3232eda14cbcSMatt Macy 
3233eda14cbcSMatt Macy /*
3234eda14cbcSMatt Macy  * Check that there is something to condense but that a condense is not
3235eda14cbcSMatt Macy  * already in progress and that condensing has not been cancelled.
3236eda14cbcSMatt Macy  */
3237eda14cbcSMatt Macy static boolean_t
3238eda14cbcSMatt Macy spa_livelist_condense_cb_check(void *arg, zthr_t *z)
3239eda14cbcSMatt Macy {
3240eda14cbcSMatt Macy 	(void) z;
3241eda14cbcSMatt Macy 	spa_t *spa = arg;
3242eda14cbcSMatt Macy 	if ((spa->spa_to_condense.ds != NULL) &&
3243eda14cbcSMatt Macy 	    (spa->spa_to_condense.syncing == B_FALSE) &&
3244e92ffd9bSMartin Matuska 	    (spa->spa_to_condense.cancelled == B_FALSE)) {
3245eda14cbcSMatt Macy 		return (B_TRUE);
3246eda14cbcSMatt Macy 	}
3247eda14cbcSMatt Macy 	return (B_FALSE);
3248eda14cbcSMatt Macy }
3249eda14cbcSMatt Macy 
3250eda14cbcSMatt Macy static void
3251eda14cbcSMatt Macy spa_start_livelist_condensing_thread(spa_t *spa)
3252eda14cbcSMatt Macy {
3253eda14cbcSMatt Macy 	spa->spa_to_condense.ds = NULL;
3254eda14cbcSMatt Macy 	spa->spa_to_condense.first = NULL;
3255eda14cbcSMatt Macy 	spa->spa_to_condense.next = NULL;
3256eda14cbcSMatt Macy 	spa->spa_to_condense.syncing = B_FALSE;
3257eda14cbcSMatt Macy 	spa->spa_to_condense.cancelled = B_FALSE;
3258eda14cbcSMatt Macy 
3259eda14cbcSMatt Macy 	ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL);
3260eda14cbcSMatt Macy 	spa->spa_livelist_condense_zthr =
3261eda14cbcSMatt Macy 	    zthr_create("z_livelist_condense",
3262eda14cbcSMatt Macy 	    spa_livelist_condense_cb_check,
3263eda14cbcSMatt Macy 	    spa_livelist_condense_cb, spa, minclsyspri);
3264eda14cbcSMatt Macy }
3265eda14cbcSMatt Macy 
3266eda14cbcSMatt Macy static void
32672faf504dSMartin Matuska spa_spawn_aux_threads(spa_t *spa)
3268eda14cbcSMatt Macy {
3269eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
3270eda14cbcSMatt Macy 
3271eda14cbcSMatt Macy 	spa_start_raidz_expansion_thread(spa);
3272eda14cbcSMatt Macy 	spa_start_indirect_condensing_thread(spa);
3273eda14cbcSMatt Macy 	spa_start_livelist_destroy_thread(spa);
3274eda14cbcSMatt Macy 	spa_start_livelist_condensing_thread(spa);
3275eda14cbcSMatt Macy 
3276eda14cbcSMatt Macy 	ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL);
3277e716630dSMartin Matuska 	spa->spa_checkpoint_discard_zthr =
3278eda14cbcSMatt Macy 	    zthr_create("z_checkpoint_discard",
3279eda14cbcSMatt Macy 	    spa_checkpoint_discard_thread_check,
3280eda14cbcSMatt Macy 	    spa_checkpoint_discard_thread, spa, minclsyspri);
3281eda14cbcSMatt Macy }
3282eda14cbcSMatt Macy 
3283eda14cbcSMatt Macy /*
3284eda14cbcSMatt Macy  * Fix up config after a partly-completed split.  This is done with the
3285eda14cbcSMatt Macy  * ZPOOL_CONFIG_SPLIT nvlist.  Both the splitting pool and the split-off
32862faf504dSMartin Matuska  * pool have that entry in their config, but only the splitting one contains
3287eda14cbcSMatt Macy  * a list of all the guids of the vdevs that are being split off.
3288eda14cbcSMatt Macy  *
3289eda14cbcSMatt Macy  * This function determines what to do with that list: either rejoin
3290eda14cbcSMatt Macy  * all the disks to the pool, or complete the splitting process.  To attempt
3291eda14cbcSMatt Macy  * the rejoin, each disk that is offlined is marked online again, and
3292eda14cbcSMatt Macy  * we do a reopen() call.  If the vdev label for every disk that was
3293eda14cbcSMatt Macy  * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
3294eda14cbcSMatt Macy  * then we call vdev_split() on each disk, and complete the split.
3295eda14cbcSMatt Macy  *
3296eda14cbcSMatt Macy  * Otherwise we leave the config alone, with all the vdevs in place in
3297eda14cbcSMatt Macy  * the original pool.
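 *
 * (As seen later in this file, this is invoked from
 * spa_ld_select_uberblock() when the loaded config still contains a
 * ZPOOL_CONFIG_SPLIT entry.)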
3298eda14cbcSMatt Macy  */
3299eda14cbcSMatt Macy static void
3300eda14cbcSMatt Macy spa_try_repair(spa_t *spa, nvlist_t *config)
3301eda14cbcSMatt Macy {
3302eda14cbcSMatt Macy 	uint_t extracted;
3303eda14cbcSMatt Macy 	uint64_t *glist;
3304eda14cbcSMatt Macy 	uint_t i, gcount;
3305eda14cbcSMatt Macy 	nvlist_t *nvl;
3306eda14cbcSMatt Macy 	vdev_t **vd;
3307eda14cbcSMatt Macy 	boolean_t attempt_reopen;
3308eda14cbcSMatt Macy 
3309eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
3310eda14cbcSMatt Macy 		return;
3311eda14cbcSMatt Macy 
3312eda14cbcSMatt Macy 	/* check that the config is complete */
3313eda14cbcSMatt Macy 	if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
3314eda14cbcSMatt Macy 	    &glist, &gcount) != 0)
3315eda14cbcSMatt Macy 		return;
3316eda14cbcSMatt Macy 
3317eda14cbcSMatt Macy 	vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
3318eda14cbcSMatt Macy 
3319eda14cbcSMatt Macy 	/* attempt to online all the vdevs & validate */
3320eda14cbcSMatt Macy 	attempt_reopen = B_TRUE;
3321eda14cbcSMatt Macy 	for (i = 0; i < gcount; i++) {
3322eda14cbcSMatt Macy 		if (glist[i] == 0)	/* vdev is hole */
3323eda14cbcSMatt Macy 			continue;
3324eda14cbcSMatt Macy 
3325eda14cbcSMatt Macy 		vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
3326eda14cbcSMatt Macy 		if (vd[i] == NULL) {
3327eda14cbcSMatt Macy 			/*
3328eda14cbcSMatt Macy 			 * Don't bother attempting to reopen the disks;
3329eda14cbcSMatt Macy 			 * just do the split.
3330eda14cbcSMatt Macy 			 */
3331eda14cbcSMatt Macy 			attempt_reopen = B_FALSE;
3332eda14cbcSMatt Macy 		} else {
3333eda14cbcSMatt Macy 			/* attempt to re-online it */
3334eda14cbcSMatt Macy 			vd[i]->vdev_offline = B_FALSE;
3335eda14cbcSMatt Macy 		}
3336eda14cbcSMatt Macy 	}
3337eda14cbcSMatt Macy 
3338eda14cbcSMatt Macy 	if (attempt_reopen) {
3339eda14cbcSMatt Macy 		vdev_reopen(spa->spa_root_vdev);
3340eda14cbcSMatt Macy 
3341eda14cbcSMatt Macy 		/* check each device to see what state it's in */
3342eda14cbcSMatt Macy 		for (extracted = 0, i = 0; i < gcount; i++) {
3343eda14cbcSMatt Macy 			if (vd[i] != NULL &&
3344eda14cbcSMatt Macy 			    vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
3345eda14cbcSMatt Macy 				break;
3346eda14cbcSMatt Macy 			++extracted;
3347eda14cbcSMatt Macy 		}
3348eda14cbcSMatt Macy 	}
3349eda14cbcSMatt Macy 
3350eda14cbcSMatt Macy 	/*
3351eda14cbcSMatt Macy 	 * If every disk has been moved to the new pool, or if we never
3352eda14cbcSMatt Macy 	 * even attempted to look at them, then we split them off for
3353eda14cbcSMatt Macy 	 * good.
3354eda14cbcSMatt Macy 	 */
3355eda14cbcSMatt Macy 	if (!attempt_reopen || gcount == extracted) {
3356eda14cbcSMatt Macy 		for (i = 0; i < gcount; i++)
3357eda14cbcSMatt Macy 			if (vd[i] != NULL)
3358eda14cbcSMatt Macy 				vdev_split(vd[i]);
3359eda14cbcSMatt Macy 		vdev_reopen(spa->spa_root_vdev);
3360eda14cbcSMatt Macy 	}
3361eda14cbcSMatt Macy 
3362eda14cbcSMatt Macy 	kmem_free(vd, gcount * sizeof (vdev_t *));
3363eda14cbcSMatt Macy }
3364eda14cbcSMatt Macy 
3365eda14cbcSMatt Macy static int
3366eda14cbcSMatt Macy spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type)
3367eda14cbcSMatt Macy {
3368eda14cbcSMatt Macy 	const char *ereport = FM_EREPORT_ZFS_POOL;
3369eda14cbcSMatt Macy 	int error;
3370eda14cbcSMatt Macy 
3371eda14cbcSMatt Macy 	spa->spa_load_state = state;
3372eda14cbcSMatt Macy 	(void) spa_import_progress_set_state(spa_guid(spa),
3373eda14cbcSMatt Macy 	    spa_load_state(spa));
3374a0b956f5SMartin Matuska 	spa_import_progress_set_notes(spa, "spa_load()");
3375eda14cbcSMatt Macy 
3376eda14cbcSMatt Macy 	gethrestime(&spa->spa_loaded_ts);
3377eda14cbcSMatt Macy 	error = spa_load_impl(spa, type, &ereport);
3378eda14cbcSMatt Macy 
3379eda14cbcSMatt Macy 	/*
33803494f7c0SMartin Matuska 	 * Don't count references from objsets that are already closed
3381eda14cbcSMatt Macy 	 * and are making their way through the eviction process.
3382eda14cbcSMatt Macy 	 */
3383eda14cbcSMatt Macy 	spa_evicting_os_wait(spa);
3384eda14cbcSMatt Macy 	spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
3385eda14cbcSMatt Macy 	if (error) {
3386eda14cbcSMatt Macy 		if (error != EEXIST) {
3387eda14cbcSMatt Macy 			spa->spa_loaded_ts.tv_sec = 0;
3388eda14cbcSMatt Macy 			spa->spa_loaded_ts.tv_nsec = 0;
3389eda14cbcSMatt Macy 		}
3390eda14cbcSMatt Macy 		if (error != EBADF) {
3391eda14cbcSMatt Macy 			(void) zfs_ereport_post(ereport, spa,
3392eda14cbcSMatt Macy 			    NULL, NULL, NULL, 0);
3393eda14cbcSMatt Macy 		}
3394eda14cbcSMatt Macy 	}
3395eda14cbcSMatt Macy 	spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
3396eda14cbcSMatt Macy 	spa->spa_ena = 0;
3397eac7052fSMatt Macy 
33982c48331dSMatt Macy 	(void) spa_import_progress_set_state(spa_guid(spa),
3399eda14cbcSMatt Macy 	    spa_load_state(spa));
3400eda14cbcSMatt Macy 
3401eda14cbcSMatt Macy 	return (error);
3402eda14cbcSMatt Macy }
3403eda14cbcSMatt Macy 
3404eda14cbcSMatt Macy #ifdef ZFS_DEBUG
3405eda14cbcSMatt Macy /*
3406eda14cbcSMatt Macy  * Count the number of per-vdev ZAPs associated with all of the vdevs in the
3407eda14cbcSMatt Macy  * vdev tree rooted in the given vd, and ensure that each ZAP is present in the
3408eda14cbcSMatt Macy  * spa's per-vdev ZAP list.
3409eda14cbcSMatt Macy  */
3410eda14cbcSMatt Macy static uint64_t
3411eda14cbcSMatt Macy vdev_count_verify_zaps(vdev_t *vd)
3412eda14cbcSMatt Macy {
3413eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
3414eda14cbcSMatt Macy 	uint64_t total = 0;
3415eda14cbcSMatt Macy 
3416eda14cbcSMatt Macy 	if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2) &&
3417eda14cbcSMatt Macy 	    vd->vdev_root_zap != 0) {
3418eda14cbcSMatt Macy 		total++;
3419eda14cbcSMatt Macy 		ASSERT0(zap_lookup_int(spa->spa_meta_objset,
3420eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps, vd->vdev_root_zap));
3421eda14cbcSMatt Macy 	}
3422d411c1d6SMartin Matuska 	if (vd->vdev_top_zap != 0) {
3423d411c1d6SMartin Matuska 		total++;
3424d411c1d6SMartin Matuska 		ASSERT0(zap_lookup_int(spa->spa_meta_objset,
3425d411c1d6SMartin Matuska 		    spa->spa_all_vdev_zaps, vd->vdev_top_zap));
3426d411c1d6SMartin Matuska 	}
3427d411c1d6SMartin Matuska 	if (vd->vdev_leaf_zap != 0) {
3428eda14cbcSMatt Macy 		total++;
3429eda14cbcSMatt Macy 		ASSERT0(zap_lookup_int(spa->spa_meta_objset,
3430eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
3431eda14cbcSMatt Macy 	}
3432eda14cbcSMatt Macy 
3433eda14cbcSMatt Macy 	for (uint64_t i = 0; i < vd->vdev_children; i++) {
3434eda14cbcSMatt Macy 		total += vdev_count_verify_zaps(vd->vdev_child[i]);
3435eda14cbcSMatt Macy 	}
3436eda14cbcSMatt Macy 
3437eda14cbcSMatt Macy 	return (total);
3438eda14cbcSMatt Macy }
3439eda14cbcSMatt Macy #else
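/*
 * Non-debug builds compile this away to a constant 0; the
 * "(void) sizeof (vd)" keeps the argument formally used without
 * evaluating it, avoiding unused-variable warnings.
 */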
3440eda14cbcSMatt Macy #define	vdev_count_verify_zaps(vd) ((void) sizeof (vd), 0)
3441eda14cbcSMatt Macy #endif
3442eda14cbcSMatt Macy 
3443eda14cbcSMatt Macy /*
3444eda14cbcSMatt Macy  * Determine whether the activity check is required.
3445e92ffd9bSMartin Matuska  */
3446e92ffd9bSMartin Matuska static boolean_t
3447eda14cbcSMatt Macy spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label,
3448eda14cbcSMatt Macy     nvlist_t *config)
3449eda14cbcSMatt Macy {
3450eda14cbcSMatt Macy 	uint64_t state = 0;
3451eda14cbcSMatt Macy 	uint64_t hostid = 0;
3452eda14cbcSMatt Macy 	uint64_t tryconfig_txg = 0;
3453eda14cbcSMatt Macy 	uint64_t tryconfig_timestamp = 0;
3454eda14cbcSMatt Macy 	uint16_t tryconfig_mmp_seq = 0;
3455eda14cbcSMatt Macy 	nvlist_t *nvinfo;
3456eda14cbcSMatt Macy 
3457eda14cbcSMatt Macy 	if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
3458eda14cbcSMatt Macy 		nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3459eda14cbcSMatt Macy 		(void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG,
3460eda14cbcSMatt Macy 		    &tryconfig_txg);
3461eda14cbcSMatt Macy 		(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
3462eda14cbcSMatt Macy 		    &tryconfig_timestamp);
3463eda14cbcSMatt Macy 		(void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ,
3464eda14cbcSMatt Macy 		    &tryconfig_mmp_seq);
3465eda14cbcSMatt Macy 	}
3466eda14cbcSMatt Macy 
3467eda14cbcSMatt Macy 	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);
3468eda14cbcSMatt Macy 
3469eda14cbcSMatt Macy 	/*
3470eda14cbcSMatt Macy 	 * Disable the MMP activity check.  This is used by zdb, which
3471eda14cbcSMatt Macy 	 * is intended to be used on potentially active pools.
3472eda14cbcSMatt Macy 	 */
3473eda14cbcSMatt Macy 	if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP)
3474eda14cbcSMatt Macy 		return (B_FALSE);
3475eda14cbcSMatt Macy 
3476eda14cbcSMatt Macy 	/*
3477eda14cbcSMatt Macy 	 * Skip the activity check when the MMP feature is disabled.
3478eda14cbcSMatt Macy 	 */
3479eda14cbcSMatt Macy 	if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0)
3480eda14cbcSMatt Macy 		return (B_FALSE);
3481eda14cbcSMatt Macy 
3482eda14cbcSMatt Macy 	/*
3483eda14cbcSMatt Macy 	 * If the tryconfig_ values are nonzero, they are the results of an
3484eda14cbcSMatt Macy 	 * earlier tryimport.  If they all match the uberblock we just found,
3485eda14cbcSMatt Macy 	 * then the pool has not changed and we return false so we do not test
3486eda14cbcSMatt Macy 	 * a second time.
3487eda14cbcSMatt Macy 	 */
3488eda14cbcSMatt Macy 	if (tryconfig_txg && tryconfig_txg == ub->ub_txg &&
3489eda14cbcSMatt Macy 	    tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp &&
3490eda14cbcSMatt Macy 	    tryconfig_mmp_seq && tryconfig_mmp_seq ==
3491eda14cbcSMatt Macy 	    (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0))
3492eda14cbcSMatt Macy 		return (B_FALSE);
3493eda14cbcSMatt Macy 
3494eda14cbcSMatt Macy 	/*
3495eda14cbcSMatt Macy 	 * Allow the activity check to be skipped when importing the pool
3496eda14cbcSMatt Macy 	 * on the same host which last imported it.  Since the hostid in the
3497eda14cbcSMatt Macy 	 * configuration may be stale, use the one read from the label.
3498eda14cbcSMatt Macy 	 */
3499eda14cbcSMatt Macy 	if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID))
3500eda14cbcSMatt Macy 		hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID);
3501eda14cbcSMatt Macy 
3502eda14cbcSMatt Macy 	if (hostid == spa_get_hostid(spa))
3503eda14cbcSMatt Macy 		return (B_FALSE);
3504eda14cbcSMatt Macy 
3505eda14cbcSMatt Macy 	/*
3506eda14cbcSMatt Macy 	 * Skip the activity test when the pool was cleanly exported.
3507eda14cbcSMatt Macy 	 */
3508eda14cbcSMatt Macy 	if (state != POOL_STATE_ACTIVE)
3509eda14cbcSMatt Macy 		return (B_FALSE);
3510eda14cbcSMatt Macy 
3511eda14cbcSMatt Macy 	return (B_TRUE);
3512eda14cbcSMatt Macy }
3513eda14cbcSMatt Macy 
3514eda14cbcSMatt Macy /*
3515eda14cbcSMatt Macy  * Nanoseconds the activity check must watch for changes on-disk.
3516eda14cbcSMatt Macy  */
3517eda14cbcSMatt Macy static uint64_t
3518eda14cbcSMatt Macy spa_activity_check_duration(spa_t *spa, uberblock_t *ub)
3519eda14cbcSMatt Macy {
3520eda14cbcSMatt Macy 	uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
3521eda14cbcSMatt Macy 	uint64_t multihost_interval = MSEC2NSEC(
3522eda14cbcSMatt Macy 	    MMP_INTERVAL_OK(zfs_multihost_interval));
3523eda14cbcSMatt Macy 	uint64_t import_delay = MAX(NANOSEC, import_intervals *
3524eda14cbcSMatt Macy 	    multihost_interval);
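	/*
	 * Worked example (tunable defaults assumed, not taken from this
	 * code): zfs_multihost_interval = 1000 ms and
	 * zfs_multihost_import_intervals = 20 yield a baseline delay of
	 * 20 * 1 s = 20 s, well above the one-second NANOSEC floor.
	 */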
3525eda14cbcSMatt Macy 
3526eda14cbcSMatt Macy 	/*
3527eda14cbcSMatt Macy 	 * Local tunables determine a minimum duration except for the case
3528eda14cbcSMatt Macy 	 * where we know when the remote host will suspend the pool if MMP
3529eda14cbcSMatt Macy 	 * writes do not land.
3530eda14cbcSMatt Macy 	 *
3531eda14cbcSMatt Macy 	 * See Big Theory comment at the top of mmp.c for the reasoning behind
3532eda14cbcSMatt Macy 	 * these cases and times.
3533eda14cbcSMatt Macy 	 */
3534eda14cbcSMatt Macy 
3535eda14cbcSMatt Macy 	ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100);
3536eda14cbcSMatt Macy 
3537eda14cbcSMatt Macy 	if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
3538eda14cbcSMatt Macy 	    MMP_FAIL_INT(ub) > 0) {
3539eda14cbcSMatt Macy 
3540eda14cbcSMatt Macy 		/* MMP on remote host will suspend pool after failed writes */
3541eda14cbcSMatt Macy 		import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) *
3542eda14cbcSMatt Macy 		    MMP_IMPORT_SAFETY_FACTOR / 100;
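		/*
		 * Illustrative arithmetic (example values assumed, not
		 * taken from this code): with MMP_FAIL_INT(ub) = 10,
		 * MMP_INTERVAL(ub) = 1000 ms and a safety factor of 200
		 * (i.e. 200%), the delay is 10 * 1 s * 200 / 100 = 20 s.
		 */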
3543eda14cbcSMatt Macy 
3544eda14cbcSMatt Macy 		zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp "
3545eda14cbcSMatt Macy 		    "mmp_fails=%llu ub_mmp mmp_interval=%llu "
3546eda14cbcSMatt Macy 		    "import_intervals=%llu", (u_longlong_t)import_delay,
3547eda14cbcSMatt Macy 		    (u_longlong_t)MMP_FAIL_INT(ub),
3548eda14cbcSMatt Macy 		    (u_longlong_t)MMP_INTERVAL(ub),
3549eda14cbcSMatt Macy 		    (u_longlong_t)import_intervals);
3550eda14cbcSMatt Macy 
3551eda14cbcSMatt Macy 	} else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
355233b8c039SMartin Matuska 	    MMP_FAIL_INT(ub) == 0) {
355333b8c039SMartin Matuska 
355433b8c039SMartin Matuska 		/* MMP on remote host will never suspend pool */
355533b8c039SMartin Matuska 		import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) +
3556eda14cbcSMatt Macy 		    ub->ub_mmp_delay) * import_intervals);
3557eda14cbcSMatt Macy 
3558eda14cbcSMatt Macy 		zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp "
3559eda14cbcSMatt Macy 		    "mmp_interval=%llu ub_mmp_delay=%llu "
3560eda14cbcSMatt Macy 		    "import_intervals=%llu", (u_longlong_t)import_delay,
3561eda14cbcSMatt Macy 		    (u_longlong_t)MMP_INTERVAL(ub),
3562eda14cbcSMatt Macy 		    (u_longlong_t)ub->ub_mmp_delay,
3563eda14cbcSMatt Macy 		    (u_longlong_t)import_intervals);
3564eda14cbcSMatt Macy 
3565eda14cbcSMatt Macy 	} else if (MMP_VALID(ub)) {
356633b8c039SMartin Matuska 		/*
356733b8c039SMartin Matuska 		 * zfs-0.7 compatibility case
356833b8c039SMartin Matuska 		 */
356933b8c039SMartin Matuska 
3570eda14cbcSMatt Macy 		import_delay = MAX(import_delay, (multihost_interval +
3571eda14cbcSMatt Macy 		    ub->ub_mmp_delay) * import_intervals);
3572eda14cbcSMatt Macy 
3573eda14cbcSMatt Macy 		zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu "
3574eda14cbcSMatt Macy 		    "import_intervals=%llu leaves=%u",
3575eda14cbcSMatt Macy 		    (u_longlong_t)import_delay,
3576eda14cbcSMatt Macy 		    (u_longlong_t)ub->ub_mmp_delay,
3577eda14cbcSMatt Macy 		    (u_longlong_t)import_intervals,
3578eda14cbcSMatt Macy 		    vdev_count_leaves(spa));
3579eda14cbcSMatt Macy 	} else {
358033b8c039SMartin Matuska 		/* Using local tunings is the only reasonable option */
358133b8c039SMartin Matuska 		zfs_dbgmsg("pool last imported on non-MMP aware "
358233b8c039SMartin Matuska 		    "host using import_delay=%llu multihost_interval=%llu "
358333b8c039SMartin Matuska 		    "import_intervals=%llu", (u_longlong_t)import_delay,
3584eda14cbcSMatt Macy 		    (u_longlong_t)multihost_interval,
3585eda14cbcSMatt Macy 		    (u_longlong_t)import_intervals);
3586eda14cbcSMatt Macy 	}
3587eda14cbcSMatt Macy 
3588eda14cbcSMatt Macy 	return (import_delay);
358933b8c039SMartin Matuska }
359033b8c039SMartin Matuska 
359133b8c039SMartin Matuska /*
3592eda14cbcSMatt Macy  * Remote host activity check.
3593eda14cbcSMatt Macy  *
3594eda14cbcSMatt Macy  * error results:
3595eda14cbcSMatt Macy  *          0 - no activity detected
3596eda14cbcSMatt Macy  *  EREMOTEIO - remote activity detected
3597eda14cbcSMatt Macy  *      EINTR - user canceled the operation
3598eda14cbcSMatt Macy  */
3599eda14cbcSMatt Macy static int
3600eda14cbcSMatt Macy spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config,
3601eda14cbcSMatt Macy     boolean_t importing)
3602eda14cbcSMatt Macy {
3603eda14cbcSMatt Macy 	uint64_t txg = ub->ub_txg;
3604eda14cbcSMatt Macy 	uint64_t timestamp = ub->ub_timestamp;
3605eda14cbcSMatt Macy 	uint64_t mmp_config = ub->ub_mmp_config;
3606eda14cbcSMatt Macy 	uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0;
3607eda14cbcSMatt Macy 	uint64_t import_delay;
3608eda14cbcSMatt Macy 	hrtime_t import_expire, now;
36093494f7c0SMartin Matuska 	nvlist_t *mmp_label = NULL;
3610eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
3611eda14cbcSMatt Macy 	kcondvar_t cv;
3612eda14cbcSMatt Macy 	kmutex_t mtx;
3613eda14cbcSMatt Macy 	int error = 0;
3614eda14cbcSMatt Macy 
3615eda14cbcSMatt Macy 	cv_init(&cv, NULL, CV_DEFAULT, NULL);
3616eda14cbcSMatt Macy 	mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
3617eda14cbcSMatt Macy 	mutex_enter(&mtx);
3618eda14cbcSMatt Macy 
3619eda14cbcSMatt Macy 	/*
3620eda14cbcSMatt Macy 	 * If ZPOOL_CONFIG_MMP_TXG is present, an activity check was performed
3621eda14cbcSMatt Macy 	 * during the earlier tryimport.  If the txg recorded there is 0, then
3622eda14cbcSMatt Macy 	 * the pool is known to be active on another host.
3623eda14cbcSMatt Macy 	 *
3624eda14cbcSMatt Macy 	 * Otherwise, the pool might be in use on another host.  Check for
3625eda14cbcSMatt Macy 	 * changes in the uberblocks on disk if necessary.
3626eda14cbcSMatt Macy 	 */
3627eda14cbcSMatt Macy 	if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
3628eda14cbcSMatt Macy 		nvlist_t *nvinfo = fnvlist_lookup_nvlist(config,
3629eda14cbcSMatt Macy 		    ZPOOL_CONFIG_LOAD_INFO);
3630eda14cbcSMatt Macy 
3631eda14cbcSMatt Macy 		if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) &&
3632eda14cbcSMatt Macy 		    fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) {
3633eda14cbcSMatt Macy 			vdev_uberblock_load(rvd, ub, &mmp_label);
3634eda14cbcSMatt Macy 			error = SET_ERROR(EREMOTEIO);
3635eda14cbcSMatt Macy 			goto out;
3636eda14cbcSMatt Macy 		}
3637eda14cbcSMatt Macy 	}
3638eda14cbcSMatt Macy 
3639eda14cbcSMatt Macy 	import_delay = spa_activity_check_duration(spa, ub);
3640eda14cbcSMatt Macy 
3641eda14cbcSMatt Macy 	/* Add a small random factor in case of simultaneous imports (0-25%) */
3642eda14cbcSMatt Macy 	import_delay += import_delay * random_in_range(250) / 1000;
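	/*
	 * random_in_range(250) is assumed uniform over [0, 250), so the
	 * inflation is at most 249/1000, i.e. just under 25%.
	 */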
364333b8c039SMartin Matuska 
3644eda14cbcSMatt Macy 	import_expire = gethrtime() + import_delay;
3645eda14cbcSMatt Macy 
3646eda14cbcSMatt Macy 	if (importing) {
36473494f7c0SMartin Matuska 		spa_import_progress_set_notes(spa, "Checking MMP activity, "
36483494f7c0SMartin Matuska 		    "waiting %llu ms", (u_longlong_t)NSEC2MSEC(import_delay));
36493494f7c0SMartin Matuska 	}
36503494f7c0SMartin Matuska 
36513494f7c0SMartin Matuska 	int iterations = 0;
36523494f7c0SMartin Matuska 	while ((now = gethrtime()) < import_expire) {
36533494f7c0SMartin Matuska 		if (importing && iterations++ % 30 == 0) {
36543494f7c0SMartin Matuska 			spa_import_progress_set_notes(spa, "Checking MMP "
36553494f7c0SMartin Matuska 			    "activity, %llu ms remaining",
36563494f7c0SMartin Matuska 			    (u_longlong_t)NSEC2MSEC(import_expire - now));
36573494f7c0SMartin Matuska 		}
3658eda14cbcSMatt Macy 
3659eda14cbcSMatt Macy 		if (importing) {
3660eda14cbcSMatt Macy 			(void) spa_import_progress_set_mmp_check(spa_guid(spa),
3661eda14cbcSMatt Macy 			    NSEC2SEC(import_expire - gethrtime()));
3662eda14cbcSMatt Macy 		}
3663eda14cbcSMatt Macy 
3664eda14cbcSMatt Macy 		vdev_uberblock_load(rvd, ub, &mmp_label);
3665eda14cbcSMatt Macy 
3666eda14cbcSMatt Macy 		if (txg != ub->ub_txg || timestamp != ub->ub_timestamp ||
3667eda14cbcSMatt Macy 		    mmp_seq != (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) {
3668eda14cbcSMatt Macy 			zfs_dbgmsg("multihost activity detected "
366933b8c039SMartin Matuska 			    "txg %llu ub_txg  %llu "
367033b8c039SMartin Matuska 			    "timestamp %llu ub_timestamp  %llu "
367133b8c039SMartin Matuska 			    "mmp_config %#llx ub_mmp_config %#llx",
367233b8c039SMartin Matuska 			    (u_longlong_t)txg, (u_longlong_t)ub->ub_txg,
367333b8c039SMartin Matuska 			    (u_longlong_t)timestamp,
3674eda14cbcSMatt Macy 			    (u_longlong_t)ub->ub_timestamp,
3675eda14cbcSMatt Macy 			    (u_longlong_t)mmp_config,
3676eda14cbcSMatt Macy 			    (u_longlong_t)ub->ub_mmp_config);
3677eda14cbcSMatt Macy 
3678eda14cbcSMatt Macy 			error = SET_ERROR(EREMOTEIO);
3679eda14cbcSMatt Macy 			break;
3680eda14cbcSMatt Macy 		}
3681eda14cbcSMatt Macy 
3682eda14cbcSMatt Macy 		if (mmp_label) {
3683eda14cbcSMatt Macy 			nvlist_free(mmp_label);
3684eda14cbcSMatt Macy 			mmp_label = NULL;
3685eda14cbcSMatt Macy 		}
3686eda14cbcSMatt Macy 
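		/*
		 * As used here, a -1 return from cv_timedwait_sig() means
		 * the full wait elapsed; any other return means the sleep
		 * was interrupted (e.g. by a signal), which is surfaced to
		 * the caller as EINTR.
		 */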
3687eda14cbcSMatt Macy 		error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz);
3688eda14cbcSMatt Macy 		if (error != -1) {
3689eda14cbcSMatt Macy 			error = SET_ERROR(EINTR);
3690eda14cbcSMatt Macy 			break;
3691eda14cbcSMatt Macy 		}
3692eda14cbcSMatt Macy 		error = 0;
3693eda14cbcSMatt Macy 	}
3694eda14cbcSMatt Macy 
3695eda14cbcSMatt Macy out:
3696eda14cbcSMatt Macy 	mutex_exit(&mtx);
3697eda14cbcSMatt Macy 	mutex_destroy(&mtx);
3698eda14cbcSMatt Macy 	cv_destroy(&cv);
3699eda14cbcSMatt Macy 
3700eda14cbcSMatt Macy 	/*
3701eda14cbcSMatt Macy 	 * If the pool is determined to be active, store the status in the
3702eda14cbcSMatt Macy 	 * spa->spa_load_info nvlist.  If the remote hostname or hostid are
3703eda14cbcSMatt Macy 	 * available from the configuration read from disk, store them too.
3704eda14cbcSMatt Macy 	 * This allows 'zpool import' to generate a more useful message.
3705eda14cbcSMatt Macy 	 *
3706eda14cbcSMatt Macy 	 * ZPOOL_CONFIG_MMP_STATE    - observed pool status (mandatory)
3707eda14cbcSMatt Macy 	 * ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool
3708a0b956f5SMartin Matuska 	 * ZPOOL_CONFIG_MMP_HOSTID   - hostid from the active pool
3709eda14cbcSMatt Macy 	 */
3710eda14cbcSMatt Macy 	if (error == EREMOTEIO) {
3711eda14cbcSMatt Macy 		const char *hostname = "<unknown>";
3712eda14cbcSMatt Macy 		uint64_t hostid = 0;
3713eda14cbcSMatt Macy 
3714eda14cbcSMatt Macy 		if (mmp_label) {
3715eda14cbcSMatt Macy 			if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) {
3716eda14cbcSMatt Macy 				hostname = fnvlist_lookup_string(mmp_label,
3717eda14cbcSMatt Macy 				    ZPOOL_CONFIG_HOSTNAME);
3718eda14cbcSMatt Macy 				fnvlist_add_string(spa->spa_load_info,
3719eda14cbcSMatt Macy 				    ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
3720eda14cbcSMatt Macy 			}
3721eda14cbcSMatt Macy 
3722eda14cbcSMatt Macy 			if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) {
3723eda14cbcSMatt Macy 				hostid = fnvlist_lookup_uint64(mmp_label,
3724eda14cbcSMatt Macy 				    ZPOOL_CONFIG_HOSTID);
3725eda14cbcSMatt Macy 				fnvlist_add_uint64(spa->spa_load_info,
3726eda14cbcSMatt Macy 				    ZPOOL_CONFIG_MMP_HOSTID, hostid);
3727eda14cbcSMatt Macy 			}
3728eda14cbcSMatt Macy 		}
3729eda14cbcSMatt Macy 
3730eda14cbcSMatt Macy 		fnvlist_add_uint64(spa->spa_load_info,
3731eda14cbcSMatt Macy 		    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE);
3732eda14cbcSMatt Macy 		fnvlist_add_uint64(spa->spa_load_info,
3733eda14cbcSMatt Macy 		    ZPOOL_CONFIG_MMP_TXG, 0);
3734eda14cbcSMatt Macy 
3735eda14cbcSMatt Macy 		error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO);
3736eda14cbcSMatt Macy 	}
3737eda14cbcSMatt Macy 
3738eda14cbcSMatt Macy 	if (mmp_label)
3739eda14cbcSMatt Macy 		nvlist_free(mmp_label);
3740eda14cbcSMatt Macy 
3741eda14cbcSMatt Macy 	return (error);
3742eda14cbcSMatt Macy }
3743eda14cbcSMatt Macy 
3744eda14cbcSMatt Macy /*
37452a58b312SMartin Matuska  * Called from zfs_ioc_clear for a pool that was suspended
3746eda14cbcSMatt Macy  * after failing mmp write checks.
3747eda14cbcSMatt Macy  */
3748eda14cbcSMatt Macy boolean_t
3749eda14cbcSMatt Macy spa_mmp_remote_host_activity(spa_t *spa)
3750eda14cbcSMatt Macy {
3751eda14cbcSMatt Macy 	ASSERT(spa_multihost(spa) && spa_suspended(spa));
3752eda14cbcSMatt Macy 
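	/*
	 * This check is deliberately conservative: every path below that
	 * cannot positively rule out a remote writer returns B_TRUE, so a
	 * suspended multihost pool stays suspended unless it is provably
	 * safe to resume.
	 */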
3753eda14cbcSMatt Macy 	nvlist_t *best_label;
3754eda14cbcSMatt Macy 	uberblock_t best_ub;
3755eda14cbcSMatt Macy 
3756eda14cbcSMatt Macy 	/*
3757eda14cbcSMatt Macy 	 * Locate the best uberblock on disk
3758eda14cbcSMatt Macy 	 */
3759ac0bf12eSMatt Macy 	vdev_uberblock_load(spa->spa_root_vdev, &best_ub, &best_label);
3760ac0bf12eSMatt Macy 	if (best_label) {
3761eda14cbcSMatt Macy 		/*
3762eda14cbcSMatt Macy 		 * confirm that the best hostid matches our hostid
3763eda14cbcSMatt Macy 		 */
3764eda14cbcSMatt Macy 		if (nvlist_exists(best_label, ZPOOL_CONFIG_HOSTID) &&
3765eda14cbcSMatt Macy 		    spa_get_hostid(spa) !=
3766eda14cbcSMatt Macy 		    fnvlist_lookup_uint64(best_label, ZPOOL_CONFIG_HOSTID)) {
3767eda14cbcSMatt Macy 			nvlist_free(best_label);
3768eda14cbcSMatt Macy 			return (B_TRUE);
3769eda14cbcSMatt Macy 		}
3770eda14cbcSMatt Macy 		nvlist_free(best_label);
3771eda14cbcSMatt Macy 	} else {
3772eda14cbcSMatt Macy 		return (B_TRUE);
3773eda14cbcSMatt Macy 	}
3774eda14cbcSMatt Macy 
3775eda14cbcSMatt Macy 	if (!MMP_VALID(&best_ub) ||
3776eda14cbcSMatt Macy 	    !MMP_FAIL_INT_VALID(&best_ub) ||
3777eda14cbcSMatt Macy 	    MMP_FAIL_INT(&best_ub) == 0) {
3778eda14cbcSMatt Macy 		return (B_TRUE);
3779eda14cbcSMatt Macy 	}
37802a58b312SMartin Matuska 
37812a58b312SMartin Matuska 	if (best_ub.ub_txg != spa->spa_uberblock.ub_txg ||
3782eda14cbcSMatt Macy 	    best_ub.ub_timestamp != spa->spa_uberblock.ub_timestamp) {
3783eda14cbcSMatt Macy 		zfs_dbgmsg("txg mismatch detected during pool clear "
3784eda14cbcSMatt Macy 		    "txg %llu ub_txg %llu timestamp %llu ub_timestamp %llu",
3785eda14cbcSMatt Macy 		    (u_longlong_t)spa->spa_uberblock.ub_txg,
3786eda14cbcSMatt Macy 		    (u_longlong_t)best_ub.ub_txg,
3787eda14cbcSMatt Macy 		    (u_longlong_t)spa->spa_uberblock.ub_timestamp,
3788eda14cbcSMatt Macy 		    (u_longlong_t)best_ub.ub_timestamp);
3789eda14cbcSMatt Macy 		return (B_TRUE);
3790eda14cbcSMatt Macy 	}
3791eda14cbcSMatt Macy 
3792eda14cbcSMatt Macy 	/*
3793eda14cbcSMatt Macy 	 * Perform an activity check looking for any remote writer
3794eda14cbcSMatt Macy 	 */
3795eda14cbcSMatt Macy 	return (spa_activity_check(spa, &spa->spa_uberblock, spa->spa_config,
3796eda14cbcSMatt Macy 	    B_FALSE) != 0);
3797eda14cbcSMatt Macy }
3798eda14cbcSMatt Macy 
3799eda14cbcSMatt Macy static int
3800eda14cbcSMatt Macy spa_verify_host(spa_t *spa, nvlist_t *mos_config)
3801eda14cbcSMatt Macy {
3802eda14cbcSMatt Macy 	uint64_t hostid;
3803eda14cbcSMatt Macy 	const char *hostname;
3804eda14cbcSMatt Macy 	uint64_t myhostid = 0;
3805eda14cbcSMatt Macy 
3806eda14cbcSMatt Macy 	if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config,
3807eda14cbcSMatt Macy 	    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
3808eda14cbcSMatt Macy 		hostname = fnvlist_lookup_string(mos_config,
3809eda14cbcSMatt Macy 		    ZPOOL_CONFIG_HOSTNAME);
3810eda14cbcSMatt Macy 
3811eda14cbcSMatt Macy 		myhostid = zone_get_hostid(NULL);
3812eda14cbcSMatt Macy 
3813eda14cbcSMatt Macy 		if (hostid != 0 && myhostid != 0 && hostid != myhostid) {
3814eda14cbcSMatt Macy 			cmn_err(CE_WARN, "pool '%s' could not be "
3815eda14cbcSMatt Macy 			    "loaded as it was last accessed by "
3816eda14cbcSMatt Macy 			    "another system (host: %s hostid: 0x%llx). "
3817eda14cbcSMatt Macy 			    "See: https://openzfs.github.io/openzfs-docs/msg/"
3818eda14cbcSMatt Macy 			    "ZFS-8000-EY",
3819eda14cbcSMatt Macy 			    spa_name(spa), hostname, (u_longlong_t)hostid);
3820eda14cbcSMatt Macy 			spa_load_failed(spa, "hostid verification failed: pool "
3821eda14cbcSMatt Macy 			    "last accessed by host: %s (hostid: 0x%llx)",
3822eda14cbcSMatt Macy 			    hostname, (u_longlong_t)hostid);
3823eda14cbcSMatt Macy 			return (SET_ERROR(EBADF));
3824eda14cbcSMatt Macy 		}
3825eda14cbcSMatt Macy 	}
3826eda14cbcSMatt Macy 
3827eda14cbcSMatt Macy 	return (0);
3828eda14cbcSMatt Macy }
3829eda14cbcSMatt Macy 
3830ee36e25aSMartin Matuska static int
3831ee36e25aSMartin Matuska spa_ld_parse_config(spa_t *spa, spa_import_type_t type)
3832ee36e25aSMartin Matuska {
3833ee36e25aSMartin Matuska 	int error = 0;
3834ee36e25aSMartin Matuska 	nvlist_t *nvtree, *nvl, *config = spa->spa_config;
3835eda14cbcSMatt Macy 	int parse;
3836eda14cbcSMatt Macy 	vdev_t *rvd;
3837eda14cbcSMatt Macy 	uint64_t pool_guid;
3838eda14cbcSMatt Macy 	const char *comment;
3839eda14cbcSMatt Macy 	const char *compatibility;
3840eda14cbcSMatt Macy 
3841eda14cbcSMatt Macy 	/*
3842eda14cbcSMatt Macy 	 * Versioning wasn't explicitly added to the label until later, so if
3843eda14cbcSMatt Macy 	 * it's not present, treat it as the initial version.
3844eda14cbcSMatt Macy 	 */
3845eda14cbcSMatt Macy 	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
3846eda14cbcSMatt Macy 	    &spa->spa_ubsync.ub_version) != 0)
3847eda14cbcSMatt Macy 		spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
3848eda14cbcSMatt Macy 
3849eda14cbcSMatt Macy 	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
3850eda14cbcSMatt Macy 		spa_load_failed(spa, "invalid config provided: '%s' missing",
3851eda14cbcSMatt Macy 		    ZPOOL_CONFIG_POOL_GUID);
3852eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
3853eda14cbcSMatt Macy 	}
3854eda14cbcSMatt Macy 
3855eda14cbcSMatt Macy 	/*
3856eda14cbcSMatt Macy 	 * If we are doing an import, ensure that the pool is not already
3857eda14cbcSMatt Macy 	 * imported by checking if its pool guid already exists in the
3858eda14cbcSMatt Macy 	 * spa namespace.
3859eda14cbcSMatt Macy 	 *
3860eda14cbcSMatt Macy 	 * The only case that we allow an already imported pool to be
3861eda14cbcSMatt Macy 	 * imported again, is when the pool is checkpointed and we want to
3862eda14cbcSMatt Macy 	 * look at its checkpointed state from userland tools like zdb.
3863eda14cbcSMatt Macy 	 */
3864eda14cbcSMatt Macy #ifdef _KERNEL
3865eda14cbcSMatt Macy 	if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
3866eda14cbcSMatt Macy 	    spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
3867eda14cbcSMatt Macy 	    spa_guid_exists(pool_guid, 0)) {
3868eda14cbcSMatt Macy #else
3869eda14cbcSMatt Macy 	if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
3870eda14cbcSMatt Macy 	    spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
3871eda14cbcSMatt Macy 	    spa_guid_exists(pool_guid, 0) &&
3872eda14cbcSMatt Macy 	    !spa_importing_readonly_checkpoint(spa)) {
3873eda14cbcSMatt Macy #endif
3874eda14cbcSMatt Macy 		spa_load_failed(spa, "a pool with guid %llu is already open",
3875eda14cbcSMatt Macy 		    (u_longlong_t)pool_guid);
3876eda14cbcSMatt Macy 		return (SET_ERROR(EEXIST));
3877eda14cbcSMatt Macy 	}
3878eda14cbcSMatt Macy 
3879eda14cbcSMatt Macy 	spa->spa_config_guid = pool_guid;
3880eda14cbcSMatt Macy 
3881eda14cbcSMatt Macy 	nvlist_free(spa->spa_load_info);
3882eda14cbcSMatt Macy 	spa->spa_load_info = fnvlist_alloc();
3883eda14cbcSMatt Macy 
3884eda14cbcSMatt Macy 	ASSERT(spa->spa_comment == NULL);
3885eda14cbcSMatt Macy 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
3886eda14cbcSMatt Macy 		spa->spa_comment = spa_strdup(comment);
3887eda14cbcSMatt Macy 
3888eda14cbcSMatt Macy 	ASSERT(spa->spa_compatibility == NULL);
3889eda14cbcSMatt Macy 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY,
3890eda14cbcSMatt Macy 	    &compatibility) == 0)
3891eda14cbcSMatt Macy 		spa->spa_compatibility = spa_strdup(compatibility);
3892eda14cbcSMatt Macy 
3893eda14cbcSMatt Macy 	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
3894eda14cbcSMatt Macy 	    &spa->spa_config_txg);
3895eda14cbcSMatt Macy 
3896eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0)
3897eda14cbcSMatt Macy 		spa->spa_config_splitting = fnvlist_dup(nvl);
3898eda14cbcSMatt Macy 
3899eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) {
3900eda14cbcSMatt Macy 		spa_load_failed(spa, "invalid config provided: '%s' missing",
3901eda14cbcSMatt Macy 		    ZPOOL_CONFIG_VDEV_TREE);
3902eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
3903eda14cbcSMatt Macy 	}
3904eda14cbcSMatt Macy 
3905eda14cbcSMatt Macy 	/*
3906eda14cbcSMatt Macy 	 * Create "The Godfather" zio to hold all async IOs
3907eda14cbcSMatt Macy 	 */
3908eda14cbcSMatt Macy 	spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
3909eda14cbcSMatt Macy 	    KM_SLEEP);
3910eda14cbcSMatt Macy 	for (int i = 0; i < max_ncpus; i++) {
3911eda14cbcSMatt Macy 		spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
3912eda14cbcSMatt Macy 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
3913eda14cbcSMatt Macy 		    ZIO_FLAG_GODFATHER);
3914eda14cbcSMatt Macy 	}
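	/*
	 * One root zio is created per CPU; async children are attached to
	 * the root for the issuing CPU, which (presumably) spreads the
	 * contention that funneling every async I/O through a single
	 * godfather zio would create.
	 */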
3915eda14cbcSMatt Macy 
3916eda14cbcSMatt Macy 	/*
3917eda14cbcSMatt Macy 	 * Parse the configuration into a vdev tree.  We explicitly set the
3918eda14cbcSMatt Macy 	 * value that will be returned by spa_version() since parsing the
3919eda14cbcSMatt Macy 	 * configuration requires knowing the version number.
3920eda14cbcSMatt Macy 	 */
3921eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3922eda14cbcSMatt Macy 	parse = (type == SPA_IMPORT_EXISTING ?
3923eda14cbcSMatt Macy 	    VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
3924eda14cbcSMatt Macy 	error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse);
3925eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
3926eda14cbcSMatt Macy 
3927eda14cbcSMatt Macy 	if (error != 0) {
3928eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to parse config [error=%d]",
3929eda14cbcSMatt Macy 		    error);
3930eda14cbcSMatt Macy 		return (error);
3931eda14cbcSMatt Macy 	}
3932eda14cbcSMatt Macy 
3933eda14cbcSMatt Macy 	ASSERT(spa->spa_root_vdev == rvd);
3934eda14cbcSMatt Macy 	ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
3935eda14cbcSMatt Macy 	ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
3936eda14cbcSMatt Macy 
3937eda14cbcSMatt Macy 	if (type != SPA_IMPORT_ASSEMBLE) {
3938eda14cbcSMatt Macy 		ASSERT(spa_guid(spa) == pool_guid);
3939eda14cbcSMatt Macy 	}
3940eda14cbcSMatt Macy 
3941eda14cbcSMatt Macy 	return (0);
3942eda14cbcSMatt Macy }
3943eda14cbcSMatt Macy 
3944eda14cbcSMatt Macy /*
3945eda14cbcSMatt Macy  * Recursively open all vdevs in the vdev tree. This function is called twice:
3946eda14cbcSMatt Macy  * first with the untrusted config, then with the trusted config.
3947eda14cbcSMatt Macy  */
3948eda14cbcSMatt Macy static int
3949eda14cbcSMatt Macy spa_ld_open_vdevs(spa_t *spa)
3950eda14cbcSMatt Macy {
3951eda14cbcSMatt Macy 	int error = 0;
3952eda14cbcSMatt Macy 
3953eda14cbcSMatt Macy 	/*
3954eda14cbcSMatt Macy 	 * spa_missing_tvds_allowed defines how many top-level vdevs can be
3955eda14cbcSMatt Macy 	 * missing/unopenable for the root vdev to still be considered openable.
3956eda14cbcSMatt Macy 	 */
3957eda14cbcSMatt Macy 	if (spa->spa_trust_config) {
3958eda14cbcSMatt Macy 		spa->spa_missing_tvds_allowed = zfs_max_missing_tvds;
3959eda14cbcSMatt Macy 	} else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) {
3960eda14cbcSMatt Macy 		spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile;
3961eda14cbcSMatt Macy 	} else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) {
3962eda14cbcSMatt Macy 		spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan;
3963eda14cbcSMatt Macy 	} else {
3964eda14cbcSMatt Macy 		spa->spa_missing_tvds_allowed = 0;
3965eda14cbcSMatt Macy 	}
3966eda14cbcSMatt Macy 
3967eda14cbcSMatt Macy 	spa->spa_missing_tvds_allowed =
3968eda14cbcSMatt Macy 	    MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed);
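	/*
	 * Note that zfs_max_missing_tvds acts as a floor here: it can only
	 * raise the limit chosen above, whatever the config source.
	 */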
3969eda14cbcSMatt Macy 
3970eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3971eda14cbcSMatt Macy 	error = vdev_open(spa->spa_root_vdev);
3972eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
3973eda14cbcSMatt Macy 
3974eda14cbcSMatt Macy 	if (spa->spa_missing_tvds != 0) {
3975eda14cbcSMatt Macy 		spa_load_note(spa, "vdev tree has %lld missing top-level "
3976eda14cbcSMatt Macy 		    "vdevs.", (u_longlong_t)spa->spa_missing_tvds);
3977eda14cbcSMatt Macy 		if (spa->spa_trust_config && (spa->spa_mode & SPA_MODE_WRITE)) {
3978eda14cbcSMatt Macy 			/*
3979eda14cbcSMatt Macy 			 * Although theoretically we could allow users to open
3980eda14cbcSMatt Macy 			 * incomplete pools in RW mode, we'd need to add a lot
3981eda14cbcSMatt Macy 			 * of extra logic (e.g. adjust pool space to account
3982eda14cbcSMatt Macy 			 * for missing vdevs).
3983eda14cbcSMatt Macy 			 * This limitation also prevents users from accidentally
3984eda14cbcSMatt Macy 			 * opening the pool in RW mode during data recovery and
3985eda14cbcSMatt Macy 			 * damaging it further.
3986eda14cbcSMatt Macy 			 */
3987eda14cbcSMatt Macy 			spa_load_note(spa, "pools with missing top-level "
3988eda14cbcSMatt Macy 			    "vdevs can only be opened in read-only mode.");
3989eda14cbcSMatt Macy 			error = SET_ERROR(ENXIO);
3990eda14cbcSMatt Macy 		} else {
3991eda14cbcSMatt Macy 			spa_load_note(spa, "current settings allow for maximum "
3992eda14cbcSMatt Macy 			    "%lld missing top-level vdevs at this stage.",
3993eda14cbcSMatt Macy 			    (u_longlong_t)spa->spa_missing_tvds_allowed);
3994eda14cbcSMatt Macy 		}
3995eda14cbcSMatt Macy 	}
3996eda14cbcSMatt Macy 	if (error != 0) {
3997eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to open vdev tree [error=%d]",
3998eda14cbcSMatt Macy 		    error);
3999eda14cbcSMatt Macy 	}
4000eda14cbcSMatt Macy 	if (spa->spa_missing_tvds != 0 || error != 0)
4001eda14cbcSMatt Macy 		vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2);
4002eda14cbcSMatt Macy 
4003eda14cbcSMatt Macy 	return (error);
4004eda14cbcSMatt Macy }
4005eda14cbcSMatt Macy 
4006eda14cbcSMatt Macy /*
4007eda14cbcSMatt Macy  * We need to validate the vdev labels against the configuration that
4008eda14cbcSMatt Macy  * we have in hand. This function is called twice: first with an untrusted
4009eda14cbcSMatt Macy  * config, then with a trusted config. The validation is more strict when the
4010eda14cbcSMatt Macy  * config is trusted.
4011eda14cbcSMatt Macy  */
4012eda14cbcSMatt Macy static int
4013eda14cbcSMatt Macy spa_ld_validate_vdevs(spa_t *spa)
4014eda14cbcSMatt Macy {
4015eda14cbcSMatt Macy 	int error = 0;
4016eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4017eda14cbcSMatt Macy 
4018eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4019eda14cbcSMatt Macy 	error = vdev_validate(rvd);
4020eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
4021eda14cbcSMatt Macy 
4022eda14cbcSMatt Macy 	if (error != 0) {
4023eda14cbcSMatt Macy 		spa_load_failed(spa, "vdev_validate failed [error=%d]", error);
4024eda14cbcSMatt Macy 		return (error);
4025eda14cbcSMatt Macy 	}
4026eda14cbcSMatt Macy 
4027eda14cbcSMatt Macy 	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
4028eda14cbcSMatt Macy 		spa_load_failed(spa, "cannot open vdev tree after invalidating "
4029eda14cbcSMatt Macy 		    "some vdevs");
4030eda14cbcSMatt Macy 		vdev_dbgmsg_print_tree(rvd, 2);
4031eda14cbcSMatt Macy 		return (SET_ERROR(ENXIO));
4032eda14cbcSMatt Macy 	}
4033eda14cbcSMatt Macy 
4034eda14cbcSMatt Macy 	return (0);
4035eda14cbcSMatt Macy }
4036eda14cbcSMatt Macy 
4037eda14cbcSMatt Macy static void
4038eda14cbcSMatt Macy spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub)
4039eda14cbcSMatt Macy {
4040eda14cbcSMatt Macy 	spa->spa_state = POOL_STATE_ACTIVE;
4041eda14cbcSMatt Macy 	spa->spa_ubsync = spa->spa_uberblock;
4042eda14cbcSMatt Macy 	spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
4043e716630dSMartin Matuska 	    TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
4044e716630dSMartin Matuska 	spa->spa_first_txg = spa->spa_last_ubsync_txg ?
4045e716630dSMartin Matuska 	    spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
4046e716630dSMartin Matuska 	spa->spa_claim_max_txg = spa->spa_first_txg;
4047e716630dSMartin Matuska 	spa->spa_prev_software_version = ub->ub_software_version;
4048e716630dSMartin Matuska }
4049eda14cbcSMatt Macy 
4050eda14cbcSMatt Macy static int
4051eda14cbcSMatt Macy spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type)
4052eda14cbcSMatt Macy {
4053eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4054eda14cbcSMatt Macy 	nvlist_t *label;
4055eda14cbcSMatt Macy 	uberblock_t *ub = &spa->spa_uberblock;
4056eda14cbcSMatt Macy 	boolean_t activity_check = B_FALSE;
4057eda14cbcSMatt Macy 
4058eda14cbcSMatt Macy 	/*
4059eda14cbcSMatt Macy 	 * If we are opening the checkpointed state of the pool by
4060eda14cbcSMatt Macy 	 * rewinding to it, at this point we will have written the
4061eda14cbcSMatt Macy 	 * checkpointed uberblock to the vdev labels, so searching
4062eda14cbcSMatt Macy 	 * the labels will find the right uberblock.  However, if
4063eda14cbcSMatt Macy 	 * we are opening the checkpointed state read-only, we have
4064eda14cbcSMatt Macy 	 * not modified the labels. Therefore, we must ignore the
4065eda14cbcSMatt Macy 	 * labels and continue using the spa_uberblock that was set
4066eda14cbcSMatt Macy 	 * by spa_ld_checkpoint_rewind.
4067eda14cbcSMatt Macy 	 *
4068eda14cbcSMatt Macy 	 * Note that it would be fine to ignore the labels when
4069eda14cbcSMatt Macy 	 * rewinding (opening writeable) as well. However, if we
4070eda14cbcSMatt Macy 	 * crash just after writing the labels, we will end up
4071eda14cbcSMatt Macy 	 * searching the labels. Doing so in the common case means
4072eda14cbcSMatt Macy 	 * that this code path gets exercised normally, rather than
4073eda14cbcSMatt Macy 	 * just in the edge case.
4074eda14cbcSMatt Macy 	 */
4075eda14cbcSMatt Macy 	if (ub->ub_checkpoint_txg != 0 &&
4076eda14cbcSMatt Macy 	    spa_importing_readonly_checkpoint(spa)) {
4077eda14cbcSMatt Macy 		spa_ld_select_uberblock_done(spa, ub);
4078eda14cbcSMatt Macy 		return (0);
4079eda14cbcSMatt Macy 	}
4080eda14cbcSMatt Macy 
4081eda14cbcSMatt Macy 	/*
4082eda14cbcSMatt Macy 	 * Find the best uberblock.
4083eda14cbcSMatt Macy 	 */
4084eda14cbcSMatt Macy 	vdev_uberblock_load(rvd, ub, &label);
4085eda14cbcSMatt Macy 
4086eda14cbcSMatt Macy 	/*
4087eda14cbcSMatt Macy 	 * If we weren't able to find a single valid uberblock, return failure.
4088eda14cbcSMatt Macy 	 */
4089eda14cbcSMatt Macy 	if (ub->ub_txg == 0) {
4090eda14cbcSMatt Macy 		nvlist_free(label);
4091eda14cbcSMatt Macy 		spa_load_failed(spa, "no valid uberblock found");
4092eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
4093eda14cbcSMatt Macy 	}
4094eda14cbcSMatt Macy 
4095eda14cbcSMatt Macy 	if (spa->spa_load_max_txg != UINT64_MAX) {
4096eda14cbcSMatt Macy 		(void) spa_import_progress_set_max_txg(spa_guid(spa),
4097eda14cbcSMatt Macy 		    (u_longlong_t)spa->spa_load_max_txg);
4098eda14cbcSMatt Macy 	}
4099eda14cbcSMatt Macy 	spa_load_note(spa, "using uberblock with txg=%llu",
4100eda14cbcSMatt Macy 	    (u_longlong_t)ub->ub_txg);
4101eda14cbcSMatt Macy 	if (ub->ub_raidz_reflow_info != 0) {
4102eda14cbcSMatt Macy 		spa_load_note(spa, "uberblock raidz_reflow_info: "
4103eda14cbcSMatt Macy 		    "state=%u offset=%llu",
4104eda14cbcSMatt Macy 		    (int)RRSS_GET_STATE(ub),
4105eda14cbcSMatt Macy 		    (u_longlong_t)RRSS_GET_OFFSET(ub));
4106eda14cbcSMatt Macy 	}
4107eda14cbcSMatt Macy 
4109eda14cbcSMatt Macy 	/*
4110eda14cbcSMatt Macy 	 * For pools which have the multihost property on, determine if the
4111eda14cbcSMatt Macy 	 * pool is truly inactive and can be safely imported.  Prevent
4112eda14cbcSMatt Macy 	 * hosts which don't have a hostid set from importing the pool.
4113eda14cbcSMatt Macy 	 */
4114eda14cbcSMatt Macy 	activity_check = spa_activity_check_required(spa, ub, label,
4115eda14cbcSMatt Macy 	    spa->spa_config);
4116eda14cbcSMatt Macy 	if (activity_check) {
4117eda14cbcSMatt Macy 		if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
4118eda14cbcSMatt Macy 		    spa_get_hostid(spa) == 0) {
411981b22a98SMartin Matuska 			nvlist_free(label);
4120eda14cbcSMatt Macy 			fnvlist_add_uint64(spa->spa_load_info,
4121eda14cbcSMatt Macy 			    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
4122eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
4123eda14cbcSMatt Macy 		}
4124eda14cbcSMatt Macy 
4125eda14cbcSMatt Macy 		int error =
4126eda14cbcSMatt Macy 		    spa_activity_check(spa, ub, spa->spa_config, B_TRUE);
4127eda14cbcSMatt Macy 		if (error) {
4128eda14cbcSMatt Macy 			nvlist_free(label);
4129eda14cbcSMatt Macy 			return (error);
4130eda14cbcSMatt Macy 		}
4131eda14cbcSMatt Macy 
413281b22a98SMartin Matuska 		fnvlist_add_uint64(spa->spa_load_info,
4133eda14cbcSMatt Macy 		    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE);
4134eda14cbcSMatt Macy 		fnvlist_add_uint64(spa->spa_load_info,
4135eda14cbcSMatt Macy 		    ZPOOL_CONFIG_MMP_TXG, ub->ub_txg);
4136eda14cbcSMatt Macy 		fnvlist_add_uint16(spa->spa_load_info,
4137eda14cbcSMatt Macy 		    ZPOOL_CONFIG_MMP_SEQ,
413881b22a98SMartin Matuska 		    (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0));
413981b22a98SMartin Matuska 	}
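	/*
	 * The txg and sequence recorded above are consumed by
	 * spa_activity_check_required() on a subsequent import attempt: if
	 * they still match the on-disk uberblock, the (slow) activity test
	 * is skipped the second time around.
	 */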
4140eda14cbcSMatt Macy 
4141eda14cbcSMatt Macy 	/*
4142eda14cbcSMatt Macy 	 * If the pool has an unsupported version we can't open it.
4143eda14cbcSMatt Macy 	 */
414481b22a98SMartin Matuska 	if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
414581b22a98SMartin Matuska 		nvlist_free(label);
4146eda14cbcSMatt Macy 		spa_load_failed(spa, "version %llu is not supported",
4147eda14cbcSMatt Macy 		    (u_longlong_t)ub->ub_version);
4148eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
4149eda14cbcSMatt Macy 	}
4150eda14cbcSMatt Macy 
4151eda14cbcSMatt Macy 	if (ub->ub_version >= SPA_VERSION_FEATURES) {
4152eda14cbcSMatt Macy 		nvlist_t *features;
4153eda14cbcSMatt Macy 
4154eda14cbcSMatt Macy 		/*
4155eda14cbcSMatt Macy 		 * If we weren't able to find what's necessary for reading the
4156eda14cbcSMatt Macy 		 * MOS in the label, return failure.
4157eda14cbcSMatt Macy 		 */
4158eda14cbcSMatt Macy 		if (label == NULL) {
4159eda14cbcSMatt Macy 			spa_load_failed(spa, "label config unavailable");
4160eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
4161eda14cbcSMatt Macy 			    ENXIO));
4162eda14cbcSMatt Macy 		}
4163eda14cbcSMatt Macy 
4164eda14cbcSMatt Macy 		if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ,
4165eda14cbcSMatt Macy 		    &features) != 0) {
4166eda14cbcSMatt Macy 			nvlist_free(label);
4167eda14cbcSMatt Macy 			spa_load_failed(spa, "invalid label: '%s' missing",
4168eda14cbcSMatt Macy 			    ZPOOL_CONFIG_FEATURES_FOR_READ);
4169eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
4170eda14cbcSMatt Macy 			    ENXIO));
4171eda14cbcSMatt Macy 		}
4172eda14cbcSMatt Macy 
4173eda14cbcSMatt Macy 		/*
4174eda14cbcSMatt Macy 		 * Update our in-core representation with the definitive values
4175eda14cbcSMatt Macy 		 * from the label.
4176eda14cbcSMatt Macy 		 */
4177eda14cbcSMatt Macy 		nvlist_free(spa->spa_label_features);
4178eda14cbcSMatt Macy 		spa->spa_label_features = fnvlist_dup(features);
4179eda14cbcSMatt Macy 	}
4180eda14cbcSMatt Macy 
4181eda14cbcSMatt Macy 	nvlist_free(label);
4182eda14cbcSMatt Macy 
4183eda14cbcSMatt Macy 	/*
4184eda14cbcSMatt Macy 	 * Look through entries in the label nvlist's features_for_read. If
4185eda14cbcSMatt Macy 	 * there is a feature listed there which we don't understand, then we
4186eda14cbcSMatt Macy 	 * cannot open the pool.
4187eda14cbcSMatt Macy 	 */
4188eda14cbcSMatt Macy 	if (ub->ub_version >= SPA_VERSION_FEATURES) {
4189eda14cbcSMatt Macy 		nvlist_t *unsup_feat;
4190eda14cbcSMatt Macy 
4191eda14cbcSMatt Macy 		unsup_feat = fnvlist_alloc();
4192eda14cbcSMatt Macy 
4193eda14cbcSMatt Macy 		for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
4194eda14cbcSMatt Macy 		    NULL); nvp != NULL;
4195eda14cbcSMatt Macy 		    nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
4196eda14cbcSMatt Macy 			if (!zfeature_is_supported(nvpair_name(nvp))) {
4197eda14cbcSMatt Macy 				fnvlist_add_string(unsup_feat,
4198eda14cbcSMatt Macy 				    nvpair_name(nvp), "");
4199eda14cbcSMatt Macy 			}
4200eda14cbcSMatt Macy 		}
4201eda14cbcSMatt Macy 
4202eda14cbcSMatt Macy 		if (!nvlist_empty(unsup_feat)) {
4203eda14cbcSMatt Macy 			fnvlist_add_nvlist(spa->spa_load_info,
4204eda14cbcSMatt Macy 			    ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
4205eda14cbcSMatt Macy 			nvlist_free(unsup_feat);
4206eda14cbcSMatt Macy 			spa_load_failed(spa, "some features are unsupported");
4207eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
4208eda14cbcSMatt Macy 			    ENOTSUP));
4209eda14cbcSMatt Macy 		}
4210eda14cbcSMatt Macy 
4211eda14cbcSMatt Macy 		nvlist_free(unsup_feat);
4212eda14cbcSMatt Macy 	}
4213eda14cbcSMatt Macy 
4214eda14cbcSMatt Macy 	if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
4215eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4216eda14cbcSMatt Macy 		spa_try_repair(spa, spa->spa_config);
4217eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
4218eda14cbcSMatt Macy 		nvlist_free(spa->spa_config_splitting);
4219eda14cbcSMatt Macy 		spa->spa_config_splitting = NULL;
4220eda14cbcSMatt Macy 	}
4221eda14cbcSMatt Macy 
4222eda14cbcSMatt Macy 	/*
4223eda14cbcSMatt Macy 	 * Initialize internal SPA structures.
4224eda14cbcSMatt Macy 	 */
4225eda14cbcSMatt Macy 	spa_ld_select_uberblock_done(spa, ub);
4226eda14cbcSMatt Macy 
4227eda14cbcSMatt Macy 	return (0);
4228eda14cbcSMatt Macy }
4229eda14cbcSMatt Macy 
4230eda14cbcSMatt Macy static int
4231eda14cbcSMatt Macy spa_ld_open_rootbp(spa_t *spa)
4232eda14cbcSMatt Macy {
4233eda14cbcSMatt Macy 	int error = 0;
4234eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4235eda14cbcSMatt Macy 
42367877fdebSMatt Macy 	error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
42377877fdebSMatt Macy 	if (error != 0) {
42387877fdebSMatt Macy 		spa_load_failed(spa, "unable to open rootbp in dsl_pool_init "
42397877fdebSMatt Macy 		    "[error=%d]", error);
42407877fdebSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
42417877fdebSMatt Macy 	}
42427877fdebSMatt Macy 	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
42437877fdebSMatt Macy 
4244eda14cbcSMatt Macy 	return (0);
4245eda14cbcSMatt Macy }
4246eda14cbcSMatt Macy 
4247eda14cbcSMatt Macy static int
4248eda14cbcSMatt Macy spa_ld_trusted_config(spa_t *spa, spa_import_type_t type,
4249eda14cbcSMatt Macy     boolean_t reloading)
4250eda14cbcSMatt Macy {
4251eda14cbcSMatt Macy 	vdev_t *mrvd, *rvd = spa->spa_root_vdev;
4252eda14cbcSMatt Macy 	nvlist_t *nv, *mos_config, *policy;
4253eda14cbcSMatt Macy 	int error = 0, copy_error;
4254eda14cbcSMatt Macy 	uint64_t healthy_tvds, healthy_tvds_mos;
4255eda14cbcSMatt Macy 	uint64_t mos_config_txg;
4256eda14cbcSMatt Macy 
4257eda14cbcSMatt Macy 	if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE)
4258eda14cbcSMatt Macy 	    != 0)
4259eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4260eda14cbcSMatt Macy 
4261eda14cbcSMatt Macy 	/*
4262eda14cbcSMatt Macy 	 * If we're assembling a pool from a split, the config provided is
4263eda14cbcSMatt Macy 	 * already trusted so there is nothing to do.
4264eda14cbcSMatt Macy 	 */
4265eda14cbcSMatt Macy 	if (type == SPA_IMPORT_ASSEMBLE)
4266eda14cbcSMatt Macy 		return (0);
4267eda14cbcSMatt Macy 
4268eda14cbcSMatt Macy 	healthy_tvds = spa_healthy_core_tvds(spa);
4269eda14cbcSMatt Macy 
4270eda14cbcSMatt Macy 	if (load_nvlist(spa, spa->spa_config_object, &mos_config)
4271eda14cbcSMatt Macy 	    != 0) {
4272eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to retrieve MOS config");
4273eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4274b2526e8bSMartin Matuska 	}
4275b2526e8bSMartin Matuska 
4276b2526e8bSMartin Matuska 	/*
4277b2526e8bSMartin Matuska 	 * If we are doing an open, the pool owner hasn't been verified yet,
4278b2526e8bSMartin Matuska 	 * so do the verification here.
4279b2526e8bSMartin Matuska 	 */
4280b2526e8bSMartin Matuska 	if (spa->spa_load_state == SPA_LOAD_OPEN) {
4281b2526e8bSMartin Matuska 		error = spa_verify_host(spa, mos_config);
4282b2526e8bSMartin Matuska 		if (error != 0) {
4283b2526e8bSMartin Matuska 			nvlist_free(mos_config);
4284b2526e8bSMartin Matuska 			return (error);
4285b2526e8bSMartin Matuska 		}
4286b2526e8bSMartin Matuska 	}
4287b2526e8bSMartin Matuska 
4288b2526e8bSMartin Matuska 	nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE);
4289b2526e8bSMartin Matuska 
4290b2526e8bSMartin Matuska 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4291b2526e8bSMartin Matuska 
4292eda14cbcSMatt Macy 	/*
4293eda14cbcSMatt Macy 	 * Build a new vdev tree from the trusted config
4294eda14cbcSMatt Macy 	 */
4295eda14cbcSMatt Macy 	error = spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD);
4296eda14cbcSMatt Macy 	if (error != 0) {
4297eda14cbcSMatt Macy 		nvlist_free(mos_config);
4298eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
4299eda14cbcSMatt Macy 		spa_load_failed(spa, "spa_config_parse failed [error=%d]",
4300eda14cbcSMatt Macy 		    error);
4301eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
4302eda14cbcSMatt Macy 	}
4303eda14cbcSMatt Macy 
4304eda14cbcSMatt Macy 	/*
4305eda14cbcSMatt Macy 	 * Vdev paths in the MOS may be obsolete. If the untrusted config was
4306eda14cbcSMatt Macy 	 * obtained by scanning /dev/dsk, then it will have the right vdev
4307eda14cbcSMatt Macy 	 * paths. We update the trusted MOS config with this information.
4308eda14cbcSMatt Macy 	 * We first try to copy the paths with vdev_copy_path_strict, which
4309eda14cbcSMatt Macy 	 * succeeds only when both configs have exactly the same vdev tree.
4310eda14cbcSMatt Macy 	 * If that fails, we fall back to a more flexible method that has a
4311eda14cbcSMatt Macy 	 * best effort policy.
4312eda14cbcSMatt Macy 	 */
4313eda14cbcSMatt Macy 	copy_error = vdev_copy_path_strict(rvd, mrvd);
4314eda14cbcSMatt Macy 	if (copy_error != 0 || spa_load_print_vdev_tree) {
4315eda14cbcSMatt Macy 		spa_load_note(spa, "provided vdev tree:");
4316eda14cbcSMatt Macy 		vdev_dbgmsg_print_tree(rvd, 2);
4317eda14cbcSMatt Macy 		spa_load_note(spa, "MOS vdev tree:");
4318eda14cbcSMatt Macy 		vdev_dbgmsg_print_tree(mrvd, 2);
4319eda14cbcSMatt Macy 	}
4320eda14cbcSMatt Macy 	if (copy_error != 0) {
4321eda14cbcSMatt Macy 		spa_load_note(spa, "vdev_copy_path_strict failed, falling "
4322eda14cbcSMatt Macy 		    "back to vdev_copy_path_relaxed");
4323eda14cbcSMatt Macy 		vdev_copy_path_relaxed(rvd, mrvd);
4324eda14cbcSMatt Macy 	}
4325eda14cbcSMatt Macy 
4326eda14cbcSMatt Macy 	vdev_close(rvd);
4327eda14cbcSMatt Macy 	vdev_free(rvd);
4328eda14cbcSMatt Macy 	spa->spa_root_vdev = mrvd;
4329eda14cbcSMatt Macy 	rvd = mrvd;
4330eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
4331eda14cbcSMatt Macy 
4332eda14cbcSMatt Macy 	/*
4333eda14cbcSMatt Macy 	 * If 'zpool import' used a cached config, then the on-disk hostid and
4334eda14cbcSMatt Macy 	 * hostname may be different to the cached config in ways that should
4335eda14cbcSMatt Macy 	 * prevent import.  Userspace can't discover this without a scan, but
4336eda14cbcSMatt Macy 	 * we know, so we add these values to LOAD_INFO so the caller can know
4337eda14cbcSMatt Macy 	 * the difference.
4338eda14cbcSMatt Macy 	 *
4339eda14cbcSMatt Macy 	 * Note that we have to do this before the config is regenerated,
4340eda14cbcSMatt Macy 	 * because the new config will have the hostid and hostname for this
4341eda14cbcSMatt Macy 	 * host, in readiness for import.
4342eda14cbcSMatt Macy 	 */
4343eda14cbcSMatt Macy 	if (nvlist_exists(mos_config, ZPOOL_CONFIG_HOSTID))
4344eda14cbcSMatt Macy 		fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_HOSTID,
4345eda14cbcSMatt Macy 		    fnvlist_lookup_uint64(mos_config, ZPOOL_CONFIG_HOSTID));
4346eda14cbcSMatt Macy 	if (nvlist_exists(mos_config, ZPOOL_CONFIG_HOSTNAME))
4347eda14cbcSMatt Macy 		fnvlist_add_string(spa->spa_load_info, ZPOOL_CONFIG_HOSTNAME,
4348eda14cbcSMatt Macy 		    fnvlist_lookup_string(mos_config, ZPOOL_CONFIG_HOSTNAME));
4349eda14cbcSMatt Macy 
4350eda14cbcSMatt Macy 	/*
4351eda14cbcSMatt Macy 	 * We will use spa_config if we decide to reload the spa or if spa_load
4352eda14cbcSMatt Macy 	 * fails and we rewind. We must thus regenerate the config using the
4353eda14cbcSMatt Macy 	 * MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to
4354eda14cbcSMatt Macy 	 * pass settings on how to load the pool and is not stored in the MOS.
4355eda14cbcSMatt Macy 	 * We copy it over to our new, trusted config.
4356eda14cbcSMatt Macy 	 */
4357eda14cbcSMatt Macy 	mos_config_txg = fnvlist_lookup_uint64(mos_config,
4358eda14cbcSMatt Macy 	    ZPOOL_CONFIG_POOL_TXG);
4359eda14cbcSMatt Macy 	nvlist_free(mos_config);
4360eda14cbcSMatt Macy 	mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE);
4361eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY,
4362eda14cbcSMatt Macy 	    &policy) == 0)
4363eda14cbcSMatt Macy 		fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy);
4364eda14cbcSMatt Macy 	spa_config_set(spa, mos_config);
4365eda14cbcSMatt Macy 	spa->spa_config_source = SPA_CONFIG_SRC_MOS;
4366eda14cbcSMatt Macy 
4367eda14cbcSMatt Macy 	/*
4368eda14cbcSMatt Macy 	 * Now that we have the config from the MOS, we should be more strict
4369eda14cbcSMatt Macy 	 * in checking blkptrs and can make assumptions about the consistency
4370eda14cbcSMatt Macy 	 * of the vdev tree. spa_trust_config must be set to true before opening
4371eda14cbcSMatt Macy 	 * vdevs in order for them to be writeable.
4372eda14cbcSMatt Macy 	 */
4373eda14cbcSMatt Macy 	spa->spa_trust_config = B_TRUE;
4374eda14cbcSMatt Macy 
4375eda14cbcSMatt Macy 	/*
4376eda14cbcSMatt Macy 	 * Open and validate the new vdev tree
4377eda14cbcSMatt Macy 	 */
4378eda14cbcSMatt Macy 	error = spa_ld_open_vdevs(spa);
4379eda14cbcSMatt Macy 	if (error != 0)
4380eda14cbcSMatt Macy 		return (error);
4381eda14cbcSMatt Macy 
4382eda14cbcSMatt Macy 	error = spa_ld_validate_vdevs(spa);
4383eda14cbcSMatt Macy 	if (error != 0)
4384eda14cbcSMatt Macy 		return (error);
4385eda14cbcSMatt Macy 
4386eda14cbcSMatt Macy 	if (copy_error != 0 || spa_load_print_vdev_tree) {
4387eda14cbcSMatt Macy 		spa_load_note(spa, "final vdev tree:");
4388eda14cbcSMatt Macy 		vdev_dbgmsg_print_tree(rvd, 2);
4389eda14cbcSMatt Macy 	}
4390eda14cbcSMatt Macy 
4391eda14cbcSMatt Macy 	if (spa->spa_load_state != SPA_LOAD_TRYIMPORT &&
4392eda14cbcSMatt Macy 	    !spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) {
4393eda14cbcSMatt Macy 		/*
4394eda14cbcSMatt Macy 		 * Sanity check to make sure that we are indeed loading the
4395eda14cbcSMatt Macy 		 * latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds
4396eda14cbcSMatt Macy 		 * in the config provided and they happened to be the only ones
4397eda14cbcSMatt Macy 		 * to have the latest uberblock, we could involuntarily perform
4398eda14cbcSMatt Macy 		 * an extreme rewind.
4399eda14cbcSMatt Macy 		 */
4400eda14cbcSMatt Macy 		healthy_tvds_mos = spa_healthy_core_tvds(spa);
4401eda14cbcSMatt Macy 		if (healthy_tvds_mos - healthy_tvds >=
4402eda14cbcSMatt Macy 		    SPA_SYNC_MIN_VDEVS) {
4403eda14cbcSMatt Macy 			spa_load_note(spa, "config provided misses too many "
4404eda14cbcSMatt Macy 			    "top-level vdevs compared to MOS (%lld vs %lld). ",
4405eda14cbcSMatt Macy 			    (u_longlong_t)healthy_tvds,
4406eda14cbcSMatt Macy 			    (u_longlong_t)healthy_tvds_mos);
4407eda14cbcSMatt Macy 			spa_load_note(spa, "vdev tree:");
4408eda14cbcSMatt Macy 			vdev_dbgmsg_print_tree(rvd, 2);
4409eda14cbcSMatt Macy 			if (reloading) {
4410eda14cbcSMatt Macy 				spa_load_failed(spa, "config was already "
4411eda14cbcSMatt Macy 				    "provided from MOS. Aborting.");
4412eda14cbcSMatt Macy 				return (spa_vdev_err(rvd,
4413eda14cbcSMatt Macy 				    VDEV_AUX_CORRUPT_DATA, EIO));
4414eda14cbcSMatt Macy 			}
4415eda14cbcSMatt Macy 			spa_load_note(spa, "spa must be reloaded using MOS "
4416eda14cbcSMatt Macy 			    "config");
4417eda14cbcSMatt Macy 			return (SET_ERROR(EAGAIN));
4418eda14cbcSMatt Macy 		}
4419eda14cbcSMatt Macy 	}
4420eda14cbcSMatt Macy 
4421eda14cbcSMatt Macy 	error = spa_check_for_missing_logs(spa);
4422eda14cbcSMatt Macy 	if (error != 0)
4423eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
4424eda14cbcSMatt Macy 
4425eda14cbcSMatt Macy 	if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) {
4426eda14cbcSMatt Macy 		spa_load_failed(spa, "uberblock guid sum doesn't match MOS "
4427eda14cbcSMatt Macy 		    "guid sum (%llu != %llu)",
4428eda14cbcSMatt Macy 		    (u_longlong_t)spa->spa_uberblock.ub_guid_sum,
4429eda14cbcSMatt Macy 		    (u_longlong_t)rvd->vdev_guid_sum);
4430eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
4431eda14cbcSMatt Macy 		    ENXIO));
4432eda14cbcSMatt Macy 	}
4433eda14cbcSMatt Macy 
4434eda14cbcSMatt Macy 	return (0);
4435eda14cbcSMatt Macy }
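
/*
 * Illustrative sketch (hypothetical helper, not in the original source):
 * the guid-sum check above works because each vdev's vdev_guid_sum covers
 * its entire subtree, so a single missing leaf changes the total. A
 * minimal recursive computation of the same quantity might look like
 * the following; the name example_guid_sum is invented for illustration.
 */
static uint64_t
example_guid_sum(vdev_t *vd)
{
	uint64_t sum = vd->vdev_guid;

	/* Accumulate the guids of the entire subtree rooted at vd. */
	for (uint64_t c = 0; c < vd->vdev_children; c++)
		sum += example_guid_sum(vd->vdev_child[c]);

	return (sum);
}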
4436eda14cbcSMatt Macy 
4437eda14cbcSMatt Macy static int
4438eda14cbcSMatt Macy spa_ld_open_indirect_vdev_metadata(spa_t *spa)
4439eda14cbcSMatt Macy {
4440eda14cbcSMatt Macy 	int error = 0;
4441eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4442eda14cbcSMatt Macy 
4443eda14cbcSMatt Macy 	/*
4444eda14cbcSMatt Macy 	 * Everything that we read before spa_remove_init() must be stored
4445eda14cbcSMatt Macy 	 * on concrete vdevs.  Therefore we do this as early as possible.
4446eda14cbcSMatt Macy 	 */
4447eda14cbcSMatt Macy 	error = spa_remove_init(spa);
4448eda14cbcSMatt Macy 	if (error != 0) {
4449eda14cbcSMatt Macy 		spa_load_failed(spa, "spa_remove_init failed [error=%d]",
4450eda14cbcSMatt Macy 		    error);
4451eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4452eda14cbcSMatt Macy 	}
4453eda14cbcSMatt Macy 
4454eda14cbcSMatt Macy 	/*
4455eda14cbcSMatt Macy 	 * Retrieve information needed to condense indirect vdev mappings.
4456eda14cbcSMatt Macy 	 */
4457eda14cbcSMatt Macy 	error = spa_condense_init(spa);
4458eda14cbcSMatt Macy 	if (error != 0) {
4459eda14cbcSMatt Macy 		spa_load_failed(spa, "spa_condense_init failed [error=%d]",
4460eda14cbcSMatt Macy 		    error);
4461eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
4462eda14cbcSMatt Macy 	}
4463eda14cbcSMatt Macy 
4464eda14cbcSMatt Macy 	return (0);
4465eda14cbcSMatt Macy }
4466eda14cbcSMatt Macy 
4467eda14cbcSMatt Macy static int
4468eda14cbcSMatt Macy spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep)
4469eda14cbcSMatt Macy {
4470eda14cbcSMatt Macy 	int error = 0;
4471eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4472eda14cbcSMatt Macy 
4473eda14cbcSMatt Macy 	if (spa_version(spa) >= SPA_VERSION_FEATURES) {
4474eda14cbcSMatt Macy 		boolean_t missing_feat_read = B_FALSE;
4475eda14cbcSMatt Macy 		nvlist_t *unsup_feat, *enabled_feat;
4476eda14cbcSMatt Macy 
4477eda14cbcSMatt Macy 		if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
4478eda14cbcSMatt Macy 		    &spa->spa_feat_for_read_obj, B_TRUE) != 0) {
4479eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4480eda14cbcSMatt Macy 		}
4481eda14cbcSMatt Macy 
4482eda14cbcSMatt Macy 		if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
4483eda14cbcSMatt Macy 		    &spa->spa_feat_for_write_obj, B_TRUE) != 0) {
4484eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4485eda14cbcSMatt Macy 		}
4486eda14cbcSMatt Macy 
4487eda14cbcSMatt Macy 		if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
4488eda14cbcSMatt Macy 		    &spa->spa_feat_desc_obj, B_TRUE) != 0) {
4489eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4490eda14cbcSMatt Macy 		}
4491eda14cbcSMatt Macy 
4492eda14cbcSMatt Macy 		enabled_feat = fnvlist_alloc();
4493eda14cbcSMatt Macy 		unsup_feat = fnvlist_alloc();
4494eda14cbcSMatt Macy 
4495eda14cbcSMatt Macy 		if (!spa_features_check(spa, B_FALSE,
4496eda14cbcSMatt Macy 		    unsup_feat, enabled_feat))
4497eda14cbcSMatt Macy 			missing_feat_read = B_TRUE;
4498eda14cbcSMatt Macy 
4499eda14cbcSMatt Macy 		if (spa_writeable(spa) ||
4500eda14cbcSMatt Macy 		    spa->spa_load_state == SPA_LOAD_TRYIMPORT) {
4501eda14cbcSMatt Macy 			if (!spa_features_check(spa, B_TRUE,
4502eda14cbcSMatt Macy 			    unsup_feat, enabled_feat)) {
4503eda14cbcSMatt Macy 				*missing_feat_writep = B_TRUE;
4504eda14cbcSMatt Macy 			}
4505eda14cbcSMatt Macy 		}
4506eda14cbcSMatt Macy 
4507eda14cbcSMatt Macy 		fnvlist_add_nvlist(spa->spa_load_info,
4508eda14cbcSMatt Macy 		    ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
4509eda14cbcSMatt Macy 
4510eda14cbcSMatt Macy 		if (!nvlist_empty(unsup_feat)) {
4511eda14cbcSMatt Macy 			fnvlist_add_nvlist(spa->spa_load_info,
4512eda14cbcSMatt Macy 			    ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
4513eda14cbcSMatt Macy 		}
4514eda14cbcSMatt Macy 
4515eda14cbcSMatt Macy 		fnvlist_free(enabled_feat);
4516eda14cbcSMatt Macy 		fnvlist_free(unsup_feat);
4517eda14cbcSMatt Macy 
4518eda14cbcSMatt Macy 		if (!missing_feat_read) {
4519eda14cbcSMatt Macy 			fnvlist_add_boolean(spa->spa_load_info,
4520eda14cbcSMatt Macy 			    ZPOOL_CONFIG_CAN_RDONLY);
4521eda14cbcSMatt Macy 		}
4522eda14cbcSMatt Macy 
4523eda14cbcSMatt Macy 		/*
4524eda14cbcSMatt Macy 		 * If the state is SPA_LOAD_TRYIMPORT, our objective is
4525eda14cbcSMatt Macy 		 * twofold: to determine whether the pool is available for
4526eda14cbcSMatt Macy 		 * import in read-write mode and (if it is not) whether the
4527eda14cbcSMatt Macy 		 * pool is available for import in read-only mode. If the pool
4528eda14cbcSMatt Macy 		 * is available for import in read-write mode, it is displayed
4529eda14cbcSMatt Macy 		 * as available in userland; if it is not available for import
4530eda14cbcSMatt Macy 		 * in read-only mode, it is displayed as unavailable in
4531eda14cbcSMatt Macy 		 * userland. If the pool is available for import in read-only
4532eda14cbcSMatt Macy 		 * mode but not read-write mode, it is displayed as unavailable
4533eda14cbcSMatt Macy 		 * in userland with a special note that the pool is actually
4534eda14cbcSMatt Macy 		 * available for open in read-only mode.
4535eda14cbcSMatt Macy 		 *
4536eda14cbcSMatt Macy 		 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
4537eda14cbcSMatt Macy 		 * missing a feature for write, we must first determine whether
4538eda14cbcSMatt Macy 		 * the pool can be opened read-only before returning to
4539eda14cbcSMatt Macy 		 * userland in order to know whether to display the
4540eda14cbcSMatt Macy 		 * above-mentioned note.
4541eda14cbcSMatt Macy 		 */
4542eda14cbcSMatt Macy 		if (missing_feat_read || (*missing_feat_writep &&
4543eda14cbcSMatt Macy 		    spa_writeable(spa))) {
4544eda14cbcSMatt Macy 			spa_load_failed(spa, "pool uses unsupported features");
4545eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
4546eda14cbcSMatt Macy 			    ENOTSUP));
4547eda14cbcSMatt Macy 		}
4548eda14cbcSMatt Macy 
4549eda14cbcSMatt Macy 		/*
4550eda14cbcSMatt Macy 		 * Load refcounts for ZFS features from disk into an in-memory
4551eda14cbcSMatt Macy 		 * cache during SPA initialization.
4552eda14cbcSMatt Macy 		 */
4553eda14cbcSMatt Macy 		for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
4554eda14cbcSMatt Macy 			uint64_t refcount;
4555eda14cbcSMatt Macy 
4556eda14cbcSMatt Macy 			error = feature_get_refcount_from_disk(spa,
4557eda14cbcSMatt Macy 			    &spa_feature_table[i], &refcount);
4558eda14cbcSMatt Macy 			if (error == 0) {
4559eda14cbcSMatt Macy 				spa->spa_feat_refcount_cache[i] = refcount;
4560eda14cbcSMatt Macy 			} else if (error == ENOTSUP) {
4561eda14cbcSMatt Macy 				spa->spa_feat_refcount_cache[i] =
4562eda14cbcSMatt Macy 				    SPA_FEATURE_DISABLED;
4563eda14cbcSMatt Macy 			} else {
4564eda14cbcSMatt Macy 				spa_load_failed(spa, "error getting refcount "
4565eda14cbcSMatt Macy 				    "for feature %s [error=%d]",
4566eda14cbcSMatt Macy 				    spa_feature_table[i].fi_guid, error);
4567eda14cbcSMatt Macy 				return (spa_vdev_err(rvd,
4568eda14cbcSMatt Macy 				    VDEV_AUX_CORRUPT_DATA, EIO));
4569eda14cbcSMatt Macy 			}
4570eda14cbcSMatt Macy 		}
4571eda14cbcSMatt Macy 	}
4572eda14cbcSMatt Macy 
4573eda14cbcSMatt Macy 	if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
4574eda14cbcSMatt Macy 		if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
4575eda14cbcSMatt Macy 		    &spa->spa_feat_enabled_txg_obj, B_TRUE) != 0)
4576eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4577eda14cbcSMatt Macy 	}
4578eda14cbcSMatt Macy 
4579eda14cbcSMatt Macy 	/*
4580eda14cbcSMatt Macy 	 * Encryption was added before bookmark_v2, even though bookmark_v2
4581eda14cbcSMatt Macy 	 * is now a dependency. If this pool has encryption enabled without
4582eda14cbcSMatt Macy 	 * bookmark_v2, trigger an errata message.
4583eda14cbcSMatt Macy 	 */
4584eda14cbcSMatt Macy 	if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) &&
4585eda14cbcSMatt Macy 	    !spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
4586eda14cbcSMatt Macy 		spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
4587eda14cbcSMatt Macy 	}
4588eda14cbcSMatt Macy 
4589eda14cbcSMatt Macy 	return (0);
4590eda14cbcSMatt Macy }
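
/*
 * Illustrative sketch: once spa_feat_refcount_cache[] has been populated
 * above, feature activity checks can be answered from memory rather than
 * from disk. Below is a simplified, hypothetical rendition of such a
 * check; the real logic lives in spa_feature_is_active(), which also
 * handles details this sketch glosses over.
 */
static boolean_t
example_feature_is_active(spa_t *spa, spa_feature_t fid)
{
	uint64_t refcount = spa->spa_feat_refcount_cache[fid];

	/* SPA_FEATURE_DISABLED marks features never enabled on this pool. */
	return (refcount != SPA_FEATURE_DISABLED && refcount > 0);
}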
4591eda14cbcSMatt Macy 
4592eda14cbcSMatt Macy static int
4593eda14cbcSMatt Macy spa_ld_load_special_directories(spa_t *spa)
4594eda14cbcSMatt Macy {
4595eda14cbcSMatt Macy 	int error = 0;
4596eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4597eda14cbcSMatt Macy 
4598eda14cbcSMatt Macy 	spa->spa_is_initializing = B_TRUE;
4599eda14cbcSMatt Macy 	error = dsl_pool_open(spa->spa_dsl_pool);
4600eda14cbcSMatt Macy 	spa->spa_is_initializing = B_FALSE;
4601eda14cbcSMatt Macy 	if (error != 0) {
4602eda14cbcSMatt Macy 		spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error);
4603eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4604eda14cbcSMatt Macy 	}
4605eda14cbcSMatt Macy 
4606eda14cbcSMatt Macy 	return (0);
4607eda14cbcSMatt Macy }
4608eda14cbcSMatt Macy 
4609eda14cbcSMatt Macy static int
4610eda14cbcSMatt Macy spa_ld_get_props(spa_t *spa)
4611eda14cbcSMatt Macy {
4612eda14cbcSMatt Macy 	int error = 0;
4613eda14cbcSMatt Macy 	uint64_t obj;
4614eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4615eda14cbcSMatt Macy 
4616eda14cbcSMatt Macy 	/* Grab the checksum salt from the MOS. */
4617eda14cbcSMatt Macy 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
4618eda14cbcSMatt Macy 	    DMU_POOL_CHECKSUM_SALT, 1,
4619eda14cbcSMatt Macy 	    sizeof (spa->spa_cksum_salt.zcs_bytes),
4620eda14cbcSMatt Macy 	    spa->spa_cksum_salt.zcs_bytes);
4621eda14cbcSMatt Macy 	if (error == ENOENT) {
4622eda14cbcSMatt Macy 		/* Generate a new salt for subsequent use */
4623eda14cbcSMatt Macy 		(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
4624eda14cbcSMatt Macy 		    sizeof (spa->spa_cksum_salt.zcs_bytes));
4625eda14cbcSMatt Macy 	} else if (error != 0) {
4626eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to retrieve checksum salt from "
4627eda14cbcSMatt Macy 		    "MOS [error=%d]", error);
4628eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4629eda14cbcSMatt Macy 	}
4630eda14cbcSMatt Macy 
4631eda14cbcSMatt Macy 	if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0)
4632eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4633eda14cbcSMatt Macy 	error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
4634eda14cbcSMatt Macy 	if (error != 0) {
4635eda14cbcSMatt Macy 		spa_load_failed(spa, "error opening deferred-frees bpobj "
4636eda14cbcSMatt Macy 		    "[error=%d]", error);
4637eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4638eda14cbcSMatt Macy 	}
4639eda14cbcSMatt Macy 
4640eda14cbcSMatt Macy 	/*
4641eda14cbcSMatt Macy 	 * Load the bit that tells us to use the new accounting function
4642eda14cbcSMatt Macy 	 * (raid-z deflation).  If we have an older pool, this will not
4643eda14cbcSMatt Macy 	 * be present.
4644eda14cbcSMatt Macy 	 */
4645eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE);
4646eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4647eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4648eda14cbcSMatt Macy 
4649a0b956f5SMartin Matuska 	error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
4650eda14cbcSMatt Macy 	    &spa->spa_creation_version, B_FALSE);
4651eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4652eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4653eda14cbcSMatt Macy 
4654eda14cbcSMatt Macy 	/*
4655eda14cbcSMatt Macy 	 * Load the persistent error log.  If we have an older pool, this will
4656eda14cbcSMatt Macy 	 * not be present.
4657eda14cbcSMatt Macy 	 */
4658eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last,
4659eda14cbcSMatt Macy 	    B_FALSE);
4660eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4661eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4662eda14cbcSMatt Macy 
4663eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
4664eda14cbcSMatt Macy 	    &spa->spa_errlog_scrub, B_FALSE);
4665eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4666eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4667eda14cbcSMatt Macy 
4668eda14cbcSMatt Macy 	/*
4669eda14cbcSMatt Macy 	 * Load the livelist deletion field. If a livelist is queued for
4670eda14cbcSMatt Macy 	 * deletion, indicate that in the spa.
4671eda14cbcSMatt Macy 	 */
4672eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_DELETED_CLONES,
4673eda14cbcSMatt Macy 	    &spa->spa_livelists_to_delete, B_FALSE);
46741f88aa09SMartin Matuska 	if (error != 0 && error != ENOENT)
4675eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4676eda14cbcSMatt Macy 
4677eda14cbcSMatt Macy 	/*
4678eda14cbcSMatt Macy 	 * Load the history object.  If we have an older pool, this
4679eda14cbcSMatt Macy 	 * will not be present.
4680eda14cbcSMatt Macy 	 */
4681eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE);
4682eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4683eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4684eda14cbcSMatt Macy 
4685eda14cbcSMatt Macy 	/*
4686eda14cbcSMatt Macy 	 * Load the per-vdev ZAP map. If we have an older pool, this will not
4687eda14cbcSMatt Macy 	 * be present; in this case, defer its creation to a later time to
4688eda14cbcSMatt Macy 	 * avoid dirtying the MOS this early / out of sync context. See
4689eda14cbcSMatt Macy 	 * avoid dirtying the MOS this early, outside of sync context. See
4690eda14cbcSMatt Macy 	 */
4691eda14cbcSMatt Macy 
4692eda14cbcSMatt Macy 	/* The sentinel is only available in the MOS config. */
4693eda14cbcSMatt Macy 	nvlist_t *mos_config;
4694eda14cbcSMatt Macy 	if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
4695eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to retrieve MOS config");
4696eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4697eda14cbcSMatt Macy 	}
4698eda14cbcSMatt Macy 
4699eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
4700eda14cbcSMatt Macy 	    &spa->spa_all_vdev_zaps, B_FALSE);
4701eda14cbcSMatt Macy 
4702eda14cbcSMatt Macy 	if (error == ENOENT) {
4703eda14cbcSMatt Macy 		VERIFY(!nvlist_exists(mos_config,
4704eda14cbcSMatt Macy 		    ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
4705eda14cbcSMatt Macy 		spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
4706eda14cbcSMatt Macy 		ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
4707eda14cbcSMatt Macy 	} else if (error != 0) {
4708eda14cbcSMatt Macy 		nvlist_free(mos_config);
4709eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4710eda14cbcSMatt Macy 	} else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
4711eda14cbcSMatt Macy 		/*
4712eda14cbcSMatt Macy 		 * An older version of ZFS overwrote the sentinel value, so
4713eda14cbcSMatt Macy 		 * we have orphaned per-vdev ZAPs in the MOS. Defer their
4714eda14cbcSMatt Macy 		 * destruction to later; see spa_sync_config_object.
4715eda14cbcSMatt Macy 		 */
4716eda14cbcSMatt Macy 		spa->spa_avz_action = AVZ_ACTION_DESTROY;
4717eda14cbcSMatt Macy 		/*
4718eda14cbcSMatt Macy 		 * We're assuming that no vdevs have had their ZAPs created
4719eda14cbcSMatt Macy 		 * before this. Better be sure of it.
4720eda14cbcSMatt Macy 		 */
4721eda14cbcSMatt Macy 		ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
4722eda14cbcSMatt Macy 	}
4723eda14cbcSMatt Macy 	nvlist_free(mos_config);
4724eda14cbcSMatt Macy 
4725eda14cbcSMatt Macy 	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
4726eda14cbcSMatt Macy 
4727eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object,
4728eda14cbcSMatt Macy 	    B_FALSE);
4729eda14cbcSMatt Macy 	if (error && error != ENOENT)
4730eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4731eda14cbcSMatt Macy 
4732eda14cbcSMatt Macy 	if (error == 0) {
4733eda14cbcSMatt Macy 		uint64_t autoreplace = 0;
4734eda14cbcSMatt Macy 
4735eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
4736eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
4737eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
4738eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
4739eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
4740eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
4741eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim);
4742eda14cbcSMatt Macy 		spa->spa_autoreplace = (autoreplace != 0);
4743eda14cbcSMatt Macy 	}
4744eda14cbcSMatt Macy 
4745eda14cbcSMatt Macy 	/*
4746eda14cbcSMatt Macy 	 * If we are importing a pool with missing top-level vdevs,
4747eda14cbcSMatt Macy 	 * we enforce that the pool doesn't panic or get suspended on
4748eda14cbcSMatt Macy 	 * error since the likelihood of missing data is extremely high.
4749eda14cbcSMatt Macy 	 */
4750eda14cbcSMatt Macy 	if (spa->spa_missing_tvds > 0 &&
4751eda14cbcSMatt Macy 	    spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE &&
4752eda14cbcSMatt Macy 	    spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
4753eda14cbcSMatt Macy 		spa_load_note(spa, "forcing failmode to 'continue' "
4754eda14cbcSMatt Macy 		    "as some top level vdevs are missing");
4755eda14cbcSMatt Macy 		spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE;
4756eda14cbcSMatt Macy 	}
4757eda14cbcSMatt Macy 
4758eda14cbcSMatt Macy 	return (0);
4759eda14cbcSMatt Macy }
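
/*
 * Illustrative sketch: spa_ld_get_props() repeatedly applies the same
 * idiom, where ENOENT from spa_dir_prop() is tolerated because older
 * pools may simply lack the entry. A hypothetical wrapper capturing
 * that pattern (example_dir_prop_optional is invented for illustration):
 */
static int
example_dir_prop_optional(spa_t *spa, const char *name, uint64_t *valp)
{
	int error = spa_dir_prop(spa, name, valp, B_FALSE);

	/* A missing entry is expected on older pools; not an error. */
	return (error == ENOENT ? 0 : error);
}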
4760eda14cbcSMatt Macy 
4761eda14cbcSMatt Macy static int
4762eda14cbcSMatt Macy spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type)
4763eda14cbcSMatt Macy {
4764eda14cbcSMatt Macy 	int error = 0;
4765eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4766eda14cbcSMatt Macy 
4767eda14cbcSMatt Macy 	/*
4768eda14cbcSMatt Macy 	 * If we're assembling the pool from the split-off vdevs of
4769eda14cbcSMatt Macy 	 * an existing pool, we don't want to attach the spares & cache
4770eda14cbcSMatt Macy 	 * devices.
4771eda14cbcSMatt Macy 	 */
4772eda14cbcSMatt Macy 
4773eda14cbcSMatt Macy 	/*
4774eda14cbcSMatt Macy 	 * Load any hot spares for this pool.
4775eda14cbcSMatt Macy 	 */
4776eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object,
4777eda14cbcSMatt Macy 	    B_FALSE);
4778eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4779eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4780eda14cbcSMatt Macy 	if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
4781eda14cbcSMatt Macy 		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
4782eda14cbcSMatt Macy 		if (load_nvlist(spa, spa->spa_spares.sav_object,
4783eda14cbcSMatt Macy 		    &spa->spa_spares.sav_config) != 0) {
4784eda14cbcSMatt Macy 			spa_load_failed(spa, "error loading spares nvlist");
4785eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4786eda14cbcSMatt Macy 		}
4787eda14cbcSMatt Macy 
4788eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4789eda14cbcSMatt Macy 		spa_load_spares(spa);
4790eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
4791eda14cbcSMatt Macy 	} else if (error == 0) {
4792eda14cbcSMatt Macy 		spa->spa_spares.sav_sync = B_TRUE;
4793eda14cbcSMatt Macy 	}
4794eda14cbcSMatt Macy 
4795eda14cbcSMatt Macy 	/*
4796eda14cbcSMatt Macy 	 * Load any level 2 ARC devices for this pool.
4797eda14cbcSMatt Macy 	 */
4798eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
4799eda14cbcSMatt Macy 	    &spa->spa_l2cache.sav_object, B_FALSE);
4800eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4801eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4802eda14cbcSMatt Macy 	if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
4803eda14cbcSMatt Macy 		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
4804eda14cbcSMatt Macy 		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
4805eda14cbcSMatt Macy 		    &spa->spa_l2cache.sav_config) != 0) {
4806eda14cbcSMatt Macy 			spa_load_failed(spa, "error loading l2cache nvlist");
4807eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4808eda14cbcSMatt Macy 		}
4809eda14cbcSMatt Macy 
4810716fd348SMartin Matuska 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4811eda14cbcSMatt Macy 		spa_load_l2cache(spa);
4812eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
4813eda14cbcSMatt Macy 	} else if (error == 0) {
4814eda14cbcSMatt Macy 		spa->spa_l2cache.sav_sync = B_TRUE;
4815eda14cbcSMatt Macy 	}
4816eda14cbcSMatt Macy 
4817eda14cbcSMatt Macy 	return (0);
4818eda14cbcSMatt Macy }
4819eda14cbcSMatt Macy 
4820eda14cbcSMatt Macy static int
4821eda14cbcSMatt Macy spa_ld_load_vdev_metadata(spa_t *spa)
4822eda14cbcSMatt Macy {
4823eda14cbcSMatt Macy 	int error = 0;
4824eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4825eda14cbcSMatt Macy 
4826eda14cbcSMatt Macy 	/*
4827eda14cbcSMatt Macy 	 * If the 'multihost' property is set, then never allow a pool to
4828eda14cbcSMatt Macy 	 * be imported when the system hostid is zero.  The exception to
4829eda14cbcSMatt Macy 	 * this rule is zdb, which is always allowed to access pools.
4830eda14cbcSMatt Macy 	 */
4831eda14cbcSMatt Macy 	if (spa_multihost(spa) && spa_get_hostid(spa) == 0 &&
4832eda14cbcSMatt Macy 	    (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
4833eda14cbcSMatt Macy 		fnvlist_add_uint64(spa->spa_load_info,
4834eda14cbcSMatt Macy 		    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
4835eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
4836eda14cbcSMatt Macy 	}
4837eda14cbcSMatt Macy 
4838eda14cbcSMatt Macy 	/*
4839eda14cbcSMatt Macy 	 * If the 'autoreplace' property is set, then post a resource notifying
4840eda14cbcSMatt Macy 	 * the ZFS DE that it should not issue any faults for unopenable
48412a58b312SMartin Matuska 	 * devices.  We also iterate over the vdevs, and post a sysevent for any
48422a58b312SMartin Matuska 	 * unopenable vdevs so that the normal autoreplace handler can take
48432a58b312SMartin Matuska 	 * over.
48442a58b312SMartin Matuska 	 */
48452a58b312SMartin Matuska 	if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
48462a58b312SMartin Matuska 		spa_check_removed(spa->spa_root_vdev);
48472a58b312SMartin Matuska 		/*
48482a58b312SMartin Matuska 		 * For the import case, this is done in spa_import(), because
48492a58b312SMartin Matuska 		 * at this point we're using the spare definitions from
48502a58b312SMartin Matuska 		 * the MOS config, not necessarily from the userland config.
48512a58b312SMartin Matuska 		 */
48522a58b312SMartin Matuska 		if (spa->spa_load_state != SPA_LOAD_IMPORT) {
48532a58b312SMartin Matuska 			spa_aux_check_removed(&spa->spa_spares);
48542a58b312SMartin Matuska 			spa_aux_check_removed(&spa->spa_l2cache);
48552a58b312SMartin Matuska 		}
4856a0b956f5SMartin Matuska 	}
4857eda14cbcSMatt Macy 
4858eda14cbcSMatt Macy 	/*
4859eda14cbcSMatt Macy 	 * Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc.
4860eda14cbcSMatt Macy 	 */
4861eda14cbcSMatt Macy 	error = vdev_load(rvd);
4862eda14cbcSMatt Macy 	if (error != 0) {
4863eda14cbcSMatt Macy 		spa_load_failed(spa, "vdev_load failed [error=%d]", error);
4864eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
4865eda14cbcSMatt Macy 	}
4866eda14cbcSMatt Macy 
4867eda14cbcSMatt Macy 	error = spa_ld_log_spacemaps(spa);
4868eda14cbcSMatt Macy 	if (error != 0) {
4869eda14cbcSMatt Macy 		spa_load_failed(spa, "spa_ld_log_spacemaps failed [error=%d]",
4870eda14cbcSMatt Macy 		    error);
4871eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
4872eda14cbcSMatt Macy 	}
4873eda14cbcSMatt Macy 
4874eda14cbcSMatt Macy 	/*
4875eda14cbcSMatt Macy 	 * Propagate the leaf DTLs we just loaded all the way up the vdev tree.
4876eda14cbcSMatt Macy 	 */
4877eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4878eda14cbcSMatt Macy 	vdev_dtl_reassess(rvd, 0, 0, B_FALSE, B_FALSE);
4879eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
4880eda14cbcSMatt Macy 
4881eda14cbcSMatt Macy 	return (0);
4882eda14cbcSMatt Macy }
4883eda14cbcSMatt Macy 
4884eda14cbcSMatt Macy static int
4885eda14cbcSMatt Macy spa_ld_load_dedup_tables(spa_t *spa)
4886eda14cbcSMatt Macy {
4887eda14cbcSMatt Macy 	int error = 0;
4888eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4889eda14cbcSMatt Macy 
4890eda14cbcSMatt Macy 	error = ddt_load(spa);
4891eda14cbcSMatt Macy 	if (error != 0) {
4892eda14cbcSMatt Macy 		spa_load_failed(spa, "ddt_load failed [error=%d]", error);
4893eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4894eda14cbcSMatt Macy 	}
4895eda14cbcSMatt Macy 
4896eda14cbcSMatt Macy 	return (0);
4897eda14cbcSMatt Macy }
4898eda14cbcSMatt Macy 
4899eda14cbcSMatt Macy static int
4900eda14cbcSMatt Macy spa_ld_load_brt(spa_t *spa)
4901eda14cbcSMatt Macy {
4902eda14cbcSMatt Macy 	int error = 0;
4903eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4904eda14cbcSMatt Macy 
4905eda14cbcSMatt Macy 	error = brt_load(spa);
4906eda14cbcSMatt Macy 	if (error != 0) {
4907eda14cbcSMatt Macy 		spa_load_failed(spa, "brt_load failed [error=%d]", error);
4908eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4909eda14cbcSMatt Macy 	}
4910eda14cbcSMatt Macy 
4911eda14cbcSMatt Macy 	return (0);
4912eda14cbcSMatt Macy }
4913eda14cbcSMatt Macy 
4914eda14cbcSMatt Macy static int
4915eda14cbcSMatt Macy spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, const char **ereport)
4916eda14cbcSMatt Macy {
4917eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4918eda14cbcSMatt Macy 
4919eda14cbcSMatt Macy 	if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) {
4920eda14cbcSMatt Macy 		boolean_t missing = spa_check_logs(spa);
4921eda14cbcSMatt Macy 		if (missing) {
4922eda14cbcSMatt Macy 			if (spa->spa_missing_tvds != 0) {
4923eda14cbcSMatt Macy 				spa_load_note(spa, "spa_check_logs failed "
4924eda14cbcSMatt Macy 				    "so dropping the logs");
4925eda14cbcSMatt Macy 			} else {
4926eda14cbcSMatt Macy 				*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
4927eda14cbcSMatt Macy 				spa_load_failed(spa, "spa_check_logs failed");
4928eda14cbcSMatt Macy 				return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG,
4929eda14cbcSMatt Macy 				    ENXIO));
4930eda14cbcSMatt Macy 			}
4931eda14cbcSMatt Macy 		}
4932eda14cbcSMatt Macy 	}
4933eda14cbcSMatt Macy 
4934eda14cbcSMatt Macy 	return (0);
4935eda14cbcSMatt Macy }
4936eda14cbcSMatt Macy 
4937eda14cbcSMatt Macy static int
4938eda14cbcSMatt Macy spa_ld_verify_pool_data(spa_t *spa)
4939eda14cbcSMatt Macy {
4940eda14cbcSMatt Macy 	int error = 0;
4941eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4942eda14cbcSMatt Macy 
4943eda14cbcSMatt Macy 	/*
4944eda14cbcSMatt Macy 	 * We've successfully opened the pool; verify that we're ready
4945eda14cbcSMatt Macy 	 * to start pushing transactions.
4946eda14cbcSMatt Macy 	 */
4947eda14cbcSMatt Macy 	if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
4948eda14cbcSMatt Macy 		error = spa_load_verify(spa);
4949eda14cbcSMatt Macy 		if (error != 0) {
4950eda14cbcSMatt Macy 			spa_load_failed(spa, "spa_load_verify failed "
4951eda14cbcSMatt Macy 			    "[error=%d]", error);
4952eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
4953eda14cbcSMatt Macy 			    error));
4954eda14cbcSMatt Macy 		}
4955eda14cbcSMatt Macy 	}
4956eda14cbcSMatt Macy 
4957eda14cbcSMatt Macy 	return (0);
4958eda14cbcSMatt Macy }
4959eda14cbcSMatt Macy 
4960eda14cbcSMatt Macy static void
4961eda14cbcSMatt Macy spa_ld_claim_log_blocks(spa_t *spa)
4962eda14cbcSMatt Macy {
4963eda14cbcSMatt Macy 	dmu_tx_t *tx;
4964eda14cbcSMatt Macy 	dsl_pool_t *dp = spa_get_dsl(spa);
4965eda14cbcSMatt Macy 
4966eda14cbcSMatt Macy 	/*
4967eda14cbcSMatt Macy 	 * Claim log blocks that haven't been committed yet.
4968eda14cbcSMatt Macy 	 * This must all happen in a single txg.
4969eda14cbcSMatt Macy 	 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
4970eda14cbcSMatt Macy 	 * invoked from zil_claim_log_block()'s i/o done callback.
4971eda14cbcSMatt Macy 	 * Price of rollback is that we abandon the log.
4972eda14cbcSMatt Macy 	 */
4973eda14cbcSMatt Macy 	spa->spa_claiming = B_TRUE;
4974eda14cbcSMatt Macy 
4975eda14cbcSMatt Macy 	tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
4976eda14cbcSMatt Macy 	(void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
4977eda14cbcSMatt Macy 	    zil_claim, tx, DS_FIND_CHILDREN);
4978eda14cbcSMatt Macy 	dmu_tx_commit(tx);
4979eda14cbcSMatt Macy 
4980eda14cbcSMatt Macy 	spa->spa_claiming = B_FALSE;
4981eda14cbcSMatt Macy 
4982eda14cbcSMatt Macy 	spa_set_log_state(spa, SPA_LOG_GOOD);
4983eda14cbcSMatt Macy }
4984eda14cbcSMatt Macy 
4985eda14cbcSMatt Macy static void
4986eda14cbcSMatt Macy spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg,
4987eda14cbcSMatt Macy     boolean_t update_config_cache)
4988eda14cbcSMatt Macy {
4989eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4990eda14cbcSMatt Macy 	int need_update = B_FALSE;
4991eda14cbcSMatt Macy 
4992eda14cbcSMatt Macy 	/*
4993eda14cbcSMatt Macy 	 * If the config cache is stale, or we have uninitialized
4994eda14cbcSMatt Macy 	 * metaslabs (see spa_vdev_add()), then update the config.
4995eda14cbcSMatt Macy 	 *
4996eda14cbcSMatt Macy 	 * If this is a verbatim import, trust the current
4997eda14cbcSMatt Macy 	 * in-core spa_config and update the disk labels.
4998eda14cbcSMatt Macy 	 */
4999eda14cbcSMatt Macy 	if (update_config_cache || config_cache_txg != spa->spa_config_txg ||
5000eda14cbcSMatt Macy 	    spa->spa_load_state == SPA_LOAD_IMPORT ||
5001eda14cbcSMatt Macy 	    spa->spa_load_state == SPA_LOAD_RECOVER ||
5002eda14cbcSMatt Macy 	    (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
5003eda14cbcSMatt Macy 		need_update = B_TRUE;
5004eda14cbcSMatt Macy 
5005eda14cbcSMatt Macy 	for (int c = 0; c < rvd->vdev_children; c++)
5006eda14cbcSMatt Macy 		if (rvd->vdev_child[c]->vdev_ms_array == 0)
5007eda14cbcSMatt Macy 			need_update = B_TRUE;
5008eda14cbcSMatt Macy 
5009eda14cbcSMatt Macy 	/*
5010eda14cbcSMatt Macy 	 * Update the config cache asynchronously in case we're the
5011eda14cbcSMatt Macy 	 * root pool, in which case the config cache isn't writable yet.
5012eda14cbcSMatt Macy 	 */
5013eda14cbcSMatt Macy 	if (need_update)
5014eda14cbcSMatt Macy 		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
5015eda14cbcSMatt Macy }
5016eda14cbcSMatt Macy 
5017eda14cbcSMatt Macy static void
5018eda14cbcSMatt Macy spa_ld_prepare_for_reload(spa_t *spa)
5019eda14cbcSMatt Macy {
5020eda14cbcSMatt Macy 	spa_mode_t mode = spa->spa_mode;
5021eda14cbcSMatt Macy 	int async_suspended = spa->spa_async_suspended;
5022eda14cbcSMatt Macy 
5023eda14cbcSMatt Macy 	spa_unload(spa);
5024eda14cbcSMatt Macy 	spa_deactivate(spa);
5025eda14cbcSMatt Macy 	spa_activate(spa, mode);
5026eda14cbcSMatt Macy 
5027eda14cbcSMatt Macy 	/*
5028eda14cbcSMatt Macy 	 * We save the value of spa_async_suspended as it gets reset to 0 by
5029eda14cbcSMatt Macy 	 * spa_unload(). We want to restore it to the original value before
5030eda14cbcSMatt Macy 	 * returning as we might be calling spa_async_resume() later.
5031eda14cbcSMatt Macy 	 */
5032eda14cbcSMatt Macy 	spa->spa_async_suspended = async_suspended;
5033eda14cbcSMatt Macy }
5034eda14cbcSMatt Macy 
5035eda14cbcSMatt Macy static int
5036eda14cbcSMatt Macy spa_ld_read_checkpoint_txg(spa_t *spa)
5037eda14cbcSMatt Macy {
5038eda14cbcSMatt Macy 	uberblock_t checkpoint;
5039eda14cbcSMatt Macy 	int error = 0;
5040eda14cbcSMatt Macy 
5041eda14cbcSMatt Macy 	ASSERT0(spa->spa_checkpoint_txg);
5042eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
5043eda14cbcSMatt Macy 	    spa->spa_load_thread == curthread);
5044eda14cbcSMatt Macy 
5045eda14cbcSMatt Macy 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
5046eda14cbcSMatt Macy 	    DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
5047eda14cbcSMatt Macy 	    sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
5048eda14cbcSMatt Macy 
5049eda14cbcSMatt Macy 	if (error == ENOENT)
5050eda14cbcSMatt Macy 		return (0);
5051eda14cbcSMatt Macy 
5052eda14cbcSMatt Macy 	if (error != 0)
5053eda14cbcSMatt Macy 		return (error);
5054eda14cbcSMatt Macy 
5055eda14cbcSMatt Macy 	ASSERT3U(checkpoint.ub_txg, !=, 0);
5056eda14cbcSMatt Macy 	ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0);
5057eda14cbcSMatt Macy 	ASSERT3U(checkpoint.ub_timestamp, !=, 0);
5058eda14cbcSMatt Macy 	spa->spa_checkpoint_txg = checkpoint.ub_txg;
5059eda14cbcSMatt Macy 	spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp;
5060eda14cbcSMatt Macy 
5061eda14cbcSMatt Macy 	return (0);
5062eda14cbcSMatt Macy }
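
/*
 * Illustrative sketch: the zap_lookup() above reads the checkpointed
 * uberblock back as an array of 64-bit words (integer size
 * sizeof (uint64_t), count sizeof (uberblock_t) / sizeof (uint64_t)).
 * The same read, isolated into a hypothetical helper for clarity:
 */
static int
example_read_checkpoint_ub(spa_t *spa, uberblock_t *ub)
{
	/* Read the whole uberblock back as uint64_t words. */
	return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
	    sizeof (uberblock_t) / sizeof (uint64_t), ub));
}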
5063eda14cbcSMatt Macy 
5064eda14cbcSMatt Macy static int
5065eda14cbcSMatt Macy spa_ld_mos_init(spa_t *spa, spa_import_type_t type)
5066eda14cbcSMatt Macy {
5067eda14cbcSMatt Macy 	int error = 0;
5068eda14cbcSMatt Macy 
5069eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
5070eda14cbcSMatt Macy 	ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
5071eda14cbcSMatt Macy 
5072eda14cbcSMatt Macy 	/*
5073eda14cbcSMatt Macy 	 * Never trust the config that is provided unless we are assembling
5074eda14cbcSMatt Macy 	 * a pool following a split.
5075eda14cbcSMatt Macy 	 * This means don't trust blkptrs and the vdev tree in general. This
5076eda14cbcSMatt Macy 	 * also effectively puts the spa in read-only mode since
5077eda14cbcSMatt Macy 	 * spa_writeable() checks for spa_trust_config to be true.
5078eda14cbcSMatt Macy 	 * We will later load a trusted config from the MOS.
5079eda14cbcSMatt Macy 	 */
5080eda14cbcSMatt Macy 	if (type != SPA_IMPORT_ASSEMBLE)
5081eda14cbcSMatt Macy 		spa->spa_trust_config = B_FALSE;
5082eda14cbcSMatt Macy 
5083eda14cbcSMatt Macy 	/*
5084eda14cbcSMatt Macy 	 * Parse the config provided to create a vdev tree.
5085eda14cbcSMatt Macy 	 */
5086eda14cbcSMatt Macy 	error = spa_ld_parse_config(spa, type);
5087eda14cbcSMatt Macy 	if (error != 0)
5088eda14cbcSMatt Macy 		return (error);
5089eda14cbcSMatt Macy 
5090eda14cbcSMatt Macy 	spa_import_progress_add(spa);
5091eda14cbcSMatt Macy 
5092eda14cbcSMatt Macy 	/*
5093eda14cbcSMatt Macy 	 * Now that we have the vdev tree, try to open each vdev. This involves
5094eda14cbcSMatt Macy 	 * opening the underlying physical device, retrieving its geometry and
5095eda14cbcSMatt Macy 	 * probing the vdev with a dummy I/O. The state of each vdev will be set
5096eda14cbcSMatt Macy 	 * based on the success of those operations. After this we'll be ready
5097eda14cbcSMatt Macy 	 * to read from the vdevs.
5098eda14cbcSMatt Macy 	 */
5099eda14cbcSMatt Macy 	error = spa_ld_open_vdevs(spa);
5100eda14cbcSMatt Macy 	if (error != 0)
5101eda14cbcSMatt Macy 		return (error);
5102eda14cbcSMatt Macy 
5103eda14cbcSMatt Macy 	/*
5104eda14cbcSMatt Macy 	 * Read the label of each vdev and make sure that the GUIDs stored
5105eda14cbcSMatt Macy 	 * there match the GUIDs in the config provided.
5106eda14cbcSMatt Macy 	 * If we're assembling a new pool that's been split off from an
5107eda14cbcSMatt Macy 	 * existing pool, the labels haven't yet been updated so we skip
5108eda14cbcSMatt Macy 	 * validation for now.
5109eda14cbcSMatt Macy 	 */
5110eda14cbcSMatt Macy 	if (type != SPA_IMPORT_ASSEMBLE) {
5111eda14cbcSMatt Macy 		error = spa_ld_validate_vdevs(spa);
5112eda14cbcSMatt Macy 		if (error != 0)
5113eda14cbcSMatt Macy 			return (error);
5114eda14cbcSMatt Macy 	}
5115eda14cbcSMatt Macy 
5116eda14cbcSMatt Macy 	/*
5117eda14cbcSMatt Macy 	 * Read all vdev labels to find the best uberblock (i.e. latest,
5118eda14cbcSMatt Macy 	 * unless spa_load_max_txg is set) and store it in spa_uberblock. We
5119eda14cbcSMatt Macy 	 * get the list of features required to read blkptrs in the MOS from
5120eda14cbcSMatt Macy 	 * the vdev label with the best uberblock and verify that our version
5121eda14cbcSMatt Macy 	 * of zfs supports them all.
5122eda14cbcSMatt Macy 	 */
5123eda14cbcSMatt Macy 	error = spa_ld_select_uberblock(spa, type);
5124eda14cbcSMatt Macy 	if (error != 0)
5125eda14cbcSMatt Macy 		return (error);
5126eda14cbcSMatt Macy 
5127eda14cbcSMatt Macy 	/*
5128eda14cbcSMatt Macy 	 * Pass that uberblock to the dsl_pool layer which will open the root
5129eda14cbcSMatt Macy 	 * blkptr. This blkptr points to the latest version of the MOS and will
5130eda14cbcSMatt Macy 	 * allow us to read its contents.
5131eda14cbcSMatt Macy 	 */
5132eda14cbcSMatt Macy 	error = spa_ld_open_rootbp(spa);
5133eda14cbcSMatt Macy 	if (error != 0)
5134eda14cbcSMatt Macy 		return (error);
5135eda14cbcSMatt Macy 
5136eda14cbcSMatt Macy 	return (0);
5137eda14cbcSMatt Macy }
5138eda14cbcSMatt Macy 
513933b8c039SMartin Matuska static int
5140eda14cbcSMatt Macy spa_ld_checkpoint_rewind(spa_t *spa)
5141eda14cbcSMatt Macy {
5142eda14cbcSMatt Macy 	uberblock_t checkpoint;
5143eda14cbcSMatt Macy 	int error = 0;
5144eda14cbcSMatt Macy 
5145eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
5146eda14cbcSMatt Macy 	ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
5147eda14cbcSMatt Macy 
5148eda14cbcSMatt Macy 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
5149eda14cbcSMatt Macy 	    DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
5150eda14cbcSMatt Macy 	    sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
5151eda14cbcSMatt Macy 
5152eda14cbcSMatt Macy 	if (error != 0) {
5153eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to retrieve checkpointed "
5154eda14cbcSMatt Macy 		    "uberblock from the MOS config [error=%d]", error);
5155eda14cbcSMatt Macy 
5156eda14cbcSMatt Macy 		if (error == ENOENT)
5157eda14cbcSMatt Macy 			error = ZFS_ERR_NO_CHECKPOINT;
5158eda14cbcSMatt Macy 
5159eda14cbcSMatt Macy 		return (error);
5160eda14cbcSMatt Macy 	}
5161eda14cbcSMatt Macy 
5162eda14cbcSMatt Macy 	ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg);
5163eda14cbcSMatt Macy 	ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg);
5164eda14cbcSMatt Macy 
5165eda14cbcSMatt Macy 	/*
5166eda14cbcSMatt Macy 	 * We need to update the txg and timestamp of the checkpointed
5167eda14cbcSMatt Macy 	 * uberblock to be higher than the latest one. This ensures that
5168eda14cbcSMatt Macy 	 * the checkpointed uberblock is selected if we were to close and
5169eda14cbcSMatt Macy 	 * reopen the pool right after we've written it in the vdev labels.
5170eda14cbcSMatt Macy 	 * (also see block comment in vdev_uberblock_compare)
5171eda14cbcSMatt Macy 	 */
5172eda14cbcSMatt Macy 	checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1;
5173eda14cbcSMatt Macy 	checkpoint.ub_timestamp = gethrestime_sec();
5174eda14cbcSMatt Macy 
5175eda14cbcSMatt Macy 	/*
5176eda14cbcSMatt Macy 	 * Set current uberblock to be the checkpointed uberblock.
5177eda14cbcSMatt Macy 	 */
5178eda14cbcSMatt Macy 	spa->spa_uberblock = checkpoint;
5179eda14cbcSMatt Macy 
5180eda14cbcSMatt Macy 	/*
5181eda14cbcSMatt Macy 	 * If we are doing a normal rewind, then the pool is open for
5182eda14cbcSMatt Macy 	 * writing and we sync the "updated" checkpointed uberblock to
5183eda14cbcSMatt Macy 	 * disk. Once this is done, we've basically rewound the whole
5184eda14cbcSMatt Macy 	 * pool and there is no way back.
5185eda14cbcSMatt Macy 	 *
5186eda14cbcSMatt Macy 	 * There are cases when we don't want to attempt to sync the
5187eda14cbcSMatt Macy 	 * checkpointed uberblock to disk because we are opening a
5188eda14cbcSMatt Macy 	 * pool as read-only. Specifically, verifying the checkpointed
5189eda14cbcSMatt Macy 	 * state with zdb, and importing the checkpointed state to get
5190eda14cbcSMatt Macy 	 * a "preview" of its content.
5191eda14cbcSMatt Macy 	 */
5192eda14cbcSMatt Macy 	if (spa_writeable(spa)) {
5193eda14cbcSMatt Macy 		vdev_t *rvd = spa->spa_root_vdev;
5194eda14cbcSMatt Macy 
5195eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5196eda14cbcSMatt Macy 		vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
5197eda14cbcSMatt Macy 		int svdcount = 0;
5198eda14cbcSMatt Macy 		int children = rvd->vdev_children;
5199eda14cbcSMatt Macy 		int c0 = random_in_range(children);
5200eda14cbcSMatt Macy 
5201eda14cbcSMatt Macy 		for (int c = 0; c < children; c++) {
5202eda14cbcSMatt Macy 			vdev_t *vd = rvd->vdev_child[(c0 + c) % children];
5203eda14cbcSMatt Macy 
5204eda14cbcSMatt Macy 			/* Stop when revisiting the first vdev */
5205eda14cbcSMatt Macy 			if (c > 0 && svd[0] == vd)
5206eda14cbcSMatt Macy 				break;
5207eda14cbcSMatt Macy 
5208eda14cbcSMatt Macy 			if (vd->vdev_ms_array == 0 || vd->vdev_islog ||
5209eda14cbcSMatt Macy 			    !vdev_is_concrete(vd))
5210eda14cbcSMatt Macy 				continue;
5211eda14cbcSMatt Macy 
5212eda14cbcSMatt Macy 			svd[svdcount++] = vd;
5213eda14cbcSMatt Macy 			if (svdcount == SPA_SYNC_MIN_VDEVS)
5214eda14cbcSMatt Macy 				break;
5215eda14cbcSMatt Macy 		}
5216eda14cbcSMatt Macy 		error = vdev_config_sync(svd, svdcount, spa->spa_first_txg);
5217eda14cbcSMatt Macy 		if (error == 0)
5218eda14cbcSMatt Macy 			spa->spa_last_synced_guid = rvd->vdev_guid;
5219eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
5220eda14cbcSMatt Macy 
5221eda14cbcSMatt Macy 		if (error != 0) {
5222eda14cbcSMatt Macy 			spa_load_failed(spa, "failed to write checkpointed "
5223a0b956f5SMartin Matuska 			    "uberblock to the vdev labels [error=%d]", error);
5224eda14cbcSMatt Macy 			return (error);
5225eda14cbcSMatt Macy 		}
5226eda14cbcSMatt Macy 	}
5227eda14cbcSMatt Macy 
5228eda14cbcSMatt Macy 	return (0);
5229eda14cbcSMatt Macy }
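
/*
 * Illustrative sketch of why the txg/timestamp bump above is sufficient:
 * uberblock selection prefers the higher txg and breaks ties on
 * timestamp, so the rewritten checkpoint wins. This is a simplified,
 * hypothetical rendition of that ordering; the real comparison in
 * vdev_uberblock_compare() also weighs additional state such as MMP.
 */
static int
example_ub_compare(const uberblock_t *a, const uberblock_t *b)
{
	if (a->ub_txg != b->ub_txg)
		return (a->ub_txg < b->ub_txg ? -1 : 1);

	/* Equal txgs: the more recently written uberblock wins. */
	if (a->ub_timestamp != b->ub_timestamp)
		return (a->ub_timestamp < b->ub_timestamp ? -1 : 1);

	return (0);
}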
5230eda14cbcSMatt Macy 
5231eda14cbcSMatt Macy static int
5232eda14cbcSMatt Macy spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type,
5233eda14cbcSMatt Macy     boolean_t *update_config_cache)
5234eda14cbcSMatt Macy {
5235eda14cbcSMatt Macy 	int error;
5236eda14cbcSMatt Macy 
5237eda14cbcSMatt Macy 	/*
5238eda14cbcSMatt Macy 	 * Parse the config for pool, open and validate vdevs,
5239eda14cbcSMatt Macy 	 * select an uberblock, and use that uberblock to open
5240eda14cbcSMatt Macy 	 * the MOS.
5241eda14cbcSMatt Macy 	 */
5242eda14cbcSMatt Macy 	error = spa_ld_mos_init(spa, type);
5243eda14cbcSMatt Macy 	if (error != 0)
5244eda14cbcSMatt Macy 		return (error);
5245eda14cbcSMatt Macy 
5246eda14cbcSMatt Macy 	/*
5247eda14cbcSMatt Macy 	 * Retrieve the trusted config stored in the MOS and use it to create
5248eda14cbcSMatt Macy 	 * a new, exact version of the vdev tree, then reopen all vdevs.
5249eda14cbcSMatt Macy 	 */
5250eda14cbcSMatt Macy 	error = spa_ld_trusted_config(spa, type, B_FALSE);
5251eda14cbcSMatt Macy 	if (error == EAGAIN) {
5252eda14cbcSMatt Macy 		if (update_config_cache != NULL)
5253eda14cbcSMatt Macy 			*update_config_cache = B_TRUE;
5254eda14cbcSMatt Macy 
5255eda14cbcSMatt Macy 		/*
5256eda14cbcSMatt Macy 		 * Redo the loading process with the trusted config if it is
5257eda14cbcSMatt Macy 		 * too different from the untrusted config.
5258eda14cbcSMatt Macy 		 */
5259eda14cbcSMatt Macy 		spa_ld_prepare_for_reload(spa);
5260eda14cbcSMatt Macy 		spa_load_note(spa, "RELOADING");
5261eda14cbcSMatt Macy 		error = spa_ld_mos_init(spa, type);
5262eda14cbcSMatt Macy 		if (error != 0)
5263eda14cbcSMatt Macy 			return (error);
5264eda14cbcSMatt Macy 
5265eda14cbcSMatt Macy 		error = spa_ld_trusted_config(spa, type, B_TRUE);
5266eda14cbcSMatt Macy 		if (error != 0)
5267eda14cbcSMatt Macy 			return (error);
5268eda14cbcSMatt Macy 
5269eda14cbcSMatt Macy 	} else if (error != 0) {
5270eda14cbcSMatt Macy 		return (error);
5271eda14cbcSMatt Macy 	}
5272eda14cbcSMatt Macy 
5273eda14cbcSMatt Macy 	return (0);
5274eda14cbcSMatt Macy }
5275eda14cbcSMatt Macy 
5276eda14cbcSMatt Macy /*
52773494f7c0SMartin Matuska  * Load an existing storage pool, using the config provided. This config
5278eda14cbcSMatt Macy  * describes which vdevs are part of the pool and is later validated against
5279eda14cbcSMatt Macy  * partial configs present in each vdev's label and an entire copy of the
5280eda14cbcSMatt Macy  * config stored in the MOS.
5281eda14cbcSMatt Macy  */
5282eda14cbcSMatt Macy static int
5283eda14cbcSMatt Macy spa_load_impl(spa_t *spa, spa_import_type_t type, const char **ereport)
5284eda14cbcSMatt Macy {
5285eda14cbcSMatt Macy 	int error = 0;
5286eda14cbcSMatt Macy 	boolean_t missing_feat_write = B_FALSE;
5287eda14cbcSMatt Macy 	boolean_t checkpoint_rewind =
5288eda14cbcSMatt Macy 	    (spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
5289eda14cbcSMatt Macy 	boolean_t update_config_cache = B_FALSE;
52903494f7c0SMartin Matuska 	hrtime_t load_start = gethrtime();
5291eda14cbcSMatt Macy 
5292eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
5293eda14cbcSMatt Macy 	ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
5294eda14cbcSMatt Macy 
5295eda14cbcSMatt Macy 	spa_load_note(spa, "LOADING");
5296eda14cbcSMatt Macy 
5297eda14cbcSMatt Macy 	error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache);
5298eda14cbcSMatt Macy 	if (error != 0)
52993494f7c0SMartin Matuska 		return (error);
5300eda14cbcSMatt Macy 
5301eda14cbcSMatt Macy 	/*
5302eda14cbcSMatt Macy 	 * If we are rewinding to the checkpoint then we need to repeat
5303eda14cbcSMatt Macy 	 * everything we've done so far in this function but this time
5304eda14cbcSMatt Macy 	 * selecting the checkpointed uberblock and using that to open
5305eda14cbcSMatt Macy 	 * the MOS.
5306eda14cbcSMatt Macy 	 */
5307eda14cbcSMatt Macy 	if (checkpoint_rewind) {
53083494f7c0SMartin Matuska 		/*
5309eda14cbcSMatt Macy 		 * If we are rewinding to the checkpoint, update the config
5310eda14cbcSMatt Macy 		 * cache anyway.
5311eda14cbcSMatt Macy 		 */
5312eda14cbcSMatt Macy 		update_config_cache = B_TRUE;
5313eda14cbcSMatt Macy 
5314eda14cbcSMatt Macy 		/*
5315eda14cbcSMatt Macy 		 * Extract the checkpointed uberblock from the current MOS
53163494f7c0SMartin Matuska 		 * and use this as the pool's uberblock from now on. If the
5317eda14cbcSMatt Macy 		 * pool is imported as writeable we also write the checkpoint
5318eda14cbcSMatt Macy 		 * uberblock to the labels, making the rewind permanent.
5319eda14cbcSMatt Macy 		 */
5320eda14cbcSMatt Macy 		error = spa_ld_checkpoint_rewind(spa);
5321eda14cbcSMatt Macy 		if (error != 0)
5322eda14cbcSMatt Macy 			return (error);
5323eda14cbcSMatt Macy 
5324eda14cbcSMatt Macy 		/*
53253494f7c0SMartin Matuska 		 * Redo the loading process again with the
5326eda14cbcSMatt Macy 		 * checkpointed uberblock.
5327eda14cbcSMatt Macy 		 */
5328eda14cbcSMatt Macy 		spa_ld_prepare_for_reload(spa);
5329eda14cbcSMatt Macy 		spa_load_note(spa, "LOADING checkpointed uberblock");
5330eda14cbcSMatt Macy 		error = spa_ld_mos_with_trusted_config(spa, type, NULL);
5331eda14cbcSMatt Macy 		if (error != 0)
5332eda14cbcSMatt Macy 			return (error);
5333eda14cbcSMatt Macy 	}
53343494f7c0SMartin Matuska 
5335eda14cbcSMatt Macy 	/*
5336eda14cbcSMatt Macy 	 * Drop the namespace lock for the rest of the function.
5337eda14cbcSMatt Macy 	 */
5338eda14cbcSMatt Macy 	spa->spa_load_thread = curthread;
53393494f7c0SMartin Matuska 	mutex_exit(&spa_namespace_lock);
5340eda14cbcSMatt Macy 
5341eda14cbcSMatt Macy 	/*
5342eda14cbcSMatt Macy 	 * Retrieve the checkpoint txg if the pool has a checkpoint.
5343eda14cbcSMatt Macy 	 */
53443494f7c0SMartin Matuska 	spa_import_progress_set_notes(spa, "Loading checkpoint txg");
53452a58b312SMartin Matuska 	error = spa_ld_read_checkpoint_txg(spa);
53462a58b312SMartin Matuska 	if (error != 0)
53472a58b312SMartin Matuska 		goto fail;
53482a58b312SMartin Matuska 
5349eda14cbcSMatt Macy 	/*
5350eda14cbcSMatt Macy 	 * Retrieve the mapping of indirect vdevs. Those vdevs were removed
5351eda14cbcSMatt Macy 	 * from the pool and their contents were re-mapped to other vdevs. Note
5352eda14cbcSMatt Macy 	 * that everything that we read before this step must have been
53533494f7c0SMartin Matuska 	 * rewritten on concrete vdevs after the last device removal was
5354eda14cbcSMatt Macy 	 * initiated. Otherwise we could be reading from indirect vdevs before
5355eda14cbcSMatt Macy 	 * we have loaded their mappings.
5356eda14cbcSMatt Macy 	 */
5357eda14cbcSMatt Macy 	spa_import_progress_set_notes(spa, "Loading indirect vdev metadata");
5358eda14cbcSMatt Macy 	error = spa_ld_open_indirect_vdev_metadata(spa);
5359eda14cbcSMatt Macy 	if (error != 0)
5360eda14cbcSMatt Macy 		goto fail;
5361eda14cbcSMatt Macy 
5362eda14cbcSMatt Macy 	/*
5363eda14cbcSMatt Macy 	 * Retrieve the full list of active features from the MOS and check if
5364eda14cbcSMatt Macy 	 * they are all supported.
5365eda14cbcSMatt Macy 	 */
5366eda14cbcSMatt Macy 	spa_import_progress_set_notes(spa, "Checking feature flags");
5367eda14cbcSMatt Macy 	error = spa_ld_check_features(spa, &missing_feat_write);
5368eda14cbcSMatt Macy 	if (error != 0)
5369eda14cbcSMatt Macy 		goto fail;
5370eda14cbcSMatt Macy 
5371eda14cbcSMatt Macy 	/*
5372eda14cbcSMatt Macy 	 * Load several special directories from the MOS needed by the dsl_pool
5373eda14cbcSMatt Macy 	 * layer.
5374eda14cbcSMatt Macy 	 */
53753494f7c0SMartin Matuska 	spa_import_progress_set_notes(spa, "Loading special MOS directories");
5376eda14cbcSMatt Macy 	error = spa_ld_load_special_directories(spa);
5377eda14cbcSMatt Macy 	if (error != 0)
5378eda14cbcSMatt Macy 		goto fail;
5379eda14cbcSMatt Macy 
5380eda14cbcSMatt Macy 	/*
5381eda14cbcSMatt Macy 	 * Retrieve pool properties from the MOS.
5382eda14cbcSMatt Macy 	 */
5383eda14cbcSMatt Macy 	spa_import_progress_set_notes(spa, "Loading properties");
5384eda14cbcSMatt Macy 	error = spa_ld_get_props(spa);
53853494f7c0SMartin Matuska 	if (error != 0)
5386eda14cbcSMatt Macy 		goto fail;
5387eda14cbcSMatt Macy 
5388eda14cbcSMatt Macy 	/*
5389eda14cbcSMatt Macy 	 * Retrieve the list of auxiliary devices - cache devices and spares -
5390eda14cbcSMatt Macy 	 * and open them.
5391eda14cbcSMatt Macy 	 */
5392eda14cbcSMatt Macy 	spa_import_progress_set_notes(spa, "Loading AUX vdevs");
53933494f7c0SMartin Matuska 	error = spa_ld_open_aux_vdevs(spa, type);
5394eda14cbcSMatt Macy 	if (error != 0)
5395eda14cbcSMatt Macy 		goto fail;
5396eda14cbcSMatt Macy 
5397eda14cbcSMatt Macy 	/*
5398eda14cbcSMatt Macy 	 * Load the metadata for all vdevs. Also check if unopenable devices
5399eda14cbcSMatt Macy 	 * should be autoreplaced.
5400eda14cbcSMatt Macy 	 */
5401e716630dSMartin Matuska 	spa_import_progress_set_notes(spa, "Loading vdev metadata");
5402e716630dSMartin Matuska 	error = spa_ld_load_vdev_metadata(spa);
5403e716630dSMartin Matuska 	if (error != 0)
5404e716630dSMartin Matuska 		goto fail;
5405e716630dSMartin Matuska 
5406e716630dSMartin Matuska 	spa_import_progress_set_notes(spa, "Loading dedup tables");
5407e716630dSMartin Matuska 	error = spa_ld_load_dedup_tables(spa);
5408eda14cbcSMatt Macy 	if (error != 0)
5409eda14cbcSMatt Macy 		goto fail;
5410eda14cbcSMatt Macy 
5411eda14cbcSMatt Macy 	spa_import_progress_set_notes(spa, "Loading BRT");
5412eda14cbcSMatt Macy 	error = spa_ld_load_brt(spa);
5413eda14cbcSMatt Macy 	if (error != 0)
5414eda14cbcSMatt Macy 		goto fail;
5415eda14cbcSMatt Macy 
5416eda14cbcSMatt Macy 	/*
54173494f7c0SMartin Matuska 	 * Verify the logs now to make sure we don't have any unexpected errors
5418eda14cbcSMatt Macy 	 * when we claim log blocks later.
5419eda14cbcSMatt Macy 	 */
5420eda14cbcSMatt Macy 	spa_import_progress_set_notes(spa, "Verifying Log Devices");
5421eda14cbcSMatt Macy 	error = spa_ld_verify_logs(spa, type, ereport);
5422eda14cbcSMatt Macy 	if (error != 0)
5423eda14cbcSMatt Macy 		goto fail;
5424eda14cbcSMatt Macy 
5425eda14cbcSMatt Macy 	if (missing_feat_write) {
5426eda14cbcSMatt Macy 		ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT);
5427eda14cbcSMatt Macy 
5428eda14cbcSMatt Macy 		/*
5429eda14cbcSMatt Macy 		 * At this point, we know that we can open the pool in
5430eda14cbcSMatt Macy 		 * read-only mode but not read-write mode. We now have enough
5431eda14cbcSMatt Macy 		 * information and can return to userland.
5432eda14cbcSMatt Macy 		 */
5433eda14cbcSMatt Macy 		error = spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT,
5434eda14cbcSMatt Macy 		    ENOTSUP);
5435eda14cbcSMatt Macy 		goto fail;
5436eda14cbcSMatt Macy 	}
54373494f7c0SMartin Matuska 
5438eda14cbcSMatt Macy 	/*
5439eda14cbcSMatt Macy 	 * Traverse the last txgs to make sure the pool was left off in a safe
5440eda14cbcSMatt Macy 	 * state. When performing an extreme rewind, we verify the whole pool,
5441eda14cbcSMatt Macy 	 * which can take a very long time.
5442eda14cbcSMatt Macy 	 */
5443eda14cbcSMatt Macy 	spa_import_progress_set_notes(spa, "Verifying pool data");
5444eda14cbcSMatt Macy 	error = spa_ld_verify_pool_data(spa);
54453494f7c0SMartin Matuska 	if (error != 0)
5446eda14cbcSMatt Macy 		goto fail;
5447eda14cbcSMatt Macy 
5448eda14cbcSMatt Macy 	/*
5449eda14cbcSMatt Macy 	 * Calculate the deflated space for the pool. This must be done before
5450eda14cbcSMatt Macy 	 * we write anything to the pool because we'd need to update the space
5451eda14cbcSMatt Macy 	 * accounting using the deflated sizes.
5452eda14cbcSMatt Macy 	 */
5453eda14cbcSMatt Macy 	spa_import_progress_set_notes(spa, "Calculating deflated space");
54543494f7c0SMartin Matuska 	spa_update_dspace(spa);
5455eda14cbcSMatt Macy 
5456eda14cbcSMatt Macy 	/*
5457eda14cbcSMatt Macy 	 * We have now retrieved all the information we needed to open the
5458eda14cbcSMatt Macy 	 * pool. If we are importing the pool in read-write mode, a few
5459eda14cbcSMatt Macy 	 * additional steps must be performed to finish the import.
5460eda14cbcSMatt Macy 	 */
5461eda14cbcSMatt Macy 	spa_import_progress_set_notes(spa, "Starting import");
5462eda14cbcSMatt Macy 	if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER ||
5463eda14cbcSMatt Macy 	    spa->spa_load_max_txg == UINT64_MAX)) {
5464eda14cbcSMatt Macy 		uint64_t config_cache_txg = spa->spa_config_txg;
5465eda14cbcSMatt Macy 
5466eda14cbcSMatt Macy 		ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT);
5467eda14cbcSMatt Macy 
54683494f7c0SMartin Matuska 		/*
54693494f7c0SMartin Matuska 		 * Before we do any zio_write's, complete the raidz expansion
5470eda14cbcSMatt Macy 		 * scratch space copying, if necessary.
5471eda14cbcSMatt Macy 		 */
5472eda14cbcSMatt Macy 		if (RRSS_GET_STATE(&spa->spa_uberblock) == RRSS_SCRATCH_VALID)
5473eda14cbcSMatt Macy 			vdev_raidz_reflow_copy_scratch(spa);
5474eda14cbcSMatt Macy 
5475eda14cbcSMatt Macy 		/*
5476eda14cbcSMatt Macy 		 * In case of a checkpoint rewind, log the original txg
5477eda14cbcSMatt Macy 		 * of the checkpointed uberblock.
5478eda14cbcSMatt Macy 		 */
5479eda14cbcSMatt Macy 		if (checkpoint_rewind) {
5480eda14cbcSMatt Macy 			spa_history_log_internal(spa, "checkpoint rewind",
5481eda14cbcSMatt Macy 			    NULL, "rewound state to txg=%llu",
54823494f7c0SMartin Matuska 			    (u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg);
54833494f7c0SMartin Matuska 		}
5484eda14cbcSMatt Macy 
5485eda14cbcSMatt Macy 		spa_import_progress_set_notes(spa, "Claiming ZIL blocks");
5486eda14cbcSMatt Macy 		/*
5487eda14cbcSMatt Macy 		 * Traverse the ZIL and claim all blocks.
5488eda14cbcSMatt Macy 		 */
5489eda14cbcSMatt Macy 		spa_ld_claim_log_blocks(spa);
54903494f7c0SMartin Matuska 
54913494f7c0SMartin Matuska 		/*
5492eda14cbcSMatt Macy 		 * Kick-off the syncing thread.
5493eda14cbcSMatt Macy 		 */
5494eda14cbcSMatt Macy 		spa->spa_sync_on = B_TRUE;
54953494f7c0SMartin Matuska 		txg_sync_start(spa->spa_dsl_pool);
5496eda14cbcSMatt Macy 		mmp_thread_start(spa);
54973494f7c0SMartin Matuska 
5498eda14cbcSMatt Macy 		/*
5499eda14cbcSMatt Macy 		 * Wait for all claims to sync.  We sync up to the highest
5500eda14cbcSMatt Macy 		 * claimed log block birth time so that claimed log blocks
55013494f7c0SMartin Matuska 		 * don't appear to be from the future.  spa_claim_max_txg
5502eda14cbcSMatt Macy 		 * will have been set for us by ZIL traversal operations
5503eda14cbcSMatt Macy 		 * performed above.
5504eda14cbcSMatt Macy 		 */
5505eda14cbcSMatt Macy 		spa_import_progress_set_notes(spa, "Syncing ZIL claims");
5506eda14cbcSMatt Macy 		txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
5507eda14cbcSMatt Macy 
5508eda14cbcSMatt Macy 		/*
5509eda14cbcSMatt Macy 		 * Check if we need to request an update of the config. On the
5510eda14cbcSMatt Macy 		 * next sync, we would update the config stored in vdev labels
5511eda14cbcSMatt Macy 		 * and the cachefile (by default /etc/zfs/zpool.cache).
5512eda14cbcSMatt Macy 		 */
5513eda14cbcSMatt Macy 		spa_import_progress_set_notes(spa, "Updating configs");
5514eda14cbcSMatt Macy 		spa_ld_check_for_config_update(spa, config_cache_txg,
5515eda14cbcSMatt Macy 		    update_config_cache);
5516eda14cbcSMatt Macy 
5517eda14cbcSMatt Macy 		/*
5518eda14cbcSMatt Macy 		 * Check if a rebuild was in progress and if so resume it.
5519eda14cbcSMatt Macy 		 * Then check all DTLs to see if anything needs resilvering.
5520eda14cbcSMatt Macy 		 * The resilver will be deferred if a rebuild was started.
5521eda14cbcSMatt Macy 		 */
5522eda14cbcSMatt Macy 		spa_import_progress_set_notes(spa, "Starting resilvers");
5523eda14cbcSMatt Macy 		if (vdev_rebuild_active(spa->spa_root_vdev)) {
5524eda14cbcSMatt Macy 			vdev_rebuild_restart(spa);
5525eda14cbcSMatt Macy 		} else if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
5526eda14cbcSMatt Macy 		    vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
5527eda14cbcSMatt Macy 			spa_async_request(spa, SPA_ASYNC_RESILVER);
5528eda14cbcSMatt Macy 		}
5529eda14cbcSMatt Macy 
5530eda14cbcSMatt Macy 		/*
5531eda14cbcSMatt Macy 		 * Log the fact that we booted up (so that we can detect if
5532eda14cbcSMatt Macy 		 * we rebooted in the middle of an operation).
5533eda14cbcSMatt Macy 		 */
5534eda14cbcSMatt Macy 		spa_history_log_version(spa, "open", NULL);
5535eda14cbcSMatt Macy 
5536eda14cbcSMatt Macy 		spa_import_progress_set_notes(spa,
5537eda14cbcSMatt Macy 		    "Restarting device removals");
5538eda14cbcSMatt Macy 		spa_restart_removal(spa);
5539eda14cbcSMatt Macy 		spa_spawn_aux_threads(spa);
5540eda14cbcSMatt Macy 
5541eda14cbcSMatt Macy 		/*
5542eda14cbcSMatt Macy 		 * Delete any inconsistent datasets.
5543eda14cbcSMatt Macy 		 *
5544eda14cbcSMatt Macy 		 * Note:
5545eda14cbcSMatt Macy 		 * Since we may be issuing deletes for clones here,
5546eda14cbcSMatt Macy 		 * we make sure to do so after we've spawned all the
5547eda14cbcSMatt Macy 		 * auxiliary threads above (of which the livelist
5548eda14cbcSMatt Macy 		 * deletion zthr is one).
5549eda14cbcSMatt Macy 		 */
5550eda14cbcSMatt Macy 		spa_import_progress_set_notes(spa,
5551eda14cbcSMatt Macy 		    "Cleaning up inconsistent objsets");
5552eda14cbcSMatt Macy 		(void) dmu_objset_find(spa_name(spa),
5553eda14cbcSMatt Macy 		    dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
5554eda14cbcSMatt Macy 
5555eda14cbcSMatt Macy 		/*
5556eda14cbcSMatt Macy 		 * Clean up any stale temporary dataset userrefs.
5557eda14cbcSMatt Macy 		 */
5558eda14cbcSMatt Macy 		spa_import_progress_set_notes(spa,
5559eda14cbcSMatt Macy 		    "Cleaning up temporary userrefs");
5560eda14cbcSMatt Macy 		dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
5561eda14cbcSMatt Macy 
5562eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
5563eda14cbcSMatt Macy 		spa_import_progress_set_notes(spa, "Restarting initialize");
5564eda14cbcSMatt Macy 		vdev_initialize_restart(spa->spa_root_vdev);
5565eda14cbcSMatt Macy 		spa_import_progress_set_notes(spa, "Restarting TRIM");
5566eda14cbcSMatt Macy 		vdev_trim_restart(spa->spa_root_vdev);
5567eda14cbcSMatt Macy 		vdev_autotrim_restart(spa);
5568eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
5569eda14cbcSMatt Macy 		spa_import_progress_set_notes(spa, "Finished importing");
5570eda14cbcSMatt Macy 	}
5571eda14cbcSMatt Macy 	zio_handle_import_delay(spa, gethrtime() - load_start);
5572eda14cbcSMatt Macy 
5573eda14cbcSMatt Macy 	spa_import_progress_remove(spa_guid(spa));
5574eda14cbcSMatt Macy 	spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
5575eda14cbcSMatt Macy 
5576eda14cbcSMatt Macy 	spa_load_note(spa, "LOADED");
5577eda14cbcSMatt Macy fail:
5578eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
5579eda14cbcSMatt Macy 	spa->spa_load_thread = NULL;
5580eda14cbcSMatt Macy 	cv_broadcast(&spa_namespace_cv);
5581eda14cbcSMatt Macy 
5582eda14cbcSMatt Macy 	return (error);
5584eda14cbcSMatt Macy }
5585eda14cbcSMatt Macy 
5586eda14cbcSMatt Macy static int
5587eda14cbcSMatt Macy spa_load_retry(spa_t *spa, spa_load_state_t state)
5588eda14cbcSMatt Macy {
5589eda14cbcSMatt Macy 	spa_mode_t mode = spa->spa_mode;
5590eda14cbcSMatt Macy 
5591eda14cbcSMatt Macy 	spa_unload(spa);
5592eda14cbcSMatt Macy 	spa_deactivate(spa);
5593eda14cbcSMatt Macy 
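	/*
	 * Note: this targets the txg just before the uberblock that the
	 * failed load came up with, so the next attempt selects an older
	 * uberblock.
	 */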
5594eda14cbcSMatt Macy 	spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
5595eda14cbcSMatt Macy 
5596eda14cbcSMatt Macy 	spa_activate(spa, mode);
5597eda14cbcSMatt Macy 	spa_async_suspend(spa);
5598eda14cbcSMatt Macy 
5599eda14cbcSMatt Macy 	spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu",
5600eda14cbcSMatt Macy 	    (u_longlong_t)spa->spa_load_max_txg);
5601eda14cbcSMatt Macy 
5602eda14cbcSMatt Macy 	return (spa_load(spa, state, SPA_IMPORT_EXISTING));
5603eda14cbcSMatt Macy }
5604eda14cbcSMatt Macy 
5605eda14cbcSMatt Macy /*
5606eda14cbcSMatt Macy  * If spa_load() fails, this function will try loading prior txgs. If
5607eda14cbcSMatt Macy  * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
5608eda14cbcSMatt Macy  * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
5609eda14cbcSMatt Macy  * function will not rewind the pool and will return the same error as
5610eda14cbcSMatt Macy  * spa_load().
5611eda14cbcSMatt Macy  */
5612eda14cbcSMatt Macy static int
5613eda14cbcSMatt Macy spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
5614eda14cbcSMatt Macy     int rewind_flags)
5615eda14cbcSMatt Macy {
5616eda14cbcSMatt Macy 	nvlist_t *loadinfo = NULL;
5617eda14cbcSMatt Macy 	nvlist_t *config = NULL;
5618eda14cbcSMatt Macy 	int load_error, rewind_error;
5619eda14cbcSMatt Macy 	uint64_t safe_rewind_txg;
5620eda14cbcSMatt Macy 	uint64_t min_txg;
5621eda14cbcSMatt Macy 
5622eda14cbcSMatt Macy 	if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
5623eda14cbcSMatt Macy 		spa->spa_load_max_txg = spa->spa_load_txg;
5624eda14cbcSMatt Macy 		spa_set_log_state(spa, SPA_LOG_CLEAR);
5625eda14cbcSMatt Macy 	} else {
5626eda14cbcSMatt Macy 		spa->spa_load_max_txg = max_request;
5627eda14cbcSMatt Macy 		if (max_request != UINT64_MAX)
5628eda14cbcSMatt Macy 			spa->spa_extreme_rewind = B_TRUE;
5629eda14cbcSMatt Macy 	}
5630eda14cbcSMatt Macy 
5631eda14cbcSMatt Macy 	load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING);
5632eda14cbcSMatt Macy 	if (load_error == 0)
5633eda14cbcSMatt Macy 		return (0);
5634eda14cbcSMatt Macy 	if (load_error == ZFS_ERR_NO_CHECKPOINT) {
5635eda14cbcSMatt Macy 		/*
5636eda14cbcSMatt Macy 		 * When attempting checkpoint-rewind on a pool with no
5637eda14cbcSMatt Macy 		 * checkpoint, we should not attempt to load uberblocks
5638eda14cbcSMatt Macy 		 * from previous txgs when spa_load fails.
5639eda14cbcSMatt Macy 		 */
5640eda14cbcSMatt Macy 		ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
5641eda14cbcSMatt Macy 		spa_import_progress_remove(spa_guid(spa));
5642eda14cbcSMatt Macy 		return (load_error);
5643eda14cbcSMatt Macy 	}
5644eda14cbcSMatt Macy 
5645eda14cbcSMatt Macy 	if (spa->spa_root_vdev != NULL)
5646eda14cbcSMatt Macy 		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
5647eda14cbcSMatt Macy 
5648eda14cbcSMatt Macy 	spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
5649eda14cbcSMatt Macy 	spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
5650eda14cbcSMatt Macy 
5651a0b956f5SMartin Matuska 	if (rewind_flags & ZPOOL_NEVER_REWIND) {
5652a0b956f5SMartin Matuska 		nvlist_free(config);
5653eda14cbcSMatt Macy 		spa_import_progress_remove(spa_guid(spa));
5654eda14cbcSMatt Macy 		return (load_error);
5655eda14cbcSMatt Macy 	}
5656eda14cbcSMatt Macy 
5657eda14cbcSMatt Macy 	if (state == SPA_LOAD_RECOVER) {
5658eda14cbcSMatt Macy 		/* Price of rolling back is discarding txgs, including log */
5659eda14cbcSMatt Macy 		spa_set_log_state(spa, SPA_LOG_CLEAR);
5660eda14cbcSMatt Macy 	} else {
5661eda14cbcSMatt Macy 		/*
5662eda14cbcSMatt Macy 		 * If we aren't rolling back save the load info from our first
5663eda14cbcSMatt Macy 		 * import attempt so that we can restore it after attempting
5664eda14cbcSMatt Macy 		 * to rewind.
5665eda14cbcSMatt Macy 		 */
5666eda14cbcSMatt Macy 		loadinfo = spa->spa_load_info;
5667eda14cbcSMatt Macy 		spa->spa_load_info = fnvlist_alloc();
5668eda14cbcSMatt Macy 	}
5669eda14cbcSMatt Macy 
5670eda14cbcSMatt Macy 	spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
5671eda14cbcSMatt Macy 	safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
5672eda14cbcSMatt Macy 	min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
5673eda14cbcSMatt Macy 	    TXG_INITIAL : safe_rewind_txg;
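	/*
	 * Illustrative arithmetic (hypothetical numbers): if the last
	 * synced uberblock was txg 1000, then with TXG_DEFER_SIZE (2)
	 * safe_rewind_txg is 998, so a normal rewind only tries txgs
	 * 998-1000, while an extreme rewind may walk all the way back
	 * to TXG_INITIAL.
	 */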
5674eda14cbcSMatt Macy 
5675eda14cbcSMatt Macy 	/*
5676eda14cbcSMatt Macy 	 * Continue as long as we're finding errors, we're still within
5677eda14cbcSMatt Macy 	 * the acceptable rewind range, and we're still finding uberblocks
5678eda14cbcSMatt Macy 	 */
5679eda14cbcSMatt Macy 	while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
5680eda14cbcSMatt Macy 	    spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
5681eda14cbcSMatt Macy 		if (spa->spa_load_max_txg < safe_rewind_txg)
5682eda14cbcSMatt Macy 			spa->spa_extreme_rewind = B_TRUE;
5683eda14cbcSMatt Macy 		rewind_error = spa_load_retry(spa, state);
5684eda14cbcSMatt Macy 	}
5685eda14cbcSMatt Macy 
5686eda14cbcSMatt Macy 	spa->spa_extreme_rewind = B_FALSE;
5687eda14cbcSMatt Macy 	spa->spa_load_max_txg = UINT64_MAX;
5688eda14cbcSMatt Macy 
5689eda14cbcSMatt Macy 	if (config && (rewind_error || state != SPA_LOAD_RECOVER))
5690eda14cbcSMatt Macy 		spa_config_set(spa, config);
5691eda14cbcSMatt Macy 	else
5692eda14cbcSMatt Macy 		nvlist_free(config);
5693eda14cbcSMatt Macy 
5694eda14cbcSMatt Macy 	if (state == SPA_LOAD_RECOVER) {
5695eda14cbcSMatt Macy 		ASSERT3P(loadinfo, ==, NULL);
5696eda14cbcSMatt Macy 		spa_import_progress_remove(spa_guid(spa));
5697eda14cbcSMatt Macy 		return (rewind_error);
5698eda14cbcSMatt Macy 	} else {
5699eda14cbcSMatt Macy 		/* Store the rewind info as part of the initial load info */
5700eda14cbcSMatt Macy 		fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
5701eda14cbcSMatt Macy 		    spa->spa_load_info);
5702eda14cbcSMatt Macy 
5703eda14cbcSMatt Macy 		/* Restore the initial load info */
5704eda14cbcSMatt Macy 		fnvlist_free(spa->spa_load_info);
5705eda14cbcSMatt Macy 		spa->spa_load_info = loadinfo;
5706eda14cbcSMatt Macy 
5707eda14cbcSMatt Macy 		spa_import_progress_remove(spa_guid(spa));
5708eda14cbcSMatt Macy 		return (load_error);
5709be181ee2SMartin Matuska 	}
5710eda14cbcSMatt Macy }
5711eda14cbcSMatt Macy 
5712eda14cbcSMatt Macy /*
5713eda14cbcSMatt Macy  * Pool Open/Import
5714eda14cbcSMatt Macy  *
5715eda14cbcSMatt Macy  * The import case is identical to an open except that the configuration is sent
5716eda14cbcSMatt Macy  * down from userland, instead of grabbed from the configuration cache.  For the
5717eda14cbcSMatt Macy  * case of an open, the pool configuration will exist in the
5718eda14cbcSMatt Macy  * POOL_STATE_UNINITIALIZED state.
5719eda14cbcSMatt Macy  *
5720eda14cbcSMatt Macy  * The stats information (gen/count/ustats) is used to gather vdev statistics at
5721eda14cbcSMatt Macy  * the same time we open the pool, without having to keep the spa_t around in
5722eda14cbcSMatt Macy  * some ambiguous state.
572381b22a98SMartin Matuska  */
572481b22a98SMartin Matuska static int
5725eda14cbcSMatt Macy spa_open_common(const char *pool, spa_t **spapp, const void *tag,
572681b22a98SMartin Matuska     nvlist_t *nvpolicy, nvlist_t **config)
5727eda14cbcSMatt Macy {
5728eda14cbcSMatt Macy 	spa_t *spa;
5729eda14cbcSMatt Macy 	spa_load_state_t state = SPA_LOAD_OPEN;
5730eda14cbcSMatt Macy 	int error;
5731eda14cbcSMatt Macy 	int locked = B_FALSE;
5732eda14cbcSMatt Macy 	int firstopen = B_FALSE;
5733eda14cbcSMatt Macy 
5734eda14cbcSMatt Macy 	*spapp = NULL;
5735eda14cbcSMatt Macy 
5736eda14cbcSMatt Macy 	/*
5737eda14cbcSMatt Macy 	 * As disgusting as this is, we need to support recursive calls to this
5738eda14cbcSMatt Macy 	 * function because dsl_dir_open() is called during spa_load(), and ends
5739eda14cbcSMatt Macy 	 * up calling spa_open() again.  The real fix is to figure out how to
5740eda14cbcSMatt Macy 	 * avoid dsl_dir_open() calling this in the first place.
5741eda14cbcSMatt Macy 	 */
5742eda14cbcSMatt Macy 	if (MUTEX_NOT_HELD(&spa_namespace_lock)) {
5743eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
5744eda14cbcSMatt Macy 		locked = B_TRUE;
5745eda14cbcSMatt Macy 	}
5746eda14cbcSMatt Macy 
5747dbd5678dSMartin Matuska 	if ((spa = spa_lookup(pool)) == NULL) {
574881b22a98SMartin Matuska 		if (locked)
574981b22a98SMartin Matuska 			mutex_exit(&spa_namespace_lock);
5750eda14cbcSMatt Macy 		return (SET_ERROR(ENOENT));
5751eda14cbcSMatt Macy 	}
5752eda14cbcSMatt Macy 
5753eda14cbcSMatt Macy 	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
5754eda14cbcSMatt Macy 		zpool_load_policy_t policy;
5755eda14cbcSMatt Macy 
5756eda14cbcSMatt Macy 		firstopen = B_TRUE;
5757eda14cbcSMatt Macy 
5758eda14cbcSMatt Macy 		zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config,
5759eda14cbcSMatt Macy 		    &policy);
5760eda14cbcSMatt Macy 		if (policy.zlp_rewind & ZPOOL_DO_REWIND)
5761eda14cbcSMatt Macy 			state = SPA_LOAD_RECOVER;
5762eda14cbcSMatt Macy 
5763eda14cbcSMatt Macy 		spa_activate(spa, spa_mode_global);
5764eda14cbcSMatt Macy 
5765eda14cbcSMatt Macy 		if (state != SPA_LOAD_RECOVER)
5766eda14cbcSMatt Macy 			spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
5767eda14cbcSMatt Macy 		spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
5768a0b956f5SMartin Matuska 
5769a0b956f5SMartin Matuska 		zfs_dbgmsg("spa_open_common: opening %s", pool);
5770eda14cbcSMatt Macy 		error = spa_load_best(spa, state, policy.zlp_txg,
5771eda14cbcSMatt Macy 		    policy.zlp_rewind);
5772eda14cbcSMatt Macy 
5773eda14cbcSMatt Macy 		if (error == EBADF) {
5774eda14cbcSMatt Macy 			/*
5775a0b956f5SMartin Matuska 			 * If vdev_validate() returns failure (indicated by
5776eda14cbcSMatt Macy 			 * EBADF), it means that one of the vdevs records that
5777eda14cbcSMatt Macy 			 * the pool has been exported or destroyed.  If that is
5778eda14cbcSMatt Macy 			 * the case, the config cache is out of sync and we
5779eda14cbcSMatt Macy 			 * should remove the pool from the namespace.
5780eda14cbcSMatt Macy 			 */
5781eda14cbcSMatt Macy 			spa_unload(spa);
5782eda14cbcSMatt Macy 			spa_deactivate(spa);
5783eda14cbcSMatt Macy 			spa_write_cachefile(spa, B_TRUE, B_TRUE, B_FALSE);
5784eda14cbcSMatt Macy 			spa_remove(spa);
5785eda14cbcSMatt Macy 			if (locked)
5786eda14cbcSMatt Macy 				mutex_exit(&spa_namespace_lock);
5787eda14cbcSMatt Macy 			return (SET_ERROR(ENOENT));
5788eda14cbcSMatt Macy 		}
5789eda14cbcSMatt Macy 
5790eda14cbcSMatt Macy 		if (error) {
5791eda14cbcSMatt Macy 			/*
5792eda14cbcSMatt Macy 			 * We can't open the pool, but we still have useful
5793eda14cbcSMatt Macy 			 * information: the state of each vdev after the
5794eda14cbcSMatt Macy 			 * attempted vdev_open().  Return this to the user.
5795eda14cbcSMatt Macy 			 */
5796eda14cbcSMatt Macy 			if (config != NULL && spa->spa_config) {
5797eda14cbcSMatt Macy 				*config = fnvlist_dup(spa->spa_config);
5798eda14cbcSMatt Macy 				fnvlist_add_nvlist(*config,
5799eda14cbcSMatt Macy 				    ZPOOL_CONFIG_LOAD_INFO,
5800eda14cbcSMatt Macy 				    spa->spa_load_info);
5801eda14cbcSMatt Macy 			}
5802eda14cbcSMatt Macy 			spa_unload(spa);
5803eda14cbcSMatt Macy 			spa_deactivate(spa);
5804eda14cbcSMatt Macy 			spa->spa_last_open_failed = error;
5805eda14cbcSMatt Macy 			if (locked)
5806eda14cbcSMatt Macy 				mutex_exit(&spa_namespace_lock);
5807eda14cbcSMatt Macy 			*spapp = NULL;
5808eda14cbcSMatt Macy 			return (error);
5809eda14cbcSMatt Macy 		}
5810eda14cbcSMatt Macy 	}
5811eda14cbcSMatt Macy 
5812eda14cbcSMatt Macy 	spa_open_ref(spa, tag);
5813eda14cbcSMatt Macy 
5814eda14cbcSMatt Macy 	if (config != NULL)
5815eda14cbcSMatt Macy 		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
5816eda14cbcSMatt Macy 
5817eda14cbcSMatt Macy 	/*
5818eda14cbcSMatt Macy 	 * If we've recovered the pool, pass back any information we
5819eda14cbcSMatt Macy 	 * gathered while doing the load.
5820eda14cbcSMatt Macy 	 */
5821eda14cbcSMatt Macy 	if (state == SPA_LOAD_RECOVER && config != NULL) {
5822eda14cbcSMatt Macy 		fnvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
5823eda14cbcSMatt Macy 		    spa->spa_load_info);
5824eda14cbcSMatt Macy 	}
5825eda14cbcSMatt Macy 
5826eda14cbcSMatt Macy 	if (locked) {
582781b22a98SMartin Matuska 		spa->spa_last_open_failed = 0;
582881b22a98SMartin Matuska 		spa->spa_last_ubsync_txg = 0;
582981b22a98SMartin Matuska 		spa->spa_load_txg = 0;
5830eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
5831681ce946SMartin Matuska 	}
5832681ce946SMartin Matuska 
583381b22a98SMartin Matuska 	if (firstopen)
583481b22a98SMartin Matuska 		zvol_create_minors_recursive(spa_name(spa));
5835eda14cbcSMatt Macy 
5836eda14cbcSMatt Macy 	*spapp = spa;
5837eda14cbcSMatt Macy 
5838eda14cbcSMatt Macy 	return (0);
5839eda14cbcSMatt Macy }
5840eda14cbcSMatt Macy 
5841eda14cbcSMatt Macy int
584281b22a98SMartin Matuska spa_open_rewind(const char *name, spa_t **spapp, const void *tag,
584381b22a98SMartin Matuska     nvlist_t *policy, nvlist_t **config)
58442a58b312SMartin Matuska {
58452a58b312SMartin Matuska 	return (spa_open_common(name, spapp, tag, policy, config));
5846eda14cbcSMatt Macy }
5847eda14cbcSMatt Macy 
5848eda14cbcSMatt Macy int
5849eda14cbcSMatt Macy spa_open(const char *name, spa_t **spapp, const void *tag)
58502a58b312SMartin Matuska {
58512a58b312SMartin Matuska 	return (spa_open_common(name, spapp, tag, NULL, NULL));
58522a58b312SMartin Matuska }
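
/*
 * A minimal usage sketch (illustrative only: "tank" is a hypothetical pool
 * name and error handling is elided). Opens take a reference that must be
 * dropped with spa_close():
 *
 *	spa_t *spa;
 *	if (spa_open("tank", &spa, FTAG) == 0) {
 *		... operate on the open pool ...
 *		spa_close(spa, FTAG);
 *	}
 */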
5853eda14cbcSMatt Macy 
5854eda14cbcSMatt Macy /*
5855eda14cbcSMatt Macy  * Look up the given spa_t, incrementing the inject count in the process,
5856eda14cbcSMatt Macy  * preventing it from being exported or destroyed.
5857eda14cbcSMatt Macy  */
5858eda14cbcSMatt Macy spa_t *
5859eda14cbcSMatt Macy spa_inject_addref(char *name)
5860eda14cbcSMatt Macy {
5861eda14cbcSMatt Macy 	spa_t *spa;
5862eda14cbcSMatt Macy 
5863eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
5864eda14cbcSMatt Macy 	if ((spa = spa_lookup(name)) == NULL) {
5865eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
5866eda14cbcSMatt Macy 		return (NULL);
5867eda14cbcSMatt Macy 	}
5868eda14cbcSMatt Macy 	spa->spa_inject_ref++;
5869eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
5870eda14cbcSMatt Macy 
5871eda14cbcSMatt Macy 	return (spa);
5872eda14cbcSMatt Macy }
5873eda14cbcSMatt Macy 
5874eda14cbcSMatt Macy void
5875eda14cbcSMatt Macy spa_inject_delref(spa_t *spa)
5876eda14cbcSMatt Macy {
587781b22a98SMartin Matuska 	mutex_enter(&spa_namespace_lock);
587881b22a98SMartin Matuska 	spa->spa_inject_ref--;
587981b22a98SMartin Matuska 	mutex_exit(&spa_namespace_lock);
5880eda14cbcSMatt Macy }
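
/*
 * Illustrative pairing (a sketch, not copied from the fault-injection code):
 * callers bracket their work with the addref/delref pair so the pool cannot
 * be exported or destroyed in between.
 *
 *	spa_t *spa = spa_inject_addref(name);
 *	if (spa != NULL) {
 *		... install or clear an injection handler ...
 *		spa_inject_delref(spa);
 *	}
 */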
5881681ce946SMartin Matuska 
5882681ce946SMartin Matuska /*
588381b22a98SMartin Matuska  * Add spare device information to the nvlist.
588481b22a98SMartin Matuska  */
5885eda14cbcSMatt Macy static void
5886eda14cbcSMatt Macy spa_add_spares(spa_t *spa, nvlist_t *config)
5887eda14cbcSMatt Macy {
5888eda14cbcSMatt Macy 	nvlist_t **spares;
5889eda14cbcSMatt Macy 	uint_t i, nspares;
5890eda14cbcSMatt Macy 	nvlist_t *nvroot;
589181b22a98SMartin Matuska 	uint64_t guid;
589281b22a98SMartin Matuska 	vdev_stat_t *vs;
5893eda14cbcSMatt Macy 	uint_t vsc;
5894eda14cbcSMatt Macy 	uint64_t pool;
5895eda14cbcSMatt Macy 
5896eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
5897eda14cbcSMatt Macy 
5898eda14cbcSMatt Macy 	if (spa->spa_spares.sav_count == 0)
5899eda14cbcSMatt Macy 		return;
5900eda14cbcSMatt Macy 
5901eda14cbcSMatt Macy 	nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
5902eda14cbcSMatt Macy 	VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
5903eda14cbcSMatt Macy 	    ZPOOL_CONFIG_SPARES, &spares, &nspares));
590481b22a98SMartin Matuska 	if (nspares != 0) {
590581b22a98SMartin Matuska 		fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
5906eda14cbcSMatt Macy 		    (const nvlist_t * const *)spares, nspares);
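		/*
		 * Re-fetch the array so that 'spares' now points at the
		 * copies embedded in the config (the nvlist add above copies
		 * its arguments); the loop below mutates those copies.
		 */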
5907eda14cbcSMatt Macy 		VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
5908eda14cbcSMatt Macy 		    &spares, &nspares));
5909eda14cbcSMatt Macy 
5910eda14cbcSMatt Macy 		/*
5911eda14cbcSMatt Macy 		 * Go through and find any spares which have since been
5912eda14cbcSMatt Macy 		 * repurposed as an active spare.  If this is the case, update
5913eda14cbcSMatt Macy 		 * their status appropriately.
5914eda14cbcSMatt Macy 		 */
5915eda14cbcSMatt Macy 		for (i = 0; i < nspares; i++) {
5916eda14cbcSMatt Macy 			guid = fnvlist_lookup_uint64(spares[i],
5917eda14cbcSMatt Macy 			    ZPOOL_CONFIG_GUID);
5918eda14cbcSMatt Macy 			VERIFY0(nvlist_lookup_uint64_array(spares[i],
5919eda14cbcSMatt Macy 			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc));
5920eda14cbcSMatt Macy 			if (spa_spare_exists(guid, &pool, NULL) &&
5921eda14cbcSMatt Macy 			    pool != 0ULL) {
5922eda14cbcSMatt Macy 				vs->vs_state = VDEV_STATE_CANT_OPEN;
5923eda14cbcSMatt Macy 				vs->vs_aux = VDEV_AUX_SPARED;
5924eda14cbcSMatt Macy 			} else {
5925eda14cbcSMatt Macy 				vs->vs_state =
5926eda14cbcSMatt Macy 				    spa->spa_spares.sav_vdevs[i]->vdev_state;
5927eda14cbcSMatt Macy 			}
5928eda14cbcSMatt Macy 		}
5929eda14cbcSMatt Macy 	}
5930eda14cbcSMatt Macy }
5931eda14cbcSMatt Macy 
5932eda14cbcSMatt Macy /*
5933eda14cbcSMatt Macy  * Add l2cache device information to the nvlist, including vdev stats.
5934eda14cbcSMatt Macy  */
5935eda14cbcSMatt Macy static void
5936eda14cbcSMatt Macy spa_add_l2cache(spa_t *spa, nvlist_t *config)
5937eda14cbcSMatt Macy {
5938eda14cbcSMatt Macy 	nvlist_t **l2cache;
5939eda14cbcSMatt Macy 	uint_t i, j, nl2cache;
5940eda14cbcSMatt Macy 	nvlist_t *nvroot;
5941eda14cbcSMatt Macy 	uint64_t guid;
5942eda14cbcSMatt Macy 	vdev_t *vd;
5943eda14cbcSMatt Macy 	vdev_stat_t *vs;
5944eda14cbcSMatt Macy 	uint_t vsc;
5945eda14cbcSMatt Macy 
5946eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
5947eda14cbcSMatt Macy 
5948eda14cbcSMatt Macy 	if (spa->spa_l2cache.sav_count == 0)
5949eda14cbcSMatt Macy 		return;
5950eda14cbcSMatt Macy 
5951eda14cbcSMatt Macy 	nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
5952eda14cbcSMatt Macy 	VERIFY0(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
5953eda14cbcSMatt Macy 	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache));
5954eda14cbcSMatt Macy 	if (nl2cache != 0) {
5955eda14cbcSMatt Macy 		fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
5956eda14cbcSMatt Macy 		    (const nvlist_t * const *)l2cache, nl2cache);
5957eda14cbcSMatt Macy 		VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
5958eda14cbcSMatt Macy 		    &l2cache, &nl2cache));
5959eda14cbcSMatt Macy 
5960eda14cbcSMatt Macy 		/*
5961eda14cbcSMatt Macy 		 * Update level 2 cache device stats.
5962eda14cbcSMatt Macy 		 */
5963eda14cbcSMatt Macy 
5964eda14cbcSMatt Macy 		for (i = 0; i < nl2cache; i++) {
5965eda14cbcSMatt Macy 			guid = fnvlist_lookup_uint64(l2cache[i],
5966eda14cbcSMatt Macy 			    ZPOOL_CONFIG_GUID);
5967eda14cbcSMatt Macy 
5968eda14cbcSMatt Macy 			vd = NULL;
5969eda14cbcSMatt Macy 			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
5970eda14cbcSMatt Macy 				if (guid ==
5971eda14cbcSMatt Macy 				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
5972eda14cbcSMatt Macy 					vd = spa->spa_l2cache.sav_vdevs[j];
5973eda14cbcSMatt Macy 					break;
5974eda14cbcSMatt Macy 				}
5975eda14cbcSMatt Macy 			}
5976eda14cbcSMatt Macy 			ASSERT(vd != NULL);
5977eda14cbcSMatt Macy 
5978eda14cbcSMatt Macy 			VERIFY0(nvlist_lookup_uint64_array(l2cache[i],
5979eda14cbcSMatt Macy 			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc));
5980eda14cbcSMatt Macy 			vdev_get_stats(vd, vs);
5981eda14cbcSMatt Macy 			vdev_config_generate_stats(vd, l2cache[i]);
5982eda14cbcSMatt Macy 
5983eda14cbcSMatt Macy 		}
5984eda14cbcSMatt Macy 	}
5985eda14cbcSMatt Macy }
5986eda14cbcSMatt Macy 
5987eda14cbcSMatt Macy static void
5988eda14cbcSMatt Macy spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features)
5989eda14cbcSMatt Macy {
5990eda14cbcSMatt Macy 	zap_cursor_t zc;
5991eda14cbcSMatt Macy 	zap_attribute_t za;
5992eda14cbcSMatt Macy 
5993eda14cbcSMatt Macy 	if (spa->spa_feat_for_read_obj != 0) {
5994eda14cbcSMatt Macy 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
5995eda14cbcSMatt Macy 		    spa->spa_feat_for_read_obj);
5996eda14cbcSMatt Macy 		    zap_cursor_retrieve(&zc, &za) == 0;
5997eda14cbcSMatt Macy 		    zap_cursor_advance(&zc)) {
5998eda14cbcSMatt Macy 			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
5999eda14cbcSMatt Macy 			    za.za_num_integers == 1);
6000eda14cbcSMatt Macy 			VERIFY0(nvlist_add_uint64(features, za.za_name,
6001eda14cbcSMatt Macy 			    za.za_first_integer));
6002eda14cbcSMatt Macy 		}
6003eda14cbcSMatt Macy 		zap_cursor_fini(&zc);
6004eda14cbcSMatt Macy 	}
6005eda14cbcSMatt Macy 
6006eda14cbcSMatt Macy 	if (spa->spa_feat_for_write_obj != 0) {
6007eda14cbcSMatt Macy 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
6008eda14cbcSMatt Macy 		    spa->spa_feat_for_write_obj);
6009eda14cbcSMatt Macy 		    zap_cursor_retrieve(&zc, &za) == 0;
6010eda14cbcSMatt Macy 		    zap_cursor_advance(&zc)) {
6011eda14cbcSMatt Macy 			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
6012eda14cbcSMatt Macy 			    za.za_num_integers == 1);
6013eda14cbcSMatt Macy 			VERIFY0(nvlist_add_uint64(features, za.za_name,
6014eda14cbcSMatt Macy 			    za.za_first_integer));
6015eda14cbcSMatt Macy 		}
6016eda14cbcSMatt Macy 		zap_cursor_fini(&zc);
6017eda14cbcSMatt Macy 	}
6018eda14cbcSMatt Macy }
6019eda14cbcSMatt Macy 
602081b22a98SMartin Matuska static void
602181b22a98SMartin Matuska spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features)
6022eda14cbcSMatt Macy {
602381b22a98SMartin Matuska 	int i;
6024eda14cbcSMatt Macy 
602515f0b8c3SMartin Matuska 	for (i = 0; i < SPA_FEATURES; i++) {
6026eda14cbcSMatt Macy 		zfeature_info_t feature = spa_feature_table[i];
6027eda14cbcSMatt Macy 		uint64_t refcount;
602881b22a98SMartin Matuska 
6029eda14cbcSMatt Macy 		if (feature_get_refcount(spa, &feature, &refcount) != 0)
603081b22a98SMartin Matuska 			continue;
603181b22a98SMartin Matuska 
6032eda14cbcSMatt Macy 		VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount));
603381b22a98SMartin Matuska 	}
6034eda14cbcSMatt Macy }
6035eda14cbcSMatt Macy 
6036eda14cbcSMatt Macy /*
6037eda14cbcSMatt Macy  * Store a list of pool features and their reference counts in the
6038eda14cbcSMatt Macy  * config.
6039eda14cbcSMatt Macy  *
6040eda14cbcSMatt Macy  * The first time this is called on a spa, allocate a new nvlist, fetch
6041eda14cbcSMatt Macy  * the pool features and reference counts from disk, then save the list
6042eda14cbcSMatt Macy  * in the spa. In subsequent calls on the same spa use the saved nvlist
6043eda14cbcSMatt Macy  * and refresh its values from the cached reference counts.  This
6044eda14cbcSMatt Macy  * ensures we don't block here on I/O on a suspended pool so 'zpool
6045eda14cbcSMatt Macy  * clear' can resume the pool.
6046eda14cbcSMatt Macy  */
6047eda14cbcSMatt Macy static void
6048eda14cbcSMatt Macy spa_add_feature_stats(spa_t *spa, nvlist_t *config)
6049eda14cbcSMatt Macy {
6050eda14cbcSMatt Macy 	nvlist_t *features;
6051eda14cbcSMatt Macy 
6052eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
6053eda14cbcSMatt Macy 
6054eda14cbcSMatt Macy 	mutex_enter(&spa->spa_feat_stats_lock);
6055eda14cbcSMatt Macy 	features = spa->spa_feat_stats;
6056eda14cbcSMatt Macy 
6057eda14cbcSMatt Macy 	if (features != NULL) {
6058eda14cbcSMatt Macy 		spa_feature_stats_from_cache(spa, features);
6059eda14cbcSMatt Macy 	} else {
6060eda14cbcSMatt Macy 		VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP));
6061eda14cbcSMatt Macy 		spa->spa_feat_stats = features;
6062eda14cbcSMatt Macy 		spa_feature_stats_from_disk(spa, features);
6063eda14cbcSMatt Macy 	}
6064eda14cbcSMatt Macy 
6065eda14cbcSMatt Macy 	VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
6066eda14cbcSMatt Macy 	    features));
6067eda14cbcSMatt Macy 
6068eda14cbcSMatt Macy 	mutex_exit(&spa->spa_feat_stats_lock);
6069eda14cbcSMatt Macy }
6070eda14cbcSMatt Macy 
6071eda14cbcSMatt Macy int
6072eda14cbcSMatt Macy spa_get_stats(const char *name, nvlist_t **config,
6073eda14cbcSMatt Macy     char *altroot, size_t buflen)
6074eda14cbcSMatt Macy {
6075eda14cbcSMatt Macy 	int error;
6076eda14cbcSMatt Macy 	spa_t *spa;
6077eda14cbcSMatt Macy 
6078eda14cbcSMatt Macy 	*config = NULL;
6079eda14cbcSMatt Macy 	error = spa_open_common(name, &spa, FTAG, NULL, config);
6080eda14cbcSMatt Macy 
6081eda14cbcSMatt Macy 	if (spa != NULL) {
6082eda14cbcSMatt Macy 		/*
6083eda14cbcSMatt Macy 		 * This still leaves a window of inconsistency where the spares
6084eda14cbcSMatt Macy 		 * or l2cache devices could change and the config would be
6085eda14cbcSMatt Macy 		 * self-inconsistent.
6086eda14cbcSMatt Macy 		 */
6087eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6088eda14cbcSMatt Macy 
6089eda14cbcSMatt Macy 		if (*config != NULL) {
6090eda14cbcSMatt Macy 			uint64_t loadtimes[2];
6091eda14cbcSMatt Macy 
6092eda14cbcSMatt Macy 			loadtimes[0] = spa->spa_loaded_ts.tv_sec;
6093eda14cbcSMatt Macy 			loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
6094eda14cbcSMatt Macy 			fnvlist_add_uint64_array(*config,
6095eda14cbcSMatt Macy 			    ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2);
6096eda14cbcSMatt Macy 
6097eda14cbcSMatt Macy 			fnvlist_add_uint64(*config,
6098eda14cbcSMatt Macy 			    ZPOOL_CONFIG_ERRCOUNT,
6099eda14cbcSMatt Macy 			    spa_approx_errlog_size(spa));
6100eda14cbcSMatt Macy 
6101eda14cbcSMatt Macy 			if (spa_suspended(spa)) {
6102eda14cbcSMatt Macy 				fnvlist_add_uint64(*config,
6103eda14cbcSMatt Macy 				    ZPOOL_CONFIG_SUSPENDED,
6104eda14cbcSMatt Macy 				    spa->spa_failmode);
6105eda14cbcSMatt Macy 				fnvlist_add_uint64(*config,
6106eda14cbcSMatt Macy 				    ZPOOL_CONFIG_SUSPENDED_REASON,
6107eda14cbcSMatt Macy 				    spa->spa_suspended);
6108eda14cbcSMatt Macy 			}
6109eda14cbcSMatt Macy 
6110eda14cbcSMatt Macy 			spa_add_spares(spa, *config);
6111eda14cbcSMatt Macy 			spa_add_l2cache(spa, *config);
6112eda14cbcSMatt Macy 			spa_add_feature_stats(spa, *config);
6113eda14cbcSMatt Macy 		}
6114eda14cbcSMatt Macy 	}
6115eda14cbcSMatt Macy 
6116eda14cbcSMatt Macy 	/*
6117eda14cbcSMatt Macy 	 * We want to get the alternate root even for faulted pools, so we cheat
6118eda14cbcSMatt Macy 	 * and call spa_lookup() directly.
6119eda14cbcSMatt Macy 	 */
6120eda14cbcSMatt Macy 	if (altroot) {
6121eda14cbcSMatt Macy 		if (spa == NULL) {
6122eda14cbcSMatt Macy 			mutex_enter(&spa_namespace_lock);
6123eda14cbcSMatt Macy 			spa = spa_lookup(name);
6124eda14cbcSMatt Macy 			if (spa)
612581b22a98SMartin Matuska 				spa_altroot(spa, altroot, buflen);
612681b22a98SMartin Matuska 			else
6127eda14cbcSMatt Macy 				altroot[0] = '\0';
6128eda14cbcSMatt Macy 			spa = NULL;
6129eda14cbcSMatt Macy 			mutex_exit(&spa_namespace_lock);
6130eda14cbcSMatt Macy 		} else {
6131eda14cbcSMatt Macy 			spa_altroot(spa, altroot, buflen);
6132eda14cbcSMatt Macy 		}
6133eda14cbcSMatt Macy 	}
6134eda14cbcSMatt Macy 
6135eda14cbcSMatt Macy 	if (spa != NULL) {
6136eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
6137eda14cbcSMatt Macy 		spa_close(spa, FTAG);
6138eda14cbcSMatt Macy 	}
6139eda14cbcSMatt Macy 
6140eda14cbcSMatt Macy 	return (error);
6141eda14cbcSMatt Macy }
6142eda14cbcSMatt Macy 
6143eda14cbcSMatt Macy /*
6144eda14cbcSMatt Macy  * Validate that the auxiliary device array is well formed.  We must have an
6145eda14cbcSMatt Macy  * array of nvlists, each of which describes a valid leaf vdev.  If this is an
6146eda14cbcSMatt Macy  * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
6147eda14cbcSMatt Macy  * specified, as long as they are well-formed.
6148eda14cbcSMatt Macy  */
6149eda14cbcSMatt Macy static int
6150eda14cbcSMatt Macy spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
6151eda14cbcSMatt Macy     spa_aux_vdev_t *sav, const char *config, uint64_t version,
6152eda14cbcSMatt Macy     vdev_labeltype_t label)
6153eda14cbcSMatt Macy {
6154eda14cbcSMatt Macy 	nvlist_t **dev;
6155eda14cbcSMatt Macy 	uint_t i, ndev;
6156eda14cbcSMatt Macy 	vdev_t *vd;
6157eda14cbcSMatt Macy 	int error;
6158eda14cbcSMatt Macy 
6159eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
6160eda14cbcSMatt Macy 
6161eda14cbcSMatt Macy 	/*
6162eda14cbcSMatt Macy 	 * It's acceptable to have no devs specified.
6163eda14cbcSMatt Macy 	 */
6164eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
6165eda14cbcSMatt Macy 		return (0);
6166eda14cbcSMatt Macy 
6167eda14cbcSMatt Macy 	if (ndev == 0)
6168eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
6169eda14cbcSMatt Macy 
6170eda14cbcSMatt Macy 	/*
6171eda14cbcSMatt Macy 	 * Make sure the pool is formatted with a version that supports this
6172eda14cbcSMatt Macy 	 * device type.
6173eda14cbcSMatt Macy 	 */
6174eda14cbcSMatt Macy 	if (spa_version(spa) < version)
6175eda14cbcSMatt Macy 		return (SET_ERROR(ENOTSUP));
6176eda14cbcSMatt Macy 
617781b22a98SMartin Matuska 	/*
617881b22a98SMartin Matuska 	 * Set the pending device list so we correctly handle device in-use
6179eda14cbcSMatt Macy 	 * checking.
6180eda14cbcSMatt Macy 	 */
6181eda14cbcSMatt Macy 	sav->sav_pending = dev;
6182eda14cbcSMatt Macy 	sav->sav_npending = ndev;
618381b22a98SMartin Matuska 
6184eda14cbcSMatt Macy 	for (i = 0; i < ndev; i++) {
618581b22a98SMartin Matuska 		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
6186eda14cbcSMatt Macy 		    mode)) != 0)
618781b22a98SMartin Matuska 			goto out;
6188eda14cbcSMatt Macy 
6189681ce946SMartin Matuska 		if (!vd->vdev_ops->vdev_op_leaf) {
6190681ce946SMartin Matuska 			vdev_free(vd);
6191eda14cbcSMatt Macy 			error = SET_ERROR(EINVAL);
6192eda14cbcSMatt Macy 			goto out;
6193eda14cbcSMatt Macy 		}
6194eda14cbcSMatt Macy 
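		/*
		 * Aux devices are always leaves, so each acts as its own
		 * top-level vdev for the label initialization below.
		 */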
6195eda14cbcSMatt Macy 		vd->vdev_top = vd;
6196eda14cbcSMatt Macy 
6197eda14cbcSMatt Macy 		if ((error = vdev_open(vd)) == 0 &&
619881b22a98SMartin Matuska 		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
6199681ce946SMartin Matuska 			fnvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
6200681ce946SMartin Matuska 			    vd->vdev_guid);
6201eda14cbcSMatt Macy 		}
6202eda14cbcSMatt Macy 
6203eda14cbcSMatt Macy 		vdev_free(vd);
6204eda14cbcSMatt Macy 
6205eda14cbcSMatt Macy 		if (error &&
6206eda14cbcSMatt Macy 		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
6207eda14cbcSMatt Macy 			goto out;
6208eda14cbcSMatt Macy 		else
6209eda14cbcSMatt Macy 			error = 0;
6210eda14cbcSMatt Macy 	}
6211eda14cbcSMatt Macy 
6212eda14cbcSMatt Macy out:
6213eda14cbcSMatt Macy 	sav->sav_pending = NULL;
6214eda14cbcSMatt Macy 	sav->sav_npending = 0;
6215eda14cbcSMatt Macy 	return (error);
6216eda14cbcSMatt Macy }
6217eda14cbcSMatt Macy 
6218eda14cbcSMatt Macy static int
6219eda14cbcSMatt Macy spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
6220eda14cbcSMatt Macy {
6221eda14cbcSMatt Macy 	int error;
6222eda14cbcSMatt Macy 
6223eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
6224eda14cbcSMatt Macy 
6225eda14cbcSMatt Macy 	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
6226eda14cbcSMatt Macy 	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
6227eda14cbcSMatt Macy 	    VDEV_LABEL_SPARE)) != 0) {
6228eda14cbcSMatt Macy 		return (error);
6229eda14cbcSMatt Macy 	}
6230eda14cbcSMatt Macy 
6231eda14cbcSMatt Macy 	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
6232eda14cbcSMatt Macy 	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
6233eda14cbcSMatt Macy 	    VDEV_LABEL_L2CACHE));
6234eda14cbcSMatt Macy }
6235eda14cbcSMatt Macy 
6236eda14cbcSMatt Macy static void
6237eda14cbcSMatt Macy spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
6238eda14cbcSMatt Macy     const char *config)
6239eda14cbcSMatt Macy {
6240eda14cbcSMatt Macy 	int i;
6241eda14cbcSMatt Macy 
6242eda14cbcSMatt Macy 	if (sav->sav_config != NULL) {
6243eda14cbcSMatt Macy 		nvlist_t **olddevs;
6244eda14cbcSMatt Macy 		uint_t oldndevs;
6245eda14cbcSMatt Macy 		nvlist_t **newdevs;
6246eda14cbcSMatt Macy 
6247eda14cbcSMatt Macy 		/*
6248eda14cbcSMatt Macy 		 * Generate new dev list by concatenating with the
6249eda14cbcSMatt Macy 		 * current dev list.
62502a58b312SMartin Matuska 		 */
6251eda14cbcSMatt Macy 		VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config, config,
6252eda14cbcSMatt Macy 		    &olddevs, &oldndevs));
6253eda14cbcSMatt Macy 
6254eda14cbcSMatt Macy 		newdevs = kmem_alloc(sizeof (void *) *
6255eda14cbcSMatt Macy 		    (ndevs + oldndevs), KM_SLEEP);
6256eda14cbcSMatt Macy 		for (i = 0; i < oldndevs; i++)
6257eda14cbcSMatt Macy 			newdevs[i] = fnvlist_dup(olddevs[i]);
62587877fdebSMatt Macy 		for (i = 0; i < ndevs; i++)
6259eda14cbcSMatt Macy 			newdevs[i + oldndevs] = fnvlist_dup(devs[i]);
6260eda14cbcSMatt Macy 
6261eda14cbcSMatt Macy 		fnvlist_remove(sav->sav_config, config);
6262eda14cbcSMatt Macy 
62632a58b312SMartin Matuska 		fnvlist_add_nvlist_array(sav->sav_config, config,
62642a58b312SMartin Matuska 		    (const nvlist_t * const *)newdevs, ndevs + oldndevs);
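		/*
		 * The nvlist add above copied newdevs into sav_config, so
		 * the temporary array and its elements can be freed here.
		 */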
6265eda14cbcSMatt Macy 		for (i = 0; i < oldndevs + ndevs; i++)
6266eda14cbcSMatt Macy 			nvlist_free(newdevs[i]);
6267eda14cbcSMatt Macy 		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
6268eda14cbcSMatt Macy 	} else {
6269eda14cbcSMatt Macy 		/*
6270eda14cbcSMatt Macy 		 * Generate a new dev list.
6271eda14cbcSMatt Macy 		 */
6272eda14cbcSMatt Macy 		sav->sav_config = fnvlist_alloc();
6273eda14cbcSMatt Macy 		fnvlist_add_nvlist_array(sav->sav_config, config,
6274eda14cbcSMatt Macy 		    (const nvlist_t * const *)devs, ndevs);
6275eda14cbcSMatt Macy 	}
6276eda14cbcSMatt Macy }
6277eda14cbcSMatt Macy 
6278eda14cbcSMatt Macy /*
6279eda14cbcSMatt Macy  * Stop and drop level 2 ARC devices
6280eda14cbcSMatt Macy  */
6281eda14cbcSMatt Macy void
6282eda14cbcSMatt Macy spa_l2cache_drop(spa_t *spa)
6283eda14cbcSMatt Macy {
6284eda14cbcSMatt Macy 	vdev_t *vd;
6285eda14cbcSMatt Macy 	int i;
6286eda14cbcSMatt Macy 	spa_aux_vdev_t *sav = &spa->spa_l2cache;
6287eda14cbcSMatt Macy 
6288eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_count; i++) {
6289eda14cbcSMatt Macy 		uint64_t pool;
6290eda14cbcSMatt Macy 
6291eda14cbcSMatt Macy 		vd = sav->sav_vdevs[i];
6292eda14cbcSMatt Macy 		ASSERT(vd != NULL);
6293eda14cbcSMatt Macy 
6294eda14cbcSMatt Macy 		if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
6295eda14cbcSMatt Macy 		    pool != 0ULL && l2arc_vdev_present(vd))
6296eda14cbcSMatt Macy 			l2arc_remove_vdev(vd);
6297eda14cbcSMatt Macy 	}
6298eda14cbcSMatt Macy }
6299eda14cbcSMatt Macy 
6300eda14cbcSMatt Macy /*
6301eda14cbcSMatt Macy  * Verify encryption parameters for spa creation. If we are encrypting, we must
6302eda14cbcSMatt Macy  * have the encryption feature flag enabled.
6303eda14cbcSMatt Macy  */
6304eda14cbcSMatt Macy static int
6305eda14cbcSMatt Macy spa_create_check_encryption_params(dsl_crypto_params_t *dcp,
6306eda14cbcSMatt Macy     boolean_t has_encryption)
6307eda14cbcSMatt Macy {
6308eda14cbcSMatt Macy 	if (dcp->cp_crypt != ZIO_CRYPT_OFF &&
6309eda14cbcSMatt Macy 	    dcp->cp_crypt != ZIO_CRYPT_INHERIT &&
6310eda14cbcSMatt Macy 	    !has_encryption)
6311eda14cbcSMatt Macy 		return (SET_ERROR(ENOTSUP));
6312eda14cbcSMatt Macy 
6313eda14cbcSMatt Macy 	return (dmu_objset_create_crypt_check(NULL, dcp, NULL));
6314eda14cbcSMatt Macy }
6315eda14cbcSMatt Macy 
6316eda14cbcSMatt Macy /*
6317eda14cbcSMatt Macy  * Pool Creation
6318eda14cbcSMatt Macy  */
6319eda14cbcSMatt Macy int
6320eda14cbcSMatt Macy spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
6321eda14cbcSMatt Macy     nvlist_t *zplprops, dsl_crypto_params_t *dcp)
6322eda14cbcSMatt Macy {
6323eda14cbcSMatt Macy 	spa_t *spa;
6324eda14cbcSMatt Macy 	const char *altroot = NULL;
6325eda14cbcSMatt Macy 	vdev_t *rvd;
6326eda14cbcSMatt Macy 	dsl_pool_t *dp;
6327eda14cbcSMatt Macy 	dmu_tx_t *tx;
6328eda14cbcSMatt Macy 	int error = 0;
6329eda14cbcSMatt Macy 	uint64_t txg = TXG_INITIAL;
6330eda14cbcSMatt Macy 	nvlist_t **spares, **l2cache;
6331eda14cbcSMatt Macy 	uint_t nspares, nl2cache;
6332eda14cbcSMatt Macy 	uint64_t version, obj, ndraid = 0;
6333eda14cbcSMatt Macy 	boolean_t has_features;
6334eda14cbcSMatt Macy 	boolean_t has_encryption;
6335eda14cbcSMatt Macy 	boolean_t has_allocclass;
6336eda14cbcSMatt Macy 	spa_feature_t feat;
6337eda14cbcSMatt Macy 	const char *feat_name;
6338eda14cbcSMatt Macy 	const char *poolname;
6339eda14cbcSMatt Macy 	nvlist_t *nvl;
6340eda14cbcSMatt Macy 
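	/* Honor a temporary pool name (the "tname" property), if given. */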
6341eda14cbcSMatt Macy 	if (props == NULL ||
6342eda14cbcSMatt Macy 	    nvlist_lookup_string(props,
6343eda14cbcSMatt Macy 	    zpool_prop_to_name(ZPOOL_PROP_TNAME), &poolname) != 0)
6344eda14cbcSMatt Macy 		poolname = (char *)pool;
6345eda14cbcSMatt Macy 
6346eda14cbcSMatt Macy 	/*
6347eda14cbcSMatt Macy 	 * If this pool already exists, return failure.
6348eda14cbcSMatt Macy 	 */
6349eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
6350eda14cbcSMatt Macy 	if (spa_lookup(poolname) != NULL) {
6351eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
6352eda14cbcSMatt Macy 		return (SET_ERROR(EEXIST));
6353eda14cbcSMatt Macy 	}
6354eda14cbcSMatt Macy 
6355eda14cbcSMatt Macy 	/*
6356eda14cbcSMatt Macy 	 * Allocate a new spa_t structure.
6357eda14cbcSMatt Macy 	 */
6358eda14cbcSMatt Macy 	nvl = fnvlist_alloc();
6359eda14cbcSMatt Macy 	fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool);
6360eda14cbcSMatt Macy 	(void) nvlist_lookup_string(props,
6361eda14cbcSMatt Macy 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
6362eda14cbcSMatt Macy 	spa = spa_add(poolname, nvl, altroot);
6363eda14cbcSMatt Macy 	fnvlist_free(nvl);
6364eda14cbcSMatt Macy 	spa_activate(spa, spa_mode_global);
6365eda14cbcSMatt Macy 
6366eda14cbcSMatt Macy 	if (props && (error = spa_prop_validate(spa, props))) {
6367eda14cbcSMatt Macy 		spa_deactivate(spa);
6368eda14cbcSMatt Macy 		spa_remove(spa);
6369eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
6370eda14cbcSMatt Macy 		return (error);
6371eda14cbcSMatt Macy 	}
6372eda14cbcSMatt Macy 
6373eda14cbcSMatt Macy 	/*
6374eda14cbcSMatt Macy 	 * Temporary pool names should never be written to disk.
6375eda14cbcSMatt Macy 	 */
6376eda14cbcSMatt Macy 	if (poolname != pool)
6377eda14cbcSMatt Macy 		spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
6378eda14cbcSMatt Macy 
6379eda14cbcSMatt Macy 	has_features = B_FALSE;
63807877fdebSMatt Macy 	has_encryption = B_FALSE;
63817877fdebSMatt Macy 	has_allocclass = B_FALSE;
6382eda14cbcSMatt Macy 	for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
6383eda14cbcSMatt Macy 	    elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
6384eda14cbcSMatt Macy 		if (zpool_prop_feature(nvpair_name(elem))) {
6385eda14cbcSMatt Macy 			has_features = B_TRUE;
6386eda14cbcSMatt Macy 
6387eda14cbcSMatt Macy 			feat_name = strchr(nvpair_name(elem), '@') + 1;
6388eda14cbcSMatt Macy 			VERIFY0(zfeature_lookup_name(feat_name, &feat));
6389eda14cbcSMatt Macy 			if (feat == SPA_FEATURE_ENCRYPTION)
6390eda14cbcSMatt Macy 				has_encryption = B_TRUE;
6391eda14cbcSMatt Macy 			if (feat == SPA_FEATURE_ALLOCATION_CLASSES)
6392eda14cbcSMatt Macy 				has_allocclass = B_TRUE;
6393eda14cbcSMatt Macy 		}
6394eda14cbcSMatt Macy 	}
6395eda14cbcSMatt Macy 
6396eda14cbcSMatt Macy 	/* verify encryption params, if they were provided */
6397eda14cbcSMatt Macy 	if (dcp != NULL) {
6398eda14cbcSMatt Macy 		error = spa_create_check_encryption_params(dcp, has_encryption);
6399eda14cbcSMatt Macy 		if (error != 0) {
6400eda14cbcSMatt Macy 			spa_deactivate(spa);
6401eda14cbcSMatt Macy 			spa_remove(spa);
6402eda14cbcSMatt Macy 			mutex_exit(&spa_namespace_lock);
6403eda14cbcSMatt Macy 			return (error);
6404eda14cbcSMatt Macy 		}
6405eda14cbcSMatt Macy 	}
6406eda14cbcSMatt Macy 	if (!has_allocclass && zfs_special_devs(nvroot, NULL)) {
6407eda14cbcSMatt Macy 		spa_deactivate(spa);
6408eda14cbcSMatt Macy 		spa_remove(spa);
640981b22a98SMartin Matuska 		mutex_exit(&spa_namespace_lock);
641081b22a98SMartin Matuska 		return (ENOTSUP);
6411681ce946SMartin Matuska 	}
6412681ce946SMartin Matuska 
6413eda14cbcSMatt Macy 	if (has_features || nvlist_lookup_uint64(props,
6414eda14cbcSMatt Macy 	    zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
6415eda14cbcSMatt Macy 		version = SPA_VERSION;
6416eda14cbcSMatt Macy 	}
6417eda14cbcSMatt Macy 	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
6418eda14cbcSMatt Macy 
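	/*
	 * Start the uberblock one txg behind spa_first_txg so that the
	 * pool's very first sync strictly advances it.
	 */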
6419eda14cbcSMatt Macy 	spa->spa_first_txg = txg;
6420eda14cbcSMatt Macy 	spa->spa_uberblock.ub_txg = txg - 1;
6421eda14cbcSMatt Macy 	spa->spa_uberblock.ub_version = version;
6422eda14cbcSMatt Macy 	spa->spa_ubsync = spa->spa_uberblock;
6423eda14cbcSMatt Macy 	spa->spa_load_state = SPA_LOAD_CREATE;
6424681ce946SMartin Matuska 	spa->spa_removing_phys.sr_state = DSS_NONE;
6425681ce946SMartin Matuska 	spa->spa_removing_phys.sr_removing_vdev = -1;
642681b22a98SMartin Matuska 	spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
6427681ce946SMartin Matuska 	spa->spa_indirect_vdevs_loaded = B_TRUE;
6428681ce946SMartin Matuska 
6429eda14cbcSMatt Macy 	/*
6430eda14cbcSMatt Macy 	 * Create "The Godfather" zio to hold all async IOs
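	 * (one root zio per CPU, presumably to spread lock contention
	 * among concurrently issued async zios)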
6431eda14cbcSMatt Macy 	 */
6432eda14cbcSMatt Macy 	spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
6433eda14cbcSMatt Macy 	    KM_SLEEP);
6434eda14cbcSMatt Macy 	for (int i = 0; i < max_ncpus; i++) {
6435eda14cbcSMatt Macy 		spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
6436eda14cbcSMatt Macy 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
6437eda14cbcSMatt Macy 		    ZIO_FLAG_GODFATHER);
6438eda14cbcSMatt Macy 	}
6439eda14cbcSMatt Macy 
6440eda14cbcSMatt Macy 	/*
6441eda14cbcSMatt Macy 	 * Create the root vdev.
6442eda14cbcSMatt Macy 	 */
64432a58b312SMartin Matuska 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
64442a58b312SMartin Matuska 
64452a58b312SMartin Matuska 	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
64462a58b312SMartin Matuska 
6447eda14cbcSMatt Macy 	ASSERT(error != 0 || rvd != NULL);
6448eda14cbcSMatt Macy 	ASSERT(error != 0 || spa->spa_root_vdev == rvd);
6449eda14cbcSMatt Macy 
6450eda14cbcSMatt Macy 	if (error == 0 && !zfs_allocatable_devs(nvroot))
6451eda14cbcSMatt Macy 		error = SET_ERROR(EINVAL);
6452eda14cbcSMatt Macy 
6453eda14cbcSMatt Macy 	if (error == 0 &&
6454eda14cbcSMatt Macy 	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
6455eda14cbcSMatt Macy 	    (error = vdev_draid_spare_create(nvroot, rvd, &ndraid, 0)) == 0 &&
6456eda14cbcSMatt Macy 	    (error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) == 0) {
6457eda14cbcSMatt Macy 		/*
6458eda14cbcSMatt Macy 		 * Instantiate the metaslab groups (this will dirty the vdevs);
6459eda14cbcSMatt Macy 		 * we can no longer error exit past this point.
6460eda14cbcSMatt Macy 		 */
6461eda14cbcSMatt Macy 		for (int c = 0; error == 0 && c < rvd->vdev_children; c++) {
6462eda14cbcSMatt Macy 			vdev_t *vd = rvd->vdev_child[c];
6463eda14cbcSMatt Macy 
6464eda14cbcSMatt Macy 			vdev_metaslab_set_size(vd);
6465eda14cbcSMatt Macy 			vdev_expand(vd, txg);
6466eda14cbcSMatt Macy 		}
6467eda14cbcSMatt Macy 	}
6468eda14cbcSMatt Macy 
6469eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
6470eda14cbcSMatt Macy 
6471eda14cbcSMatt Macy 	if (error != 0) {
6472eda14cbcSMatt Macy 		spa_unload(spa);
6473eda14cbcSMatt Macy 		spa_deactivate(spa);
6474eda14cbcSMatt Macy 		spa_remove(spa);
6475eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
6476eda14cbcSMatt Macy 		return (error);
6477eda14cbcSMatt Macy 	}
6478eda14cbcSMatt Macy 
6479eda14cbcSMatt Macy 	/*
6480eda14cbcSMatt Macy 	 * Get the list of spares, if specified.
6481eda14cbcSMatt Macy 	 */
6482eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
6483eda14cbcSMatt Macy 	    &spares, &nspares) == 0) {
6484eda14cbcSMatt Macy 		spa->spa_spares.sav_config = fnvlist_alloc();
6485eda14cbcSMatt Macy 		fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
6486eda14cbcSMatt Macy 		    ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
6487eda14cbcSMatt Macy 		    nspares);
6488eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6489eda14cbcSMatt Macy 		spa_load_spares(spa);
6490eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
6491eda14cbcSMatt Macy 		spa->spa_spares.sav_sync = B_TRUE;
6492eda14cbcSMatt Macy 	}
6493eda14cbcSMatt Macy 
6494eda14cbcSMatt Macy 	/*
6495eda14cbcSMatt Macy 	 * Get the list of level 2 cache devices, if specified.
6496eda14cbcSMatt Macy 	 */
6497eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
6498eda14cbcSMatt Macy 	    &l2cache, &nl2cache) == 0) {
6499eda14cbcSMatt Macy 		VERIFY0(nvlist_alloc(&spa->spa_l2cache.sav_config,
6500eda14cbcSMatt Macy 		    NV_UNIQUE_NAME, KM_SLEEP));
6501eda14cbcSMatt Macy 		fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
6502eda14cbcSMatt Macy 		    ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache,
6503eda14cbcSMatt Macy 		    nl2cache);
6504eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6505eda14cbcSMatt Macy 		spa_load_l2cache(spa);
6506eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
6507eda14cbcSMatt Macy 		spa->spa_l2cache.sav_sync = B_TRUE;
6508eda14cbcSMatt Macy 	}
6509eda14cbcSMatt Macy 
6510eda14cbcSMatt Macy 	spa->spa_is_initializing = B_TRUE;
6511eda14cbcSMatt Macy 	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg);
6512eda14cbcSMatt Macy 	spa->spa_is_initializing = B_FALSE;
6513eda14cbcSMatt Macy 
6514eda14cbcSMatt Macy 	/*
6515eda14cbcSMatt Macy 	 * Create DDTs (dedup tables).
6516eda14cbcSMatt Macy 	 */
6517eda14cbcSMatt Macy 	ddt_create(spa);
6518eda14cbcSMatt Macy 	/*
6519eda14cbcSMatt Macy 	 * Create BRT table and BRT table object.
6520eda14cbcSMatt Macy 	 */
6521eda14cbcSMatt Macy 	brt_create(spa);
6522eda14cbcSMatt Macy 
6523eda14cbcSMatt Macy 	spa_update_dspace(spa);
6524eda14cbcSMatt Macy 
6525eda14cbcSMatt Macy 	tx = dmu_tx_create_assigned(dp, txg);
6526eda14cbcSMatt Macy 
65277877fdebSMatt Macy 	/*
65287877fdebSMatt Macy 	 * Create the pool's history object.
65297877fdebSMatt Macy 	 */
6530eda14cbcSMatt Macy 	if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history)
6531eda14cbcSMatt Macy 		spa_history_create_obj(spa, tx);
6532eda14cbcSMatt Macy 
6533eda14cbcSMatt Macy 	spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
6534eda14cbcSMatt Macy 	spa_history_log_version(spa, "create", tx);
6535eda14cbcSMatt Macy 
6536eda14cbcSMatt Macy 	/*
6537eda14cbcSMatt Macy 	 * Create the pool config object.
6538eda14cbcSMatt Macy 	 */
6539be181ee2SMartin Matuska 	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
6540eda14cbcSMatt Macy 	    DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
6541eda14cbcSMatt Macy 	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
6542eda14cbcSMatt Macy 
6543eda14cbcSMatt Macy 	if (zap_add(spa->spa_meta_objset,
6544eda14cbcSMatt Macy 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
6545eda14cbcSMatt Macy 	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
6546eda14cbcSMatt Macy 		cmn_err(CE_PANIC, "failed to add pool config");
6547eda14cbcSMatt Macy 	}
6548eda14cbcSMatt Macy 
6549c03c5b1cSMartin Matuska 	if (zap_add(spa->spa_meta_objset,
6550c03c5b1cSMartin Matuska 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
6551eda14cbcSMatt Macy 	    sizeof (uint64_t), 1, &version, tx) != 0) {
6552eda14cbcSMatt Macy 		cmn_err(CE_PANIC, "failed to add pool version");
6553eda14cbcSMatt Macy 	}
6554eda14cbcSMatt Macy 
6555eda14cbcSMatt Macy 	/* Newly created pools with the right version are always deflated. */
6556eda14cbcSMatt Macy 	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
6557eda14cbcSMatt Macy 		spa->spa_deflate = TRUE;
6558eda14cbcSMatt Macy 		if (zap_add(spa->spa_meta_objset,
6559eda14cbcSMatt Macy 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
6560eda14cbcSMatt Macy 		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
6561eda14cbcSMatt Macy 			cmn_err(CE_PANIC, "failed to add deflate");
6562eda14cbcSMatt Macy 		}
65632a58b312SMartin Matuska 	}
6564eda14cbcSMatt Macy 
6565eda14cbcSMatt Macy 	/*
6566eda14cbcSMatt Macy 	 * Create the deferred-free bpobj.  Turn off compression
6567eda14cbcSMatt Macy 	 * because sync-to-convergence takes longer if the blocksize
6568eda14cbcSMatt Macy 	 * keeps changing.
6569eda14cbcSMatt Macy 	 */
6570eda14cbcSMatt Macy 	obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
6571eda14cbcSMatt Macy 	dmu_object_set_compress(spa->spa_meta_objset, obj,
6572eda14cbcSMatt Macy 	    ZIO_COMPRESS_OFF, tx);
6573eda14cbcSMatt Macy 	if (zap_add(spa->spa_meta_objset,
6574eda14cbcSMatt Macy 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
6575eda14cbcSMatt Macy 	    sizeof (uint64_t), 1, &obj, tx) != 0) {
6576eda14cbcSMatt Macy 		cmn_err(CE_PANIC, "failed to add bpobj");
6577eda14cbcSMatt Macy 	}
6578eda14cbcSMatt Macy 	VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
6579eda14cbcSMatt Macy 	    spa->spa_meta_objset, obj));
6580eda14cbcSMatt Macy 
6581eda14cbcSMatt Macy 	/*
6582eda14cbcSMatt Macy 	 * Generate some random noise for salted checksums to operate on.
6583eda14cbcSMatt Macy 	 */
6584eda14cbcSMatt Macy 	(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
6585eda14cbcSMatt Macy 	    sizeof (spa->spa_cksum_salt.zcs_bytes));
6586eda14cbcSMatt Macy 
6587eda14cbcSMatt Macy 	/*
6588eda14cbcSMatt Macy 	 * Set pool properties.
6589eda14cbcSMatt Macy 	 */
6590eda14cbcSMatt Macy 	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
6591eda14cbcSMatt Macy 	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
6592eda14cbcSMatt Macy 	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
6593eda14cbcSMatt Macy 	spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
6594eda14cbcSMatt Macy 	spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
6595eda14cbcSMatt Macy 	spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM);
6596eda14cbcSMatt Macy 
6597eda14cbcSMatt Macy 	if (props != NULL) {
6598eda14cbcSMatt Macy 		spa_configfile_set(spa, props, B_FALSE);
6599eda14cbcSMatt Macy 		spa_sync_props(props, tx);
6600eda14cbcSMatt Macy 	}
6601eda14cbcSMatt Macy 
6602be181ee2SMartin Matuska 	for (int i = 0; i < ndraid; i++)
6603eda14cbcSMatt Macy 		spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
6604eda14cbcSMatt Macy 
6605eda14cbcSMatt Macy 	dmu_tx_commit(tx);
6606eda14cbcSMatt Macy 
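	/*
	 * Start the syncing context and the MMP thread, then wait for the
	 * creation txg to be committed to disk.
	 */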
6607eda14cbcSMatt Macy 	spa->spa_sync_on = B_TRUE;
6608eda14cbcSMatt Macy 	txg_sync_start(dp);
6609eda14cbcSMatt Macy 	mmp_thread_start(spa);
6610eda14cbcSMatt Macy 	txg_wait_synced(dp, txg);
6611eda14cbcSMatt Macy 
6612eda14cbcSMatt Macy 	spa_spawn_aux_threads(spa);
6613eda14cbcSMatt Macy 
6614eda14cbcSMatt Macy 	spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
6615eda14cbcSMatt Macy 
6616eda14cbcSMatt Macy 	/*
6617eda14cbcSMatt Macy 	 * Don't count references from objsets that are already closed
6618eda14cbcSMatt Macy 	 * and are making their way through the eviction process.
6619eda14cbcSMatt Macy 	 */
6620eda14cbcSMatt Macy 	spa_evicting_os_wait(spa);
6621eda14cbcSMatt Macy 	spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
6622eda14cbcSMatt Macy 	spa->spa_load_state = SPA_LOAD_NONE;
6623eda14cbcSMatt Macy 
6624eda14cbcSMatt Macy 	spa_import_os(spa);
6625eda14cbcSMatt Macy 
6626eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
6627eda14cbcSMatt Macy 
6628eda14cbcSMatt Macy 	return (0);
6629eda14cbcSMatt Macy }
6630eda14cbcSMatt Macy 
6631eda14cbcSMatt Macy /*
6632eda14cbcSMatt Macy  * Import a non-root pool into the system.
6633eda14cbcSMatt Macy  */
6634eda14cbcSMatt Macy int
663581b22a98SMartin Matuska spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
6636eda14cbcSMatt Macy {
6637eda14cbcSMatt Macy 	spa_t *spa;
6638eda14cbcSMatt Macy 	const char *altroot = NULL;
6639eda14cbcSMatt Macy 	spa_load_state_t state = SPA_LOAD_IMPORT;
6640eda14cbcSMatt Macy 	zpool_load_policy_t policy;
6641eda14cbcSMatt Macy 	spa_mode_t mode = spa_mode_global;
6642eda14cbcSMatt Macy 	uint64_t readonly = B_FALSE;
6643eda14cbcSMatt Macy 	int error;
6644eda14cbcSMatt Macy 	nvlist_t *nvroot;
6645eda14cbcSMatt Macy 	nvlist_t **spares, **l2cache;
6646eda14cbcSMatt Macy 	uint_t nspares, nl2cache;
6647eda14cbcSMatt Macy 
6648eda14cbcSMatt Macy 	/*
6649eda14cbcSMatt Macy 	 * If a pool with this name exists, return failure.
6650eda14cbcSMatt Macy 	 */
6651eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
6652eda14cbcSMatt Macy 	if (spa_lookup(pool) != NULL) {
665381b22a98SMartin Matuska 		mutex_exit(&spa_namespace_lock);
6654eda14cbcSMatt Macy 		return (SET_ERROR(EEXIST));
6655eda14cbcSMatt Macy 	}
6656eda14cbcSMatt Macy 
6657eda14cbcSMatt Macy 	/*
6658eda14cbcSMatt Macy 	 * Create and initialize the spa structure.
6659eda14cbcSMatt Macy 	 */
6660eda14cbcSMatt Macy 	(void) nvlist_lookup_string(props,
6661eda14cbcSMatt Macy 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
6662eda14cbcSMatt Macy 	(void) nvlist_lookup_uint64(props,
6663eda14cbcSMatt Macy 	    zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
6664eda14cbcSMatt Macy 	if (readonly)
6665eda14cbcSMatt Macy 		mode = SPA_MODE_READ;
6666eda14cbcSMatt Macy 	spa = spa_add(pool, config, altroot);
6667eda14cbcSMatt Macy 	spa->spa_import_flags = flags;
6668eda14cbcSMatt Macy 
6669eda14cbcSMatt Macy 	/*
6670eda14cbcSMatt Macy 	 * Verbatim import - Take a pool and insert it into the namespace
6671eda14cbcSMatt Macy 	 * as if it had been loaded at boot.
6672eda14cbcSMatt Macy 	 */
6673eda14cbcSMatt Macy 	if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
6674eda14cbcSMatt Macy 		if (props != NULL)
6675eda14cbcSMatt Macy 			spa_configfile_set(spa, props, B_FALSE);
6676eda14cbcSMatt Macy 
667781b22a98SMartin Matuska 		spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
667881b22a98SMartin Matuska 		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
6679eda14cbcSMatt Macy 		zfs_dbgmsg("spa_import: verbatim import of %s", pool);
668081b22a98SMartin Matuska 		mutex_exit(&spa_namespace_lock);
668181b22a98SMartin Matuska 		return (0);
6682681ce946SMartin Matuska 	}
6683681ce946SMartin Matuska 
6684eda14cbcSMatt Macy 	spa_activate(spa, mode);
6685eda14cbcSMatt Macy 
6686eda14cbcSMatt Macy 	/*
6687eda14cbcSMatt Macy 	 * Don't start async tasks until we know everything is healthy.
6688eda14cbcSMatt Macy 	 */
6689eda14cbcSMatt Macy 	spa_async_suspend(spa);
6690eda14cbcSMatt Macy 
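	/*
	 * If the load policy requests a rewind, load in recovery mode so
	 * that earlier txgs may be tried.
	 */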
6691eda14cbcSMatt Macy 	zpool_get_load_policy(config, &policy);
669281b22a98SMartin Matuska 	if (policy.zlp_rewind & ZPOOL_DO_REWIND)
669381b22a98SMartin Matuska 		state = SPA_LOAD_RECOVER;
6694eda14cbcSMatt Macy 
669581b22a98SMartin Matuska 	spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT;
669681b22a98SMartin Matuska 
6697681ce946SMartin Matuska 	if (state != SPA_LOAD_RECOVER) {
6698681ce946SMartin Matuska 		spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
6699eda14cbcSMatt Macy 		zfs_dbgmsg("spa_import: importing %s", pool);
6700eda14cbcSMatt Macy 	} else {
6701eda14cbcSMatt Macy 		zfs_dbgmsg("spa_import: importing %s, max_txg=%lld "
6702eda14cbcSMatt Macy 		    "(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg);
6703eda14cbcSMatt Macy 	}
6704eda14cbcSMatt Macy 	error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind);
6705eda14cbcSMatt Macy 
6706eda14cbcSMatt Macy 	/*
6707eda14cbcSMatt Macy 	 * Propagate anything learned while loading the pool and pass it
6708eda14cbcSMatt Macy 	 * back to the caller (e.g. rewind info, missing devices, etc.).
6709eda14cbcSMatt Macy 	 */
6710eda14cbcSMatt Macy 	fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, spa->spa_load_info);
6711eda14cbcSMatt Macy 
6712eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6713eda14cbcSMatt Macy 	/*
6714eda14cbcSMatt Macy 	 * Toss any existing sparelist, as it is no longer valid and
6715eda14cbcSMatt Macy 	 * conflicts with spa_has_spare().
6716eda14cbcSMatt Macy 	 */
6717eda14cbcSMatt Macy 	if (spa->spa_spares.sav_config) {
6718eda14cbcSMatt Macy 		nvlist_free(spa->spa_spares.sav_config);
6719eda14cbcSMatt Macy 		spa->spa_spares.sav_config = NULL;
6720eda14cbcSMatt Macy 		spa_load_spares(spa);
6721eda14cbcSMatt Macy 	}
6722eda14cbcSMatt Macy 	if (spa->spa_l2cache.sav_config) {
6723eda14cbcSMatt Macy 		nvlist_free(spa->spa_l2cache.sav_config);
6724eda14cbcSMatt Macy 		spa->spa_l2cache.sav_config = NULL;
6725eda14cbcSMatt Macy 		spa_load_l2cache(spa);
6726eda14cbcSMatt Macy 	}
6727eda14cbcSMatt Macy 
6728eda14cbcSMatt Macy 	nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
6729eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
6730eda14cbcSMatt Macy 
6731eda14cbcSMatt Macy 	if (props != NULL)
6732eda14cbcSMatt Macy 		spa_configfile_set(spa, props, B_FALSE);
6733eda14cbcSMatt Macy 
6734c03c5b1cSMartin Matuska 	if (error != 0 || (props && spa_writeable(spa) &&
6735c03c5b1cSMartin Matuska 	    (error = spa_prop_set(spa, props)))) {
6736eda14cbcSMatt Macy 		spa_unload(spa);
6737eda14cbcSMatt Macy 		spa_deactivate(spa);
6738eda14cbcSMatt Macy 		spa_remove(spa);
6739eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
6740eda14cbcSMatt Macy 		return (error);
6741eda14cbcSMatt Macy 	}
6742eda14cbcSMatt Macy 
67432a58b312SMartin Matuska 	spa_async_resume(spa);
6744eda14cbcSMatt Macy 
6745eda14cbcSMatt Macy 	/*
6746eda14cbcSMatt Macy 	 * Override any spares and level 2 cache devices as specified by
6747eda14cbcSMatt Macy 	 * the user, as these may have correct device names/devids, etc.
6748eda14cbcSMatt Macy 	 */
6749eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
6750eda14cbcSMatt Macy 	    &spares, &nspares) == 0) {
6751eda14cbcSMatt Macy 		if (spa->spa_spares.sav_config)
6752eda14cbcSMatt Macy 			fnvlist_remove(spa->spa_spares.sav_config,
6753eda14cbcSMatt Macy 			    ZPOOL_CONFIG_SPARES);
6754eda14cbcSMatt Macy 		else
6755eda14cbcSMatt Macy 			spa->spa_spares.sav_config = fnvlist_alloc();
6756eda14cbcSMatt Macy 		fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
6757eda14cbcSMatt Macy 		    ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
6758eda14cbcSMatt Macy 		    nspares);
6759eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6760eda14cbcSMatt Macy 		spa_load_spares(spa);
6761eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
6762eda14cbcSMatt Macy 		spa->spa_spares.sav_sync = B_TRUE;
6763eda14cbcSMatt Macy 	}
6764eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
6765eda14cbcSMatt Macy 	    &l2cache, &nl2cache) == 0) {
6766eda14cbcSMatt Macy 		if (spa->spa_l2cache.sav_config)
6767eda14cbcSMatt Macy 			fnvlist_remove(spa->spa_l2cache.sav_config,
6768eda14cbcSMatt Macy 			    ZPOOL_CONFIG_L2CACHE);
6769eda14cbcSMatt Macy 		else
6770eda14cbcSMatt Macy 			spa->spa_l2cache.sav_config = fnvlist_alloc();
6771eda14cbcSMatt Macy 		fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
6772eda14cbcSMatt Macy 		    ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache,
6773eda14cbcSMatt Macy 		    nl2cache);
6774eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6775eda14cbcSMatt Macy 		spa_load_l2cache(spa);
6776eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
6777eda14cbcSMatt Macy 		spa->spa_l2cache.sav_sync = B_TRUE;
6778eda14cbcSMatt Macy 	}
6779eda14cbcSMatt Macy 
6780eda14cbcSMatt Macy 	/*
6781eda14cbcSMatt Macy 	 * Check for any removed devices.
6782eda14cbcSMatt Macy 	 */
6783e639e0d2SMartin Matuska 	if (spa->spa_autoreplace) {
6784e639e0d2SMartin Matuska 		spa_aux_check_removed(&spa->spa_spares);
6785e639e0d2SMartin Matuska 		spa_aux_check_removed(&spa->spa_l2cache);
6786e639e0d2SMartin Matuska 	}
6787e639e0d2SMartin Matuska 
6788e639e0d2SMartin Matuska 	if (spa_writeable(spa)) {
6789e639e0d2SMartin Matuska 		/*
6790e639e0d2SMartin Matuska 		 * Update the config cache to include the newly-imported pool.
6791e639e0d2SMartin Matuska 		 */
6792e639e0d2SMartin Matuska 		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
6793eda14cbcSMatt Macy 	}
6794eda14cbcSMatt Macy 
6795eda14cbcSMatt Macy 	/*
6796eda14cbcSMatt Macy 	 * It's possible that the pool was expanded while it was exported.
6797eda14cbcSMatt Macy 	 * We kick off an async task to handle this for us.
6798eda14cbcSMatt Macy 	 */
6799eda14cbcSMatt Macy 	spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
680081b22a98SMartin Matuska 
680181b22a98SMartin Matuska 	spa_history_log_version(spa, "import", NULL);
680281b22a98SMartin Matuska 
680381b22a98SMartin Matuska 	spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
680481b22a98SMartin Matuska 
680581b22a98SMartin Matuska 	mutex_exit(&spa_namespace_lock);
680681b22a98SMartin Matuska 
680781b22a98SMartin Matuska 	zvol_create_minors_recursive(pool);
6808eda14cbcSMatt Macy 
6809eda14cbcSMatt Macy 	spa_import_os(spa);
6810eda14cbcSMatt Macy 
6811eda14cbcSMatt Macy 	return (0);
6812eda14cbcSMatt Macy }
6813eda14cbcSMatt Macy 
6814eda14cbcSMatt Macy nvlist_t *
6815eda14cbcSMatt Macy spa_tryimport(nvlist_t *tryconfig)
6816eda14cbcSMatt Macy {
6817eda14cbcSMatt Macy 	nvlist_t *config = NULL;
6818eda14cbcSMatt Macy 	const char *poolname, *cachefile;
6819eda14cbcSMatt Macy 	spa_t *spa;
6820eda14cbcSMatt Macy 	uint64_t state;
6821eda14cbcSMatt Macy 	int error;
6822eda14cbcSMatt Macy 	zpool_load_policy_t policy;
6823eda14cbcSMatt Macy 
6824eda14cbcSMatt Macy 	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
6825eda14cbcSMatt Macy 		return (NULL);
6826eda14cbcSMatt Macy 
6827eda14cbcSMatt Macy 	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
6828eda14cbcSMatt Macy 		return (NULL);
6829eda14cbcSMatt Macy 
6830eda14cbcSMatt Macy 	/*
6831eda14cbcSMatt Macy 	 * Create and initialize the spa structure.
6832eda14cbcSMatt Macy 	 */
6833eda14cbcSMatt Macy 	char *name = kmem_alloc(MAXPATHLEN, KM_SLEEP);
6834eda14cbcSMatt Macy 	(void) snprintf(name, MAXPATHLEN, "%s-%llx-%s",
6835eda14cbcSMatt Macy 	    TRYIMPORT_NAME, (u_longlong_t)curthread, poolname);
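	/* The name embeds curthread, so concurrent tryimports don't collide. */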
683681b22a98SMartin Matuska 
683781b22a98SMartin Matuska 	mutex_enter(&spa_namespace_lock);
6838eda14cbcSMatt Macy 	spa = spa_add(name, tryconfig, NULL);
6839eda14cbcSMatt Macy 	spa_activate(spa, SPA_MODE_READ);
6840eda14cbcSMatt Macy 	kmem_free(name, MAXPATHLEN);
6841eda14cbcSMatt Macy 
6842eda14cbcSMatt Macy 	/*
6843eda14cbcSMatt Macy 	 * Rewind pool if a max txg was provided.
6844eda14cbcSMatt Macy 	 */
6845eda14cbcSMatt Macy 	zpool_get_load_policy(spa->spa_config, &policy);
6846eda14cbcSMatt Macy 	if (policy.zlp_txg != UINT64_MAX) {
6847eda14cbcSMatt Macy 		spa->spa_load_max_txg = policy.zlp_txg;
6848eda14cbcSMatt Macy 		spa->spa_extreme_rewind = B_TRUE;
6849eda14cbcSMatt Macy 		zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld",
6850eda14cbcSMatt Macy 		    poolname, (longlong_t)policy.zlp_txg);
6851eda14cbcSMatt Macy 	} else {
6852eda14cbcSMatt Macy 		zfs_dbgmsg("spa_tryimport: importing %s", poolname);
6853eda14cbcSMatt Macy 	}
6854eda14cbcSMatt Macy 
6855eda14cbcSMatt Macy 	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile)
6856eda14cbcSMatt Macy 	    == 0) {
6857eda14cbcSMatt Macy 		zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile);
6858eda14cbcSMatt Macy 		spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
6859eda14cbcSMatt Macy 	} else {
6860eda14cbcSMatt Macy 		spa->spa_config_source = SPA_CONFIG_SRC_SCAN;
6861eda14cbcSMatt Macy 	}
6862eda14cbcSMatt Macy 
6863eda14cbcSMatt Macy 	/*
6864eda14cbcSMatt Macy 	 * spa_import() relies on a pool config fetched by spa_tryimport()
6865eda14cbcSMatt Macy 	 * for spare/cache devices. Import flags are not passed to
6866eda14cbcSMatt Macy 	 * spa_tryimport(), which makes it return early due to a missing log
6867eda14cbcSMatt Macy 	 * device, so the cache and spare devices are never retrieved.
6868eda14cbcSMatt Macy 	 * Passing ZFS_IMPORT_MISSING_LOG to spa_tryimport() makes it fetch
6869eda14cbcSMatt Macy 	 * the correct configuration regardless of the missing log device.
6870180f8225SMatt Macy 	 */
6871eda14cbcSMatt Macy 	spa->spa_import_flags |= ZFS_IMPORT_MISSING_LOG;
6872eda14cbcSMatt Macy 
6873184c1b94SMartin Matuska 	error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING);
6874eda14cbcSMatt Macy 
6875eda14cbcSMatt Macy 	/*
6876eda14cbcSMatt Macy 	 * If 'tryconfig' was at least parsable, return the current config.
6877eda14cbcSMatt Macy 	 */
6878eda14cbcSMatt Macy 	if (spa->spa_root_vdev != NULL) {
6879eda14cbcSMatt Macy 		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
6880eda14cbcSMatt Macy 		fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, poolname);
6881eda14cbcSMatt Macy 		fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, state);
6882eda14cbcSMatt Macy 		fnvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
6883eda14cbcSMatt Macy 		    spa->spa_uberblock.ub_timestamp);
6884eda14cbcSMatt Macy 		fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
6885eda14cbcSMatt Macy 		    spa->spa_load_info);
6886eda14cbcSMatt Macy 		fnvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
6887eda14cbcSMatt Macy 		    spa->spa_errata);
6888eda14cbcSMatt Macy 
6889eda14cbcSMatt Macy 		/*
6890eda14cbcSMatt Macy 		 * If the bootfs property exists on this pool then we
6891eda14cbcSMatt Macy 		 * copy it out so that external consumers can tell which
6892eda14cbcSMatt Macy 		 * pools are bootable.
6893eda14cbcSMatt Macy 		 */
6894eda14cbcSMatt Macy 		if ((!error || error == EEXIST) && spa->spa_bootfs) {
6895eda14cbcSMatt Macy 			char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
6896eda14cbcSMatt Macy 
6897eda14cbcSMatt Macy 			/*
6898eda14cbcSMatt Macy 			 * We have to play games with the name since the
6899eda14cbcSMatt Macy 			 * pool was opened as TRYIMPORT_NAME.
6900eda14cbcSMatt Macy 			 */
6901eda14cbcSMatt Macy 			if (dsl_dsobj_to_dsname(spa_name(spa),
6902eda14cbcSMatt Macy 			    spa->spa_bootfs, tmpname) == 0) {
6903eda14cbcSMatt Macy 				char *cp;
6904eda14cbcSMatt Macy 				char *dsname;
6905eda14cbcSMatt Macy 
6906eda14cbcSMatt Macy 				dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
6907eda14cbcSMatt Macy 
6908eda14cbcSMatt Macy 				cp = strchr(tmpname, '/');
6909eda14cbcSMatt Macy 				if (cp == NULL) {
6910eda14cbcSMatt Macy 					(void) strlcpy(dsname, tmpname,
6911eda14cbcSMatt Macy 					    MAXPATHLEN);
6912eda14cbcSMatt Macy 				} else {
6913eda14cbcSMatt Macy 					(void) snprintf(dsname, MAXPATHLEN,
6914eda14cbcSMatt Macy 					    "%s/%s", poolname, ++cp);
6915eda14cbcSMatt Macy 				}
6916eda14cbcSMatt Macy 				fnvlist_add_string(config, ZPOOL_CONFIG_BOOTFS,
6917eda14cbcSMatt Macy 				    dsname);
6918eda14cbcSMatt Macy 				kmem_free(dsname, MAXPATHLEN);
6919eda14cbcSMatt Macy 			}
6920eda14cbcSMatt Macy 			kmem_free(tmpname, MAXPATHLEN);
6921eda14cbcSMatt Macy 		}
6922eda14cbcSMatt Macy 
6923eda14cbcSMatt Macy 		/*
6924eda14cbcSMatt Macy 		 * Add the list of hot spares and level 2 cache devices.
6925eda14cbcSMatt Macy 		 */
6926184c1b94SMartin Matuska 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6927184c1b94SMartin Matuska 		spa_add_spares(spa, config);
6928184c1b94SMartin Matuska 		spa_add_l2cache(spa, config);
6929eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
6930eda14cbcSMatt Macy 	}
6931eda14cbcSMatt Macy 
6932be181ee2SMartin Matuska 	spa_unload(spa);
6933eda14cbcSMatt Macy 	spa_deactivate(spa);
6934eda14cbcSMatt Macy 	spa_remove(spa);
6935eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
6936eda14cbcSMatt Macy 
6937eda14cbcSMatt Macy 	return (config);
6938eda14cbcSMatt Macy }
6939eda14cbcSMatt Macy 
6940eda14cbcSMatt Macy /*
6941184c1b94SMartin Matuska  * Pool export/destroy
6942184c1b94SMartin Matuska  *
6943eda14cbcSMatt Macy  * The act of destroying or exporting a pool is very simple.  We make sure there
6944eda14cbcSMatt Macy  * is no more pending I/O and any references to the pool are gone.  Then, we
6945eda14cbcSMatt Macy  * update the pool state and sync all the labels to disk, removing the
6946eda14cbcSMatt Macy  * configuration from the cache afterwards. If the 'hardforce' flag is set, then
6947eda14cbcSMatt Macy  * we don't sync the labels or remove the configuration cache.
6948eda14cbcSMatt Macy  */
6949eda14cbcSMatt Macy static int
6950eda14cbcSMatt Macy spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
6951eda14cbcSMatt Macy     boolean_t force, boolean_t hardforce)
6952eda14cbcSMatt Macy {
6953eda14cbcSMatt Macy 	int error;
6954eda14cbcSMatt Macy 	spa_t *spa;
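	/* Used by zio_handle_export_delay() at the end of the export. */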
6955eda14cbcSMatt Macy 	hrtime_t export_start = gethrtime();
6956eda14cbcSMatt Macy 
6957eda14cbcSMatt Macy 	if (oldconfig)
6958eda14cbcSMatt Macy 		*oldconfig = NULL;
6959eda14cbcSMatt Macy 
6960eda14cbcSMatt Macy 	if (!(spa_mode_global & SPA_MODE_WRITE))
6961eda14cbcSMatt Macy 		return (SET_ERROR(EROFS));
6962eda14cbcSMatt Macy 
6963eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
6964eda14cbcSMatt Macy 	if ((spa = spa_lookup(pool)) == NULL) {
6965be181ee2SMartin Matuska 		mutex_exit(&spa_namespace_lock);
6966c03c5b1cSMartin Matuska 		return (SET_ERROR(ENOENT));
6967c03c5b1cSMartin Matuska 	}
6968c03c5b1cSMartin Matuska 
6969c03c5b1cSMartin Matuska 	if (spa->spa_is_exporting) {
6970c03c5b1cSMartin Matuska 		/* the pool is being exported by another thread */
6971c03c5b1cSMartin Matuska 		mutex_exit(&spa_namespace_lock);
6972c03c5b1cSMartin Matuska 		return (SET_ERROR(ZFS_ERR_EXPORT_IN_PROGRESS));
6973c03c5b1cSMartin Matuska 	}
6974c03c5b1cSMartin Matuska 	spa->spa_is_exporting = B_TRUE;
6975c03c5b1cSMartin Matuska 
6976c03c5b1cSMartin Matuska 	/*
6977c03c5b1cSMartin Matuska 	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
6978c03c5b1cSMartin Matuska 	 * reacquire the namespace lock, and see if we can export.
6979c03c5b1cSMartin Matuska 	 */
6980c03c5b1cSMartin Matuska 	spa_open_ref(spa, FTAG);
6981c03c5b1cSMartin Matuska 	mutex_exit(&spa_namespace_lock);
6982c03c5b1cSMartin Matuska 	spa_async_suspend(spa);
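	/*
	 * Remove any zvol minors and drain the zvol taskq so that no
	 * zvol operations hold the pool open during export.
	 */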
6983c03c5b1cSMartin Matuska 	if (spa->spa_zvol_taskq) {
6984eda14cbcSMatt Macy 		zvol_remove_minors(spa, spa_name(spa), B_TRUE);
6985eda14cbcSMatt Macy 		taskq_wait(spa->spa_zvol_taskq);
6986eda14cbcSMatt Macy 	}
6987eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
6988eda14cbcSMatt Macy 	spa_close(spa, FTAG);
6989eda14cbcSMatt Macy 
6990eda14cbcSMatt Macy 	if (spa->spa_state == POOL_STATE_UNINITIALIZED)
6991c03c5b1cSMartin Matuska 		goto export_spa;
6992c03c5b1cSMartin Matuska 	/*
6993eda14cbcSMatt Macy 	 * The pool will be in core if it's openable, in which case we can
6994eda14cbcSMatt Macy 	 * modify its state.  Objsets may be open only because they're dirty,
6995eda14cbcSMatt Macy 	 * so we have to force it to sync before checking spa_refcnt.
6996eda14cbcSMatt Macy 	 */
6997eda14cbcSMatt Macy 	if (spa->spa_sync_on) {
6998eda14cbcSMatt Macy 		txg_wait_synced(spa->spa_dsl_pool, 0);
6999eda14cbcSMatt Macy 		spa_evicting_os_wait(spa);
7000eda14cbcSMatt Macy 	}
7001eda14cbcSMatt Macy 
7002eda14cbcSMatt Macy 	/*
7003eda14cbcSMatt Macy 	 * A pool cannot be exported or destroyed if there are active
700481b22a98SMartin Matuska 	 * references.  If we are resetting a pool, allow references by
7005eda14cbcSMatt Macy 	 * fault injection handlers.
7006eda14cbcSMatt Macy 	 */
7007eda14cbcSMatt Macy 	if (!spa_refcount_zero(spa) || (spa->spa_inject_ref != 0)) {
7008be181ee2SMartin Matuska 		error = SET_ERROR(EBUSY);
7009eda14cbcSMatt Macy 		goto fail;
7010eda14cbcSMatt Macy 	}
7011eda14cbcSMatt Macy 
7012eda14cbcSMatt Macy 	if (spa->spa_sync_on) {
7013eda14cbcSMatt Macy 		vdev_t *rvd = spa->spa_root_vdev;
7014eda14cbcSMatt Macy 		/*
7015eda14cbcSMatt Macy 		 * A pool cannot be exported if it has an active shared spare.
7016eda14cbcSMatt Macy 		 * This is to prevent other pools stealing the active spare
7017eda14cbcSMatt Macy 		 * from an exported pool. At the user's own discretion, such
7018eda14cbcSMatt Macy 		 * a pool can still be forcibly exported.
7019eda14cbcSMatt Macy 		 */
7020eda14cbcSMatt Macy 		if (!force && new_state == POOL_STATE_EXPORTED &&
7021184c1b94SMartin Matuska 		    spa_has_active_shared_spare(spa)) {
7022184c1b94SMartin Matuska 			error = SET_ERROR(EXDEV);
7023184c1b94SMartin Matuska 			goto fail;
7024184c1b94SMartin Matuska 		}
7025184c1b94SMartin Matuska 
7026184c1b94SMartin Matuska 		/*
7027eda14cbcSMatt Macy 		 * We're about to export or destroy this pool. Make sure
7028eda14cbcSMatt Macy 		 * we stop all initialization and trim activity here before
7029eda14cbcSMatt Macy 		 * we set the spa_final_txg. This will ensure that all
7030eda14cbcSMatt Macy 		 * dirty data resulting from the initialization is
7031eda14cbcSMatt Macy 		 * committed to disk before we unload the pool.
7032eda14cbcSMatt Macy 		 */
7033180f8225SMatt Macy 		vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE);
7034eda14cbcSMatt Macy 		vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE);
7035eda14cbcSMatt Macy 		vdev_autotrim_stop_all(spa);
7036eda14cbcSMatt Macy 		vdev_rebuild_stop_all(spa);
7037eda14cbcSMatt Macy 
7038eda14cbcSMatt Macy 		/*
7039eda14cbcSMatt Macy 		 * We want this to be reflected on every label,
7040eda14cbcSMatt Macy 		 * so mark them all dirty.  spa_unload() will do the
7041eda14cbcSMatt Macy 		 * final sync that pushes these changes out.
7042eda14cbcSMatt Macy 		 */
7043180f8225SMatt Macy 		if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
7044eda14cbcSMatt Macy 			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7045eda14cbcSMatt Macy 			spa->spa_state = new_state;
7046eda14cbcSMatt Macy 			vdev_config_dirty(rvd);
7047eda14cbcSMatt Macy 			spa_config_exit(spa, SCL_ALL, FTAG);
7048eda14cbcSMatt Macy 		}
7049eda14cbcSMatt Macy 
7050eda14cbcSMatt Macy 		/*
7051eda14cbcSMatt Macy 		 * If the log space map feature is enabled and the pool is
7052eda14cbcSMatt Macy 		 * getting exported (but not destroyed), we want to spend some
7053eda14cbcSMatt Macy 		 * time flushing as many metaslabs as we can in an attempt to
7054eda14cbcSMatt Macy 		 * destroy log space maps and save import time. This has to be
7055180f8225SMatt Macy 		 * done before we set the spa_final_txg, otherwise
7056eda14cbcSMatt Macy 		 * spa_sync() -> spa_flush_metaslabs() may dirty the final TXGs.
7057eda14cbcSMatt Macy 		 * spa_should_flush_logs_on_unload() should be called after
7058eda14cbcSMatt Macy 		 * spa_state has been set to the new_state.
7059eda14cbcSMatt Macy 		 */
7060eda14cbcSMatt Macy 		if (spa_should_flush_logs_on_unload(spa))
7061eda14cbcSMatt Macy 			spa_unload_log_sm_flush_all(spa);
7062eda14cbcSMatt Macy 
7063eda14cbcSMatt Macy 		if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
7064eda14cbcSMatt Macy 			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7065eda14cbcSMatt Macy 			spa->spa_final_txg = spa_last_synced_txg(spa) +
7066eda14cbcSMatt Macy 			    TXG_DEFER_SIZE + 1;
7067eda14cbcSMatt Macy 			spa_config_exit(spa, SCL_ALL, FTAG);
70687877fdebSMatt Macy 		}
70697877fdebSMatt Macy 	}
70707877fdebSMatt Macy 
70717877fdebSMatt Macy export_spa:
70727877fdebSMatt Macy 	spa_export_os(spa);
70737877fdebSMatt Macy 
70747877fdebSMatt Macy 	if (new_state == POOL_STATE_DESTROYED)
70757877fdebSMatt Macy 		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
70767877fdebSMatt Macy 	else if (new_state == POOL_STATE_EXPORTED)
70777877fdebSMatt Macy 		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT);
70787877fdebSMatt Macy 
70797877fdebSMatt Macy 	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
70807877fdebSMatt Macy 		spa_unload(spa);
7081eda14cbcSMatt Macy 		spa_deactivate(spa);
7082eda14cbcSMatt Macy 	}
7083eda14cbcSMatt Macy 
7084eda14cbcSMatt Macy 	if (oldconfig && spa->spa_config)
7085eda14cbcSMatt Macy 		*oldconfig = fnvlist_dup(spa->spa_config);
70867877fdebSMatt Macy 
7087eda14cbcSMatt Macy 	if (new_state != POOL_STATE_UNINITIALIZED) {
7088eda14cbcSMatt Macy 		if (!hardforce)
7089eda14cbcSMatt Macy 			spa_write_cachefile(spa, B_TRUE, B_TRUE, B_FALSE);
7090eda14cbcSMatt Macy 		spa_remove(spa);
7091eda14cbcSMatt Macy 	} else {
7092eda14cbcSMatt Macy 		/*
7093eda14cbcSMatt Macy 		 * If spa_remove() is not called for this spa_t and
7094eda14cbcSMatt Macy 		 * there is any possibility that it can be reused,
7095eda14cbcSMatt Macy 		 * we make sure to reset the exporting flag.
7096eda14cbcSMatt Macy 		 */
7097eda14cbcSMatt Macy 		spa->spa_is_exporting = B_FALSE;
7098eda14cbcSMatt Macy 	}
7099eda14cbcSMatt Macy 
7100eda14cbcSMatt Macy 	if (new_state == POOL_STATE_EXPORTED)
7101eda14cbcSMatt Macy 		zio_handle_export_delay(spa, gethrtime() - export_start);
7102eda14cbcSMatt Macy 
7103eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
7104eda14cbcSMatt Macy 	return (0);
7105eda14cbcSMatt Macy 
7106eda14cbcSMatt Macy fail:
7107eda14cbcSMatt Macy 	spa->spa_is_exporting = B_FALSE;
7108eda14cbcSMatt Macy 	spa_async_resume(spa);
7109eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
7110eda14cbcSMatt Macy 	return (error);
7111eda14cbcSMatt Macy }
7112eda14cbcSMatt Macy 
7113eda14cbcSMatt Macy /*
7114eda14cbcSMatt Macy  * Destroy a storage pool.
71157877fdebSMatt Macy  */
7116eda14cbcSMatt Macy int
71177877fdebSMatt Macy spa_destroy(const char *pool)
71187877fdebSMatt Macy {
71197877fdebSMatt Macy 	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
71207877fdebSMatt Macy 	    B_FALSE, B_FALSE));
712116038816SMartin Matuska }
71227877fdebSMatt Macy 
71237877fdebSMatt Macy /*
71247877fdebSMatt Macy  * Export a storage pool.
71257877fdebSMatt Macy  */
71267877fdebSMatt Macy int
71277877fdebSMatt Macy spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force,
71287877fdebSMatt Macy     boolean_t hardforce)
71297877fdebSMatt Macy {
71307877fdebSMatt Macy 	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
71317877fdebSMatt Macy 	    force, hardforce));
7132eda14cbcSMatt Macy }
7133eda14cbcSMatt Macy 
7134eda14cbcSMatt Macy /*
7135eda14cbcSMatt Macy  * Similar to spa_export(), this unloads the spa_t without actually removing it
7136eda14cbcSMatt Macy  * from the namespace in any way.
7137eda14cbcSMatt Macy  */
7138eda14cbcSMatt Macy int
7139eda14cbcSMatt Macy spa_reset(const char *pool)
7140eda14cbcSMatt Macy {
7141eda14cbcSMatt Macy 	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
7142eda14cbcSMatt Macy 	    B_FALSE, B_FALSE));
7143eda14cbcSMatt Macy }
71447877fdebSMatt Macy 
7145eda14cbcSMatt Macy /*
7146eda14cbcSMatt Macy  * ==========================================================================
7147eda14cbcSMatt Macy  * Device manipulation
7148eda14cbcSMatt Macy  * ==========================================================================
7149eda14cbcSMatt Macy  */
7150eda14cbcSMatt Macy 
7151eda14cbcSMatt Macy /*
7152eda14cbcSMatt Macy  * This is called as a synctask to increment the draid feature flag
7153eda14cbcSMatt Macy  */
71547877fdebSMatt Macy static void
71557877fdebSMatt Macy spa_draid_feature_incr(void *arg, dmu_tx_t *tx)
7156eda14cbcSMatt Macy {
71577877fdebSMatt Macy 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
7158eda14cbcSMatt Macy 	int draid = (int)(uintptr_t)arg;
7159eda14cbcSMatt Macy 
7160eda14cbcSMatt Macy 	for (int c = 0; c < draid; c++)
7161eda14cbcSMatt Macy 		spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
7162eda14cbcSMatt Macy }
7163eda14cbcSMatt Macy 
7164eda14cbcSMatt Macy /*
7165eda14cbcSMatt Macy  * Add a device to a storage pool.
7166eda14cbcSMatt Macy  */
7167eda14cbcSMatt Macy int
7168eda14cbcSMatt Macy spa_vdev_add(spa_t *spa, nvlist_t *nvroot, boolean_t check_ashift)
7169eda14cbcSMatt Macy {
7170eda14cbcSMatt Macy 	uint64_t txg, ndraid = 0;
7171eda14cbcSMatt Macy 	int error;
7172eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
7173eda14cbcSMatt Macy 	vdev_t *vd, *tvd;
7174eda14cbcSMatt Macy 	nvlist_t **spares, **l2cache;
7175eda14cbcSMatt Macy 	uint_t nspares, nl2cache;
7176eda14cbcSMatt Macy 
7177eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
7178eda14cbcSMatt Macy 
7179eda14cbcSMatt Macy 	txg = spa_vdev_enter(spa);
7180eda14cbcSMatt Macy 
7181eda14cbcSMatt Macy 	if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
7182eda14cbcSMatt Macy 	    VDEV_ALLOC_ADD)) != 0)
7183eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, error));
7184eda14cbcSMatt Macy 
7185eda14cbcSMatt Macy 	spa->spa_pending_vdev = vd;	/* spa_vdev_exit() will clear this */
7186eda14cbcSMatt Macy 
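	/* A missing spares or l2cache array just means none were requested. */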
7187eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
7188eda14cbcSMatt Macy 	    &nspares) != 0)
7189eda14cbcSMatt Macy 		nspares = 0;
7190eda14cbcSMatt Macy 
7191eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
7192eda14cbcSMatt Macy 	    &nl2cache) != 0)
7193eda14cbcSMatt Macy 		nl2cache = 0;
7194eda14cbcSMatt Macy 
7195eda14cbcSMatt Macy 	if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
7196eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, vd, txg, EINVAL));
7197eda14cbcSMatt Macy 
71987877fdebSMatt Macy 	if (vd->vdev_children != 0 &&
71997877fdebSMatt Macy 	    (error = vdev_create(vd, txg, B_FALSE)) != 0) {
72007877fdebSMatt Macy 		return (spa_vdev_exit(spa, vd, txg, error));
72017877fdebSMatt Macy 	}
72027877fdebSMatt Macy 
72037877fdebSMatt Macy 	/*
72047877fdebSMatt Macy 	 * The virtual dRAID spares must be added after vdev tree is created
72057877fdebSMatt Macy 	 * and the vdev guids are generated.  The guid of their associated
72067877fdebSMatt Macy 	 * dRAID is stored in the config and used when opening the spare.
72077877fdebSMatt Macy 	 */
72087877fdebSMatt Macy 	if ((error = vdev_draid_spare_create(nvroot, vd, &ndraid,
72097877fdebSMatt Macy 	    rvd->vdev_children)) == 0) {
72107877fdebSMatt Macy 		if (ndraid > 0 && nvlist_lookup_nvlist_array(nvroot,
7211eda14cbcSMatt Macy 		    ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0)
7212eda14cbcSMatt Macy 			nspares = 0;
7213eda14cbcSMatt Macy 	} else {
7214eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, vd, txg, error));
7215eda14cbcSMatt Macy 	}
7216eda14cbcSMatt Macy 
7217eda14cbcSMatt Macy 	/*
7218eda14cbcSMatt Macy 	 * We must validate the spares and l2cache devices after checking the
7219eda14cbcSMatt Macy 	 * children.  Otherwise, vdev_inuse() will blindly overwrite the spare.
7220eda14cbcSMatt Macy 	 */
7221eda14cbcSMatt Macy 	if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
7222eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, vd, txg, error));
7223eda14cbcSMatt Macy 
7224eda14cbcSMatt Macy 	/*
7225eda14cbcSMatt Macy 	 * If we are in the middle of a device removal, we can only add
7226eda14cbcSMatt Macy 	 * devices which match the existing devices in the pool.
7227eda14cbcSMatt Macy 	 * If we are in the middle of a removal, or have some indirect
7228eda14cbcSMatt Macy 	 * vdevs, we cannot add raidz or dRAID top levels.
7229eda14cbcSMatt Macy 	 */
7230eda14cbcSMatt Macy 	if (spa->spa_vdev_removal != NULL ||
7231eda14cbcSMatt Macy 	    spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
7232eda14cbcSMatt Macy 		for (int c = 0; c < vd->vdev_children; c++) {
7233eda14cbcSMatt Macy 			tvd = vd->vdev_child[c];
7234e716630dSMartin Matuska 			if (spa->spa_vdev_removal != NULL &&
7235e716630dSMartin Matuska 			    tvd->vdev_ashift != spa->spa_max_ashift) {
7236e716630dSMartin Matuska 				return (spa_vdev_exit(spa, vd, txg, EINVAL));
7237e716630dSMartin Matuska 			}
7238eda14cbcSMatt Macy 			/* Fail if top level vdev is raidz or a dRAID */
7239eda14cbcSMatt Macy 			if (vdev_get_nparity(tvd) != 0)
7240eda14cbcSMatt Macy 				return (spa_vdev_exit(spa, vd, txg, EINVAL));
7241eda14cbcSMatt Macy 
7242eda14cbcSMatt Macy 			/*
7243eda14cbcSMatt Macy 			 * Need the top level mirror to be
7244eda14cbcSMatt Macy 			 * a mirror of leaf vdevs only
7245eda14cbcSMatt Macy 			 */
7246eda14cbcSMatt Macy 			if (tvd->vdev_ops == &vdev_mirror_ops) {
7247eda14cbcSMatt Macy 				for (uint64_t cid = 0;
7248eda14cbcSMatt Macy 				    cid < tvd->vdev_children; cid++) {
7249eda14cbcSMatt Macy 					vdev_t *cvd = tvd->vdev_child[cid];
7250eda14cbcSMatt Macy 					if (!cvd->vdev_ops->vdev_op_leaf) {
7251eda14cbcSMatt Macy 						return (spa_vdev_exit(spa, vd,
7252eda14cbcSMatt Macy 						    txg, EINVAL));
7253eda14cbcSMatt Macy 					}
7254eda14cbcSMatt Macy 				}
7255eda14cbcSMatt Macy 			}
7256eda14cbcSMatt Macy 		}
7257eda14cbcSMatt Macy 	}
7258eda14cbcSMatt Macy 
7259eda14cbcSMatt Macy 	if (check_ashift && spa->spa_max_ashift == spa->spa_min_ashift) {
7260e716630dSMartin Matuska 		for (int c = 0; c < vd->vdev_children; c++) {
7261eda14cbcSMatt Macy 			tvd = vd->vdev_child[c];
7262eda14cbcSMatt Macy 			if (tvd->vdev_ashift != spa->spa_max_ashift) {
7263eda14cbcSMatt Macy 				return (spa_vdev_exit(spa, vd, txg,
7264eda14cbcSMatt Macy 				    ZFS_ERR_ASHIFT_MISMATCH));
7265eda14cbcSMatt Macy 			}
7266eda14cbcSMatt Macy 		}
7267eda14cbcSMatt Macy 	}
7268eda14cbcSMatt Macy 
7269eda14cbcSMatt Macy 	for (int c = 0; c < vd->vdev_children; c++) {
7270eda14cbcSMatt Macy 		tvd = vd->vdev_child[c];
7271eda14cbcSMatt Macy 		vdev_remove_child(vd, tvd);
7272eda14cbcSMatt Macy 		tvd->vdev_id = rvd->vdev_children;
7273eda14cbcSMatt Macy 		vdev_add_child(rvd, tvd);
7274eda14cbcSMatt Macy 		vdev_config_dirty(tvd);
7275eda14cbcSMatt Macy 	}
7276eda14cbcSMatt Macy 
7277eda14cbcSMatt Macy 	if (nspares != 0) {
7278eda14cbcSMatt Macy 		spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
7279eda14cbcSMatt Macy 		    ZPOOL_CONFIG_SPARES);
72804e8d558cSMartin Matuska 		spa_load_spares(spa);
72814e8d558cSMartin Matuska 		spa->spa_spares.sav_sync = B_TRUE;
7282eda14cbcSMatt Macy 	}
7283eda14cbcSMatt Macy 
72844e8d558cSMartin Matuska 	if (nl2cache != 0) {
7285eda14cbcSMatt Macy 		spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
7286eda14cbcSMatt Macy 		    ZPOOL_CONFIG_L2CACHE);
7287eda14cbcSMatt Macy 		spa_load_l2cache(spa);
7288eda14cbcSMatt Macy 		spa->spa_l2cache.sav_sync = B_TRUE;
7289eda14cbcSMatt Macy 	}
7290eda14cbcSMatt Macy 
7291e716630dSMartin Matuska 	/*
7292e716630dSMartin Matuska 	 * We can't increment a feature while holding the spa_vdev lock,
7293e716630dSMartin Matuska 	 * so we have to do it in a synctask.
7294e716630dSMartin Matuska 	 */
7295eda14cbcSMatt Macy 	if (ndraid != 0) {
7296eda14cbcSMatt Macy 		dmu_tx_t *tx;
7297eda14cbcSMatt Macy 
7298eda14cbcSMatt Macy 		tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
7299e716630dSMartin Matuska 		dsl_sync_task_nowait(spa->spa_dsl_pool, spa_draid_feature_incr,
7300e716630dSMartin Matuska 		    (void *)(uintptr_t)ndraid, tx);
7301e716630dSMartin Matuska 		dmu_tx_commit(tx);
7302e716630dSMartin Matuska 	}
7303eda14cbcSMatt Macy 
7304eda14cbcSMatt Macy 	/*
7305e716630dSMartin Matuska 	 * We have to be careful when adding new vdevs to an existing pool.
7306e716630dSMartin Matuska 	 * If other threads start allocating from these vdevs before we
7307e716630dSMartin Matuska 	 * sync the config cache, and we lose power, then upon reboot we may
7308e716630dSMartin Matuska 	 * fail to open the pool because there are DVAs that the config cache
7309e716630dSMartin Matuska 	 * can't translate.  Therefore, we first add the vdevs without
7310e716630dSMartin Matuska 	 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
7311e716630dSMartin Matuska 	 * and then let spa_config_update() initialize the new metaslabs.
7312e716630dSMartin Matuska 	 *
7313e716630dSMartin Matuska 	 * spa_load() checks for added-but-not-initialized vdevs, so that
7314e716630dSMartin Matuska 	 * if we lose power at any point in this sequence, the remaining
7315e716630dSMartin Matuska 	 * steps will be completed the next time we load the pool.
7316e716630dSMartin Matuska 	 */
7317e716630dSMartin Matuska 	(void) spa_vdev_exit(spa, vd, txg, 0);
7318e716630dSMartin Matuska 
7319eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
7320eda14cbcSMatt Macy 	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
7321dbd5678dSMartin Matuska 	spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD);
7322dbd5678dSMartin Matuska 	mutex_exit(&spa_namespace_lock);
7323eda14cbcSMatt Macy 
7324eda14cbcSMatt Macy 	return (0);
7325eda14cbcSMatt Macy }
7326eda14cbcSMatt Macy 
7327eda14cbcSMatt Macy /*
7328eda14cbcSMatt Macy  * Attach a device to a vdev specified by its guid.  The vdev type can be
7329eda14cbcSMatt Macy  * a mirror, a raidz, or a leaf device that is also a top-level (e.g. a
7330eda14cbcSMatt Macy  * single device). When the vdev is a single device, a mirror vdev will be
7331eda14cbcSMatt Macy  * automatically inserted.
7332eda14cbcSMatt Macy  *
7333eda14cbcSMatt Macy  * If 'replacing' is specified, the new device is intended to replace the
7334eda14cbcSMatt Macy  * existing device; in this case the two devices are made into their own
7335eda14cbcSMatt Macy  * mirror using the 'replacing' vdev, which is functionally identical to
7336eda14cbcSMatt Macy  * the mirror vdev (it actually reuses all the same ops) but has a few
7337dbd5678dSMartin Matuska  * extra rules: you can't attach to it after it's been created, and upon
7338eda14cbcSMatt Macy  * completion of resilvering, the first disk (the one being replaced)
7339dbd5678dSMartin Matuska  * is automatically detached.
7340dbd5678dSMartin Matuska  *
7341eda14cbcSMatt Macy  * If 'rebuild' is specified, then sequential reconstruction (a.k.a. rebuild)
7342dbd5678dSMartin Matuska  * should be performed instead of traditional healing reconstruction.  From
7343eda14cbcSMatt Macy  * an administrator's perspective these are both resilver operations.
73447877fdebSMatt Macy  */
73457877fdebSMatt Macy int
73467877fdebSMatt Macy spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
73477877fdebSMatt Macy     int rebuild)
73487877fdebSMatt Macy {
73497877fdebSMatt Macy 	uint64_t txg, dtl_max_txg;
73507877fdebSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
73517877fdebSMatt Macy 	vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
7352eda14cbcSMatt Macy 	vdev_ops_t *pvops;
7353eda14cbcSMatt Macy 	char *oldvdpath, *newvdpath;
73547877fdebSMatt Macy 	int newvd_isspare = B_FALSE;
7355eda14cbcSMatt Macy 	int error;
73567877fdebSMatt Macy 
7357eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
73587877fdebSMatt Macy 
73597877fdebSMatt Macy 	txg = spa_vdev_enter(spa);
73607877fdebSMatt Macy 
73617877fdebSMatt Macy 	oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
73627877fdebSMatt Macy 
73637877fdebSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
73647877fdebSMatt Macy 	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
7365eda14cbcSMatt Macy 		error = (spa_has_checkpoint(spa)) ?
7366eda14cbcSMatt Macy 		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
7367eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, error));
7368eda14cbcSMatt Macy 	}
7369eda14cbcSMatt Macy 
7370eda14cbcSMatt Macy 	if (rebuild) {
73712276e539SMartin Matuska 		if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
73722276e539SMartin Matuska 			return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
73732276e539SMartin Matuska 
7374eda14cbcSMatt Macy 		if (dsl_scan_resilvering(spa_get_dsl(spa)) ||
7375eda14cbcSMatt Macy 		    dsl_scan_resilver_scheduled(spa_get_dsl(spa))) {
73762276e539SMartin Matuska 			return (spa_vdev_exit(spa, NULL, txg,
73772276e539SMartin Matuska 			    ZFS_ERR_RESILVER_IN_PROGRESS));
7378eda14cbcSMatt Macy 		}
7379eda14cbcSMatt Macy 	} else {
7380eda14cbcSMatt Macy 		if (vdev_rebuild_active(rvd))
7381eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, NULL, txg,
7382eda14cbcSMatt Macy 			    ZFS_ERR_REBUILD_IN_PROGRESS));
7383eda14cbcSMatt Macy 	}
7384eda14cbcSMatt Macy 
7385eda14cbcSMatt Macy 	if (spa->spa_vdev_removal != NULL) {
7386eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg,
7387eda14cbcSMatt Macy 		    ZFS_ERR_DEVRM_IN_PROGRESS));
7388eda14cbcSMatt Macy 	}
7389eda14cbcSMatt Macy 
7390eda14cbcSMatt Macy 	if (oldvd == NULL)
7391eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
7392eda14cbcSMatt Macy 
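	/*
	 * Attaching a new disk to a raidz top-level vdev initiates a
	 * raidz expansion rather than a mirror or replacing attach.
	 */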
7393eda14cbcSMatt Macy 	boolean_t raidz = oldvd->vdev_ops == &vdev_raidz_ops;
7394eda14cbcSMatt Macy 
7395eda14cbcSMatt Macy 	if (raidz) {
7396eda14cbcSMatt Macy 		if (!spa_feature_is_enabled(spa, SPA_FEATURE_RAIDZ_EXPANSION))
7397eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
7398eda14cbcSMatt Macy 
7399eda14cbcSMatt Macy 		/*
7400eda14cbcSMatt Macy 		 * Can't expand a raidz while a prior expand is in progress.
7401eda14cbcSMatt Macy 		 */
7402eda14cbcSMatt Macy 		if (spa->spa_raidz_expand != NULL) {
7403eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, NULL, txg,
7404eda14cbcSMatt Macy 			    ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS));
7405eda14cbcSMatt Macy 		}
7406eda14cbcSMatt Macy 	} else if (!oldvd->vdev_ops->vdev_op_leaf) {
7407eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
7408eda14cbcSMatt Macy 	}
7409eda14cbcSMatt Macy 
7410eda14cbcSMatt Macy 	if (raidz)
7411eda14cbcSMatt Macy 		pvd = oldvd;
7412eda14cbcSMatt Macy 	else
7413eda14cbcSMatt Macy 		pvd = oldvd->vdev_parent;
7414eda14cbcSMatt Macy 
7415eda14cbcSMatt Macy 	if (spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
7416e716630dSMartin Matuska 	    VDEV_ALLOC_ATTACH) != 0)
7417e716630dSMartin Matuska 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
7418eda14cbcSMatt Macy 
7419eda14cbcSMatt Macy 	if (newrootvd->vdev_children != 1)
7420eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
7421eda14cbcSMatt Macy 
7422eda14cbcSMatt Macy 	newvd = newrootvd->vdev_child[0];
7423eda14cbcSMatt Macy 
7424eda14cbcSMatt Macy 	if (!newvd->vdev_ops->vdev_op_leaf)
7425eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
7426eda14cbcSMatt Macy 
7427eda14cbcSMatt Macy 	if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
7428e716630dSMartin Matuska 		return (spa_vdev_exit(spa, newrootvd, txg, error));
7429e716630dSMartin Matuska 
7430e716630dSMartin Matuska 	/*
7431e716630dSMartin Matuska 	 * log, dedup and special vdevs should not be replaced by spares.
7432e716630dSMartin Matuska 	 */
7433e716630dSMartin Matuska 	if ((oldvd->vdev_top->vdev_alloc_bias != VDEV_BIAS_NONE ||
7434e716630dSMartin Matuska 	    oldvd->vdev_top->vdev_islog) && newvd->vdev_isspare) {
7435e716630dSMartin Matuska 		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
7436e716630dSMartin Matuska 	}
7437e716630dSMartin Matuska 
7438e716630dSMartin Matuska 	/*
7439e716630dSMartin Matuska 	 * A dRAID spare can only replace a child of its parent dRAID vdev.
7440e716630dSMartin Matuska 	 */
7441e716630dSMartin Matuska 	if (newvd->vdev_ops == &vdev_draid_spare_ops &&
7442e716630dSMartin Matuska 	    oldvd->vdev_top != vdev_draid_spare_get_parent(newvd)) {
7443e716630dSMartin Matuska 		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
7444e716630dSMartin Matuska 	}
7445e716630dSMartin Matuska 
7446e716630dSMartin Matuska 	if (rebuild) {
7447e716630dSMartin Matuska 		/*
7448e716630dSMartin Matuska 		 * For rebuilds, the top vdev must support reconstruction
7449e716630dSMartin Matuska 		 * using only space maps.  This means the only allowable
7450e716630dSMartin Matuska 		 * vdevs types are the root vdev, a mirror, or dRAID.
7451e716630dSMartin Matuska 		 * vdev types are the root vdev, a mirror, or dRAID.
7452e716630dSMartin Matuska 		tvd = pvd;
7453e716630dSMartin Matuska 		if (pvd->vdev_top != NULL)
7454e716630dSMartin Matuska 			tvd = pvd->vdev_top;
7455e716630dSMartin Matuska 
7456e716630dSMartin Matuska 		if (tvd->vdev_ops != &vdev_mirror_ops &&
7457e716630dSMartin Matuska 		    tvd->vdev_ops != &vdev_root_ops &&
7458e716630dSMartin Matuska 		    tvd->vdev_ops != &vdev_draid_ops) {
7459e716630dSMartin Matuska 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
7460e716630dSMartin Matuska 		}
7461e716630dSMartin Matuska 	}
7462e716630dSMartin Matuska 
7463e716630dSMartin Matuska 	if (!replacing) {
7464e716630dSMartin Matuska 		/*
7465e716630dSMartin Matuska 		 * For attach, the only allowable parent is a mirror or
7466e716630dSMartin Matuska 		 * the root vdev. A raidz vdev can be attached to, but
7467e716630dSMartin Matuska 		 * you cannot attach to a raidz child.
7468eda14cbcSMatt Macy 		 */
7469eda14cbcSMatt Macy 		if (pvd->vdev_ops != &vdev_mirror_ops &&
7470eda14cbcSMatt Macy 		    pvd->vdev_ops != &vdev_root_ops &&
7471e716630dSMartin Matuska 		    !raidz)
7472eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
7473e716630dSMartin Matuska 
7474eda14cbcSMatt Macy 		pvops = &vdev_mirror_ops;
7475e716630dSMartin Matuska 	} else {
7476e716630dSMartin Matuska 		/*
7477eda14cbcSMatt Macy 		 * Active hot spares can only be replaced by inactive hot
7478eda14cbcSMatt Macy 		 * spares.
7479eda14cbcSMatt Macy 		 */
7480eda14cbcSMatt Macy 		if (pvd->vdev_ops == &vdev_spare_ops &&
7481e716630dSMartin Matuska 		    oldvd->vdev_isspare &&
7482e716630dSMartin Matuska 		    !spa_has_spare(spa, newvd->vdev_guid))
7483eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
7484eda14cbcSMatt Macy 
7485eda14cbcSMatt Macy 		/*
7486eda14cbcSMatt Macy 		 * If the source is a hot spare, and the parent isn't already a
7487eda14cbcSMatt Macy 		 * spare, then we want to create a new hot spare.  Otherwise, we
7488eda14cbcSMatt Macy 		 * want to create a replacing vdev.  The user is not allowed to
7489e716630dSMartin Matuska 		 * attach to a spared vdev child unless the 'isspare' state is
7490eda14cbcSMatt Macy 		 * the same (spare replaces spare, non-spare replaces
7491eda14cbcSMatt Macy 		 * non-spare).
7492eda14cbcSMatt Macy 		 */
7493e716630dSMartin Matuska 		if (pvd->vdev_ops == &vdev_replacing_ops &&
7494e716630dSMartin Matuska 		    spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
7495e716630dSMartin Matuska 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
7496eda14cbcSMatt Macy 		} else if (pvd->vdev_ops == &vdev_spare_ops &&
7497eda14cbcSMatt Macy 		    newvd->vdev_isspare != oldvd->vdev_isspare) {
7498eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
7499eda14cbcSMatt Macy 		}
7500eda14cbcSMatt Macy 
7501eda14cbcSMatt Macy 		if (newvd->vdev_isspare)
7502eda14cbcSMatt Macy 			pvops = &vdev_spare_ops;
7503eda14cbcSMatt Macy 		else
7504eda14cbcSMatt Macy 			pvops = &vdev_replacing_ops;
7505eda14cbcSMatt Macy 	}
7506eda14cbcSMatt Macy 
7507eda14cbcSMatt Macy 	/*
7508eda14cbcSMatt Macy 	 * Make sure the new device is big enough.
7509eda14cbcSMatt Macy 	 */
7510eda14cbcSMatt Macy 	vdev_t *min_vdev = raidz ? oldvd->vdev_child[0] : oldvd;
7511eda14cbcSMatt Macy 	if (newvd->vdev_asize < vdev_get_min_asize(min_vdev))
7512eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
7513eda14cbcSMatt Macy 
7514eda14cbcSMatt Macy 	/*
7515eda14cbcSMatt Macy 	 * The new device cannot have a higher alignment requirement
7516eda14cbcSMatt Macy 	 * than the top-level vdev.
7517eda14cbcSMatt Macy 	 */
7518eda14cbcSMatt Macy 	if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
7519eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
7520eda14cbcSMatt Macy 
7521eda14cbcSMatt Macy 	/*
7522eda14cbcSMatt Macy 	 * RAIDZ-expansion-specific checks.
7523e716630dSMartin Matuska 	 */
7524e716630dSMartin Matuska 	if (raidz) {
7525e716630dSMartin Matuska 		if (vdev_raidz_attach_check(newvd) != 0)
7526e716630dSMartin Matuska 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
7527e716630dSMartin Matuska 
7528e716630dSMartin Matuska 		/*
7529e716630dSMartin Matuska 		 * Fail early if a child is not healthy or being replaced
7530e716630dSMartin Matuska 		 */
7531e716630dSMartin Matuska 		for (int i = 0; i < oldvd->vdev_children; i++) {
7532e716630dSMartin Matuska 			if (vdev_is_dead(oldvd->vdev_child[i]) ||
7533e716630dSMartin Matuska 			    !oldvd->vdev_child[i]->vdev_ops->vdev_op_leaf) {
7534e716630dSMartin Matuska 				return (spa_vdev_exit(spa, newrootvd, txg,
7535e716630dSMartin Matuska 				    ENXIO));
7536e716630dSMartin Matuska 			}
7537e716630dSMartin Matuska 			/* Also fail if reserved boot area is in-use */
7538e716630dSMartin Matuska 			if (vdev_check_boot_reserve(spa, oldvd->vdev_child[i])
7539e716630dSMartin Matuska 			    != 0) {
7540e716630dSMartin Matuska 				return (spa_vdev_exit(spa, newrootvd, txg,
7541e716630dSMartin Matuska 				    EADDRINUSE));
7542e716630dSMartin Matuska 			}
7543e716630dSMartin Matuska 		}
7544e716630dSMartin Matuska 	}
7545e716630dSMartin Matuska 
7546e716630dSMartin Matuska 	if (raidz) {
7547e716630dSMartin Matuska 		/*
7548e716630dSMartin Matuska 		 * Note: oldvdpath is freed by spa_strfree(), but the string
7549e716630dSMartin Matuska 		 * returned by kmem_asprintf() must be freed by kmem_strfree(),
7550eda14cbcSMatt Macy 		 * so we have to move it to a spa_strdup-ed string.
7551eda14cbcSMatt Macy 		 */
7552eda14cbcSMatt Macy 		char *tmp = kmem_asprintf("raidz%u-%u",
7553eda14cbcSMatt Macy 		    (uint_t)vdev_get_nparity(oldvd), (uint_t)oldvd->vdev_id);
7554eda14cbcSMatt Macy 		oldvdpath = spa_strdup(tmp);
7555eda14cbcSMatt Macy 		kmem_strfree(tmp);
7556eda14cbcSMatt Macy 	} else {
7557eda14cbcSMatt Macy 		oldvdpath = spa_strdup(oldvd->vdev_path);
7558eda14cbcSMatt Macy 	}
7559eda14cbcSMatt Macy 	newvdpath = spa_strdup(newvd->vdev_path);
7560eda14cbcSMatt Macy 
7561eda14cbcSMatt Macy 	/*
7562eda14cbcSMatt Macy 	 * If this is an in-place replacement, update oldvd's path and devid
7563eda14cbcSMatt Macy 	 * to make it distinguishable from newvd, and unopenable from now on.
7564e716630dSMartin Matuska 	 */
7565e716630dSMartin Matuska 	if (strcmp(oldvdpath, newvdpath) == 0) {
7566e716630dSMartin Matuska 		spa_strfree(oldvd->vdev_path);
7567eda14cbcSMatt Macy 		oldvd->vdev_path = kmem_alloc(strlen(newvdpath) + 5,
7568eda14cbcSMatt Macy 		    KM_SLEEP);
7569eda14cbcSMatt Macy 		(void) sprintf(oldvd->vdev_path, "%s/old",
7570eda14cbcSMatt Macy 		    newvdpath);
7571eda14cbcSMatt Macy 		if (oldvd->vdev_devid != NULL) {
7572eda14cbcSMatt Macy 			spa_strfree(oldvd->vdev_devid);
7573eda14cbcSMatt Macy 			oldvd->vdev_devid = NULL;
7574eda14cbcSMatt Macy 		}
7575eda14cbcSMatt Macy 		spa_strfree(oldvdpath);
7576e716630dSMartin Matuska 		oldvdpath = spa_strdup(oldvd->vdev_path);
7577e716630dSMartin Matuska 	}
7578eda14cbcSMatt Macy 
7579eda14cbcSMatt Macy 	/*
7580eda14cbcSMatt Macy 	 * If the parent is not a mirror, or if we're replacing, insert the new
7581eda14cbcSMatt Macy 	 * mirror/replacing/spare vdev above oldvd.
7582eda14cbcSMatt Macy 	 */
7583eda14cbcSMatt Macy 	if (!raidz && pvd->vdev_ops != pvops) {
7584e716630dSMartin Matuska 		pvd = vdev_add_parent(oldvd, pvops);
7585eda14cbcSMatt Macy 		ASSERT(pvd->vdev_ops == pvops);
7586eda14cbcSMatt Macy 		ASSERT(oldvd->vdev_parent == pvd);
7587eda14cbcSMatt Macy 	}
7588eda14cbcSMatt Macy 
7589eda14cbcSMatt Macy 	ASSERT(pvd->vdev_top->vdev_parent == rvd);
7590eda14cbcSMatt Macy 
7591eda14cbcSMatt Macy 	/*
7592eda14cbcSMatt Macy 	 * Extract the new device from its root and add it to pvd.
7593eda14cbcSMatt Macy 	 */
7594eda14cbcSMatt Macy 	vdev_remove_child(newrootvd, newvd);
7595eda14cbcSMatt Macy 	newvd->vdev_id = pvd->vdev_children;
7596eda14cbcSMatt Macy 	newvd->vdev_crtxg = oldvd->vdev_crtxg;
7597eda14cbcSMatt Macy 	vdev_add_child(pvd, newvd);
7598eda14cbcSMatt Macy 
7599eda14cbcSMatt Macy 	/*
7600eda14cbcSMatt Macy 	 * Reevaluate the parent vdev state.
7601eda14cbcSMatt Macy 	 */
7602eda14cbcSMatt Macy 	vdev_propagate_state(pvd);
7603eda14cbcSMatt Macy 
7604eda14cbcSMatt Macy 	tvd = newvd->vdev_top;
7605eda14cbcSMatt Macy 	ASSERT(pvd->vdev_top == tvd);
7606eda14cbcSMatt Macy 	ASSERT(tvd->vdev_parent == rvd);
7607eda14cbcSMatt Macy 
7608eda14cbcSMatt Macy 	vdev_config_dirty(tvd);
7609eda14cbcSMatt Macy 
7610eda14cbcSMatt Macy 	/*
7611eda14cbcSMatt Macy 	 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
7612d411c1d6SMartin Matuska 	 * for any dmu_sync-ed blocks.  It will propagate upward when
7613eda14cbcSMatt Macy 	 * spa_vdev_exit() calls vdev_dtl_reassess().
7614eda14cbcSMatt Macy 	 */
7615eda14cbcSMatt Macy 	dtl_max_txg = txg + TXG_CONCURRENT_STATES;
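	/*
	 * For example, with TXG_CONCURRENT_STATES == 3 (as defined in
	 * txg.h), an attach entered in txg 100 gives dtl_max_txg == 103,
	 * so the non-raidz path below dirties DTL_MISSING over
	 * [TXG_INITIAL, 103).
	 */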
7616eda14cbcSMatt Macy 
7617eda14cbcSMatt Macy 	if (raidz) {
7618eda14cbcSMatt Macy 		/*
7619eda14cbcSMatt Macy 		 * Wait for the youngest allocations and frees to sync,
7620eda14cbcSMatt Macy 		 * and then wait for the deferral of those frees to finish.
7621eda14cbcSMatt Macy 		 */
7622eda14cbcSMatt Macy 		spa_vdev_config_exit(spa, NULL,
7623eda14cbcSMatt Macy 		    txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
7624eda14cbcSMatt Macy 
7625eda14cbcSMatt Macy 		vdev_initialize_stop_all(tvd, VDEV_INITIALIZE_ACTIVE);
7626eda14cbcSMatt Macy 		vdev_trim_stop_all(tvd, VDEV_TRIM_ACTIVE);
7627eda14cbcSMatt Macy 		vdev_autotrim_stop_wait(tvd);
7628eda14cbcSMatt Macy 
7629eda14cbcSMatt Macy 		dtl_max_txg = spa_vdev_config_enter(spa);
7630eda14cbcSMatt Macy 
7631eda14cbcSMatt Macy 		tvd->vdev_rz_expanding = B_TRUE;
7632eda14cbcSMatt Macy 
7633eda14cbcSMatt Macy 		vdev_dirty_leaves(tvd, VDD_DTL, dtl_max_txg);
7634eda14cbcSMatt Macy 		vdev_config_dirty(tvd);
7635eda14cbcSMatt Macy 
7636eda14cbcSMatt Macy 		dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool,
7637eda14cbcSMatt Macy 		    dtl_max_txg);
7638eda14cbcSMatt Macy 		dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_raidz_attach_sync,
7639eda14cbcSMatt Macy 		    newvd, tx);
7640eda14cbcSMatt Macy 		dmu_tx_commit(tx);
7641eda14cbcSMatt Macy 	} else {
7642eda14cbcSMatt Macy 		vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
7643eda14cbcSMatt Macy 		    dtl_max_txg - TXG_INITIAL);
7644eda14cbcSMatt Macy 
7645eda14cbcSMatt Macy 		if (newvd->vdev_isspare) {
7646eda14cbcSMatt Macy 			spa_spare_activate(newvd);
7647eda14cbcSMatt Macy 			spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
7648eda14cbcSMatt Macy 		}
7649eda14cbcSMatt Macy 
7650eda14cbcSMatt Macy 		newvd_isspare = newvd->vdev_isspare;
7651eda14cbcSMatt Macy 
7652eda14cbcSMatt Macy 		/*
7653eda14cbcSMatt Macy 		 * Mark newvd's DTL dirty in this txg.
7654eda14cbcSMatt Macy 		 */
7655eda14cbcSMatt Macy 		vdev_dirty(tvd, VDD_DTL, newvd, txg);
7656eda14cbcSMatt Macy 
7657eda14cbcSMatt Macy 		/*
7658eda14cbcSMatt Macy 		 * Schedule the resilver or rebuild to restart in the future.
7659eda14cbcSMatt Macy 		 * We do this to ensure that dmu_sync-ed blocks have been
7660eda14cbcSMatt Macy 		 * stitched into the respective datasets.
7661eda14cbcSMatt Macy 		 */
7662eda14cbcSMatt Macy 		if (rebuild) {
7663eda14cbcSMatt Macy 			newvd->vdev_rebuild_txg = txg;
7664eda14cbcSMatt Macy 
7665eda14cbcSMatt Macy 			vdev_rebuild(tvd);
7666eda14cbcSMatt Macy 		} else {
7667eda14cbcSMatt Macy 			newvd->vdev_resilver_txg = txg;
7668eda14cbcSMatt Macy 
7669eda14cbcSMatt Macy 			if (dsl_scan_resilvering(spa_get_dsl(spa)) &&
7670eda14cbcSMatt Macy 			    spa_feature_is_enabled(spa,
7671eda14cbcSMatt Macy 			    SPA_FEATURE_RESILVER_DEFER)) {
7672eda14cbcSMatt Macy 				vdev_defer_resilver(newvd);
7673eda14cbcSMatt Macy 			} else {
7674eda14cbcSMatt Macy 				dsl_scan_restart_resilver(spa->spa_dsl_pool,
7675eda14cbcSMatt Macy 				    dtl_max_txg);
7676eda14cbcSMatt Macy 			}
7677eda14cbcSMatt Macy 		}
7678eda14cbcSMatt Macy 	}
7679eda14cbcSMatt Macy 
7680eda14cbcSMatt Macy 	if (spa->spa_bootfs)
7681eda14cbcSMatt Macy 		spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
7682eda14cbcSMatt Macy 
7683eda14cbcSMatt Macy 	spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH);
7684eda14cbcSMatt Macy 
7685eda14cbcSMatt Macy 	/*
7686eda14cbcSMatt Macy 	 * Commit the config
7687eda14cbcSMatt Macy 	 */
7688eda14cbcSMatt Macy 	(void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
7689eda14cbcSMatt Macy 
7690eda14cbcSMatt Macy 	spa_history_log_internal(spa, "vdev attach", NULL,
7691eda14cbcSMatt Macy 	    "%s vdev=%s %s vdev=%s",
7692eda14cbcSMatt Macy 	    replacing && newvd_isspare ? "spare in" :
7693eda14cbcSMatt Macy 	    replacing ? "replace" : "attach", newvdpath,
7694eda14cbcSMatt Macy 	    replacing ? "for" : "to", oldvdpath);
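	/*
	 * The record above renders as e.g. "replace vdev=/dev/sdb1 for
	 * vdev=/dev/sda1" or "attach vdev=/dev/sdc1 to vdev=/dev/sdb1"
	 * (device paths illustrative).
	 */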
7695eda14cbcSMatt Macy 
7696eda14cbcSMatt Macy 	spa_strfree(oldvdpath);
7697eda14cbcSMatt Macy 	spa_strfree(newvdpath);
7698eda14cbcSMatt Macy 
7699eda14cbcSMatt Macy 	return (0);
7700eda14cbcSMatt Macy }
7701eda14cbcSMatt Macy 
7702eda14cbcSMatt Macy /*
7703eda14cbcSMatt Macy  * Detach a device from a mirror or replacing vdev.
7704eda14cbcSMatt Macy  *
7705eda14cbcSMatt Macy  * If 'replace_done' is specified, only detach if the parent
7706eda14cbcSMatt Macy  * is a replacing or a spare vdev.
7707eda14cbcSMatt Macy  */
7708eda14cbcSMatt Macy int
7709eda14cbcSMatt Macy spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
7710eda14cbcSMatt Macy {
7711eda14cbcSMatt Macy 	uint64_t txg;
7712eda14cbcSMatt Macy 	int error;
7713eda14cbcSMatt Macy 	vdev_t *rvd __maybe_unused = spa->spa_root_vdev;
7714eda14cbcSMatt Macy 	vdev_t *vd, *pvd, *cvd, *tvd;
7715eda14cbcSMatt Macy 	boolean_t unspare = B_FALSE;
7716eda14cbcSMatt Macy 	uint64_t unspare_guid = 0;
7717eda14cbcSMatt Macy 	char *vdpath;
7718eda14cbcSMatt Macy 
7719eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
7720eda14cbcSMatt Macy 
7721eda14cbcSMatt Macy 	txg = spa_vdev_detach_enter(spa, guid);
7722eda14cbcSMatt Macy 
7723eda14cbcSMatt Macy 	vd = spa_lookup_by_guid(spa, guid, B_FALSE);
7724eda14cbcSMatt Macy 
7725eda14cbcSMatt Macy 	/*
7726eda14cbcSMatt Macy 	 * Besides being called directly from userland through the
7727eda14cbcSMatt Macy 	 * ioctl interface, spa_vdev_detach() can potentially be called
77287877fdebSMatt Macy 	 * at the end of spa_vdev_resilver_done().
77297877fdebSMatt Macy 	 *
77307877fdebSMatt Macy 	 * In the regular case, when we have a checkpoint this shouldn't
77317877fdebSMatt Macy 	 * happen as we never empty the DTLs of a vdev during the scrub
77327877fdebSMatt Macy 	 * [see comment in dsl_scan_done()]. Thus spa_vdev_resilver_done()
7733eda14cbcSMatt Macy 	 * should never get here when we have a checkpoint.
77347877fdebSMatt Macy 	 *
77357877fdebSMatt Macy 	 * That said, even if we checkpoint the pool exactly as
77367877fdebSMatt Macy 	 * spa_vdev_resilver_done() calls this function, everything
77377877fdebSMatt Macy 	 * should be fine as the resilver will return right away.
77387877fdebSMatt Macy 	 */
7739eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
77407877fdebSMatt Macy 	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
77417877fdebSMatt Macy 		error = (spa_has_checkpoint(spa)) ?
7742eda14cbcSMatt Macy 		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
7743eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, error));
7744eda14cbcSMatt Macy 	}
7745eda14cbcSMatt Macy 
7746eda14cbcSMatt Macy 	if (vd == NULL)
7747eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
7748eda14cbcSMatt Macy 
7749eda14cbcSMatt Macy 	if (!vd->vdev_ops->vdev_op_leaf)
7750eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
7751dbd5678dSMartin Matuska 
7752eda14cbcSMatt Macy 	pvd = vd->vdev_parent;
7753eda14cbcSMatt Macy 
7754eda14cbcSMatt Macy 	/*
7755eda14cbcSMatt Macy 	 * If the parent/child relationship is not as expected, don't do it.
7756eda14cbcSMatt Macy 	 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
7757eda14cbcSMatt Macy 	 * vdev that's replacing B with C.  The user's intent in replacing
7758eda14cbcSMatt Macy 	 * is to go from M(A,B) to M(A,C).  If the user decides to cancel
7759eda14cbcSMatt Macy 	 * the replace by detaching C, the expected behavior is to end up
7760eda14cbcSMatt Macy 	 * M(A,B).  But suppose that right after deciding to detach C,
7761eda14cbcSMatt Macy 	 * the replacement of B completes.  We would have M(A,C), and then
7762eda14cbcSMatt Macy 	 * ask to detach C, which would leave us with just A -- not what
7763eda14cbcSMatt Macy 	 * the user wanted.  To prevent this, we make sure that the
7764eda14cbcSMatt Macy 	 * parent/child relationship hasn't changed -- in this example,
7765eda14cbcSMatt Macy 	 * that C's parent is still the replacing vdev R.
7766eda14cbcSMatt Macy 	 */
7767eda14cbcSMatt Macy 	if (pvd->vdev_guid != pguid && pguid != 0)
7768eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
7769eda14cbcSMatt Macy 
7770eda14cbcSMatt Macy 	/*
7771eda14cbcSMatt Macy 	 * With replace_done set, only detach from 'replacing' or 'spare' vdevs.
7772eda14cbcSMatt Macy 	 */
7773eda14cbcSMatt Macy 	if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
7774eda14cbcSMatt Macy 	    pvd->vdev_ops != &vdev_spare_ops)
7775eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
7776eda14cbcSMatt Macy 
7777eda14cbcSMatt Macy 	ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
7778eda14cbcSMatt Macy 	    spa_version(spa) >= SPA_VERSION_SPARES);
7779eda14cbcSMatt Macy 
7780eda14cbcSMatt Macy 	/*
7781eda14cbcSMatt Macy 	 * Only mirror, replacing, and spare vdevs support detach.
7782eda14cbcSMatt Macy 	 */
7783eda14cbcSMatt Macy 	if (pvd->vdev_ops != &vdev_replacing_ops &&
7784eda14cbcSMatt Macy 	    pvd->vdev_ops != &vdev_mirror_ops &&
7785eda14cbcSMatt Macy 	    pvd->vdev_ops != &vdev_spare_ops)
7786eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
7787eda14cbcSMatt Macy 
7788eda14cbcSMatt Macy 	/*
7789eda14cbcSMatt Macy 	 * If this device has the only valid copy of some data,
7790eda14cbcSMatt Macy 	 * we cannot safely detach it.
7791eda14cbcSMatt Macy 	 */
7792eda14cbcSMatt Macy 	if (vdev_dtl_required(vd))
7793eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
7794eda14cbcSMatt Macy 
7795eda14cbcSMatt Macy 	ASSERT(pvd->vdev_children >= 2);
7796eda14cbcSMatt Macy 
7797eda14cbcSMatt Macy 	/*
7798eda14cbcSMatt Macy 	 * If we are detaching the second disk from a replacing vdev, then
7799eda14cbcSMatt Macy 	 * check to see if we changed the original vdev's path to have "/old"
7800eda14cbcSMatt Macy 	 * at the end in spa_vdev_attach().  If so, undo that change now.
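	 * For example, cancelling an in-place replacement of /dev/sda by
	 * detaching the new disk restores the original's path from
	 * "/dev/sda/old" back to "/dev/sda" (path illustrative).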
7801eda14cbcSMatt Macy 	 */
7802eda14cbcSMatt Macy 	if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
7803eda14cbcSMatt Macy 	    vd->vdev_path != NULL) {
7804eda14cbcSMatt Macy 		size_t len = strlen(vd->vdev_path);
7805eda14cbcSMatt Macy 
7806eda14cbcSMatt Macy 		for (int c = 0; c < pvd->vdev_children; c++) {
7807eda14cbcSMatt Macy 			cvd = pvd->vdev_child[c];
7808eda14cbcSMatt Macy 
7809eda14cbcSMatt Macy 			if (cvd == vd || cvd->vdev_path == NULL)
7810eda14cbcSMatt Macy 				continue;
7811eda14cbcSMatt Macy 
7812eda14cbcSMatt Macy 			if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
7813eda14cbcSMatt Macy 			    strcmp(cvd->vdev_path + len, "/old") == 0) {
7814eda14cbcSMatt Macy 				spa_strfree(cvd->vdev_path);
7815eda14cbcSMatt Macy 				cvd->vdev_path = spa_strdup(vd->vdev_path);
7816eda14cbcSMatt Macy 				break;
7817eda14cbcSMatt Macy 			}
7818eda14cbcSMatt Macy 		}
7819eda14cbcSMatt Macy 	}
7820eda14cbcSMatt Macy 
7821eda14cbcSMatt Macy 	/*
7822eda14cbcSMatt Macy 	 * If we are detaching the original disk from a normal spare, then it
7823eda14cbcSMatt Macy 	 * implies that the spare should become a real disk, and be removed
7824eda14cbcSMatt Macy 	 * from the active spare list for the pool.  dRAID spares on the
7825eda14cbcSMatt Macy 	 * other hand are coupled to the pool and thus should never be removed
7826eda14cbcSMatt Macy 	 * from the spares list.
7827eda14cbcSMatt Macy 	 */
7828eda14cbcSMatt Macy 	if (pvd->vdev_ops == &vdev_spare_ops && vd->vdev_id == 0) {
7829eda14cbcSMatt Macy 		vdev_t *last_cvd = pvd->vdev_child[pvd->vdev_children - 1];
7830eda14cbcSMatt Macy 
7831eda14cbcSMatt Macy 		if (last_cvd->vdev_isspare &&
7832eda14cbcSMatt Macy 		    last_cvd->vdev_ops != &vdev_draid_spare_ops) {
7833eda14cbcSMatt Macy 			unspare = B_TRUE;
7834eda14cbcSMatt Macy 		}
7835eda14cbcSMatt Macy 	}
7836eda14cbcSMatt Macy 
7837eda14cbcSMatt Macy 	/*
7838eda14cbcSMatt Macy 	 * Erase the disk labels so the disk can be used for other things.
7839eda14cbcSMatt Macy 	 * This must be done after all other error cases are handled,
7840eda14cbcSMatt Macy 	 * but before we disembowel vd (so we can still do I/O to it).
7841eda14cbcSMatt Macy 	 * But if we can't do it, don't treat the error as fatal --
7842eda14cbcSMatt Macy 	 * it may be that the unwritability of the disk is the reason
7843eda14cbcSMatt Macy 	 * it's being detached!
7844eda14cbcSMatt Macy 	 */
7845eda14cbcSMatt Macy 	(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
7846eda14cbcSMatt Macy 
7847eda14cbcSMatt Macy 	/*
7848eda14cbcSMatt Macy 	 * Remove vd from its parent and compact the parent's children.
7849eda14cbcSMatt Macy 	 */
7850eda14cbcSMatt Macy 	vdev_remove_child(pvd, vd);
7851eda14cbcSMatt Macy 	vdev_compact_children(pvd);
7852eda14cbcSMatt Macy 
7853eda14cbcSMatt Macy 	/*
7854eda14cbcSMatt Macy 	 * Remember one of the remaining children so we can get tvd below.
7855eda14cbcSMatt Macy 	 */
7856eda14cbcSMatt Macy 	cvd = pvd->vdev_child[pvd->vdev_children - 1];
7857eda14cbcSMatt Macy 
7858eda14cbcSMatt Macy 	/*
7859eda14cbcSMatt Macy 	 * If we need to remove the remaining child from the list of hot spares,
7860eda14cbcSMatt Macy 	 * do it now, marking the vdev as no longer a spare in the process.
7861eda14cbcSMatt Macy 	 * We must do this before vdev_remove_parent(), because that can
7862eda14cbcSMatt Macy 	 * change the GUID if it creates a new toplevel GUID.  For a similar
7863eda14cbcSMatt Macy 	 * reason, we must remove the spare now, in the same txg as the detach;
7864eda14cbcSMatt Macy 	 * otherwise someone could attach a new sibling, change the GUID, and
7865eda14cbcSMatt Macy 	 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
7866eda14cbcSMatt Macy 	 */
7867eda14cbcSMatt Macy 	if (unspare) {
7868eda14cbcSMatt Macy 		ASSERT(cvd->vdev_isspare);
7869eda14cbcSMatt Macy 		spa_spare_remove(cvd);
7870eda14cbcSMatt Macy 		unspare_guid = cvd->vdev_guid;
7871eda14cbcSMatt Macy 		(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
7872eda14cbcSMatt Macy 		cvd->vdev_unspare = B_TRUE;
7873eda14cbcSMatt Macy 	}
7874eda14cbcSMatt Macy 
7875eda14cbcSMatt Macy 	/*
7876eda14cbcSMatt Macy 	 * If the parent mirror/replacing vdev only has one child,
7877eda14cbcSMatt Macy 	 * the parent is no longer needed.  Remove it from the tree.
7878eda14cbcSMatt Macy 	 */
7879eda14cbcSMatt Macy 	if (pvd->vdev_children == 1) {
7880eda14cbcSMatt Macy 		if (pvd->vdev_ops == &vdev_spare_ops)
7881eda14cbcSMatt Macy 			cvd->vdev_unspare = B_FALSE;
7882eda14cbcSMatt Macy 		vdev_remove_parent(cvd);
7883eda14cbcSMatt Macy 	}
7884eda14cbcSMatt Macy 
7885eda14cbcSMatt Macy 	/*
7886eda14cbcSMatt Macy 	 * We don't set tvd until now because the parent we just removed
7887eda14cbcSMatt Macy 	 * may have been the previous top-level vdev.
7888eda14cbcSMatt Macy 	 */
7889eda14cbcSMatt Macy 	tvd = cvd->vdev_top;
7890eda14cbcSMatt Macy 	ASSERT(tvd->vdev_parent == rvd);
7891eda14cbcSMatt Macy 
7892eda14cbcSMatt Macy 	/*
7893eda14cbcSMatt Macy 	 * Reevaluate the parent vdev state.
7894eda14cbcSMatt Macy 	 */
7895eda14cbcSMatt Macy 	vdev_propagate_state(cvd);
7896eda14cbcSMatt Macy 
7897eda14cbcSMatt Macy 	/*
7898eda14cbcSMatt Macy 	 * If the 'autoexpand' property is set on the pool then automatically
7899eda14cbcSMatt Macy 	 * try to expand the size of the pool. For example if the device we
7900eda14cbcSMatt Macy 	 * just detached was smaller than the others, it may be possible to
7901eda14cbcSMatt Macy 	 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
7902eda14cbcSMatt Macy 	 * first so that we can obtain the updated sizes of the leaf vdevs.
7903eda14cbcSMatt Macy 	 */
7904eda14cbcSMatt Macy 	if (spa->spa_autoexpand) {
7905eda14cbcSMatt Macy 		vdev_reopen(tvd);
7906eda14cbcSMatt Macy 		vdev_expand(tvd, txg);
7907e716630dSMartin Matuska 	}
7908eda14cbcSMatt Macy 
7909eda14cbcSMatt Macy 	vdev_config_dirty(tvd);
7910eda14cbcSMatt Macy 
7911eda14cbcSMatt Macy 	/*
7912eda14cbcSMatt Macy 	 * Mark vd's DTL as dirty in this txg.  vdev_dtl_sync() will see that
7913eda14cbcSMatt Macy 	 * vd->vdev_detached is set and free vd's DTL object in syncing context.
7914eda14cbcSMatt Macy 	 * But first make sure we're not on any *other* txg's DTL list, to
7915eda14cbcSMatt Macy 	 * prevent vd from being accessed after it's freed.
7916eda14cbcSMatt Macy 	 */
7917eda14cbcSMatt Macy 	vdpath = spa_strdup(vd->vdev_path ? vd->vdev_path : "none");
7918eda14cbcSMatt Macy 	for (int t = 0; t < TXG_SIZE; t++)
7919c0a83fe0SMartin Matuska 		(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
7920c0a83fe0SMartin Matuska 	vd->vdev_detached = B_TRUE;
7921c0a83fe0SMartin Matuska 	vdev_dirty(tvd, VDD_DTL, vd, txg);
7922c0a83fe0SMartin Matuska 
7923eda14cbcSMatt Macy 	spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
7924eda14cbcSMatt Macy 	spa_notify_waiters(spa);
7925eda14cbcSMatt Macy 
7926eda14cbcSMatt Macy 	/* hang on to the spa before we release the lock */
7927eda14cbcSMatt Macy 	spa_open_ref(spa, FTAG);
7928eda14cbcSMatt Macy 
7929eda14cbcSMatt Macy 	error = spa_vdev_exit(spa, vd, txg, 0);
7930eda14cbcSMatt Macy 
7931eda14cbcSMatt Macy 	spa_history_log_internal(spa, "detach", NULL,
7932eda14cbcSMatt Macy 	    "vdev=%s", vdpath);
7933eda14cbcSMatt Macy 	spa_strfree(vdpath);
7934eda14cbcSMatt Macy 
7935c0a83fe0SMartin Matuska 	/*
7936c0a83fe0SMartin Matuska 	 * If this was the removal of the original device in a hot spare vdev,
7937c0a83fe0SMartin Matuska 	 * then we want to go through and remove the device from the hot spare
7938eda14cbcSMatt Macy 	 * list of every other pool.
7939eda14cbcSMatt Macy 	 */
7940eda14cbcSMatt Macy 	if (unspare) {
7941eda14cbcSMatt Macy 		spa_t *altspa = NULL;
7942eda14cbcSMatt Macy 
7943eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
7944eda14cbcSMatt Macy 		while ((altspa = spa_next(altspa)) != NULL) {
7945eda14cbcSMatt Macy 			if (altspa->spa_state != POOL_STATE_ACTIVE ||
7946eda14cbcSMatt Macy 			    altspa == spa)
7947eda14cbcSMatt Macy 				continue;
7948eda14cbcSMatt Macy 
7949eda14cbcSMatt Macy 			spa_open_ref(altspa, FTAG);
7950eda14cbcSMatt Macy 			mutex_exit(&spa_namespace_lock);
7951eda14cbcSMatt Macy 			(void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
7952eda14cbcSMatt Macy 			mutex_enter(&spa_namespace_lock);
7953eda14cbcSMatt Macy 			spa_close(altspa, FTAG);
7954eda14cbcSMatt Macy 		}
7955eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
7956eda14cbcSMatt Macy 
7957eda14cbcSMatt Macy 		/* search the rest of the vdevs for spares to remove */
7958eda14cbcSMatt Macy 		spa_vdev_resilver_done(spa);
7959eda14cbcSMatt Macy 	}
7960eda14cbcSMatt Macy 
7961eda14cbcSMatt Macy 	/* all done with the spa; OK to release */
7962eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
7963eda14cbcSMatt Macy 	spa_close(spa, FTAG);
7964eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
7965eda14cbcSMatt Macy 
7966eda14cbcSMatt Macy 	return (error);
7967eda14cbcSMatt Macy }
7968eda14cbcSMatt Macy 
7969eda14cbcSMatt Macy static int
7970eda14cbcSMatt Macy spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
7971eda14cbcSMatt Macy     list_t *vd_list)
7972eda14cbcSMatt Macy {
7973eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
7974eda14cbcSMatt Macy 
7975eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
7976eda14cbcSMatt Macy 
7977eda14cbcSMatt Macy 	/* Look up vdev and ensure it's a leaf. */
7978eda14cbcSMatt Macy 	vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
7979eda14cbcSMatt Macy 	if (vd == NULL || vd->vdev_detached) {
7980eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7981eda14cbcSMatt Macy 		return (SET_ERROR(ENODEV));
7982eda14cbcSMatt Macy 	} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
7983eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7984eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
7985eda14cbcSMatt Macy 	} else if (!vdev_writeable(vd)) {
7986eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7987eda14cbcSMatt Macy 		return (SET_ERROR(EROFS));
7988eda14cbcSMatt Macy 	}
7989eda14cbcSMatt Macy 	mutex_enter(&vd->vdev_initialize_lock);
7990eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
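	/*
	 * From here on, the per-vdev initialize lock alone serializes the
	 * checks below; the config/state locks were needed only to safely
	 * look up and validate vd.
	 */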
7991eda14cbcSMatt Macy 
7992eda14cbcSMatt Macy 	/*
7993eda14cbcSMatt Macy 	 * When we activate an initialize action we check to see
7994eda14cbcSMatt Macy 	 * if the vdev_initialize_thread is NULL. We do this instead
7995eda14cbcSMatt Macy 	 * of using the vdev_initialize_state since there might be
7996eda14cbcSMatt Macy 	 * a previous initialization process which has completed but
7997eda14cbcSMatt Macy 	 * whose thread has not yet exited.
7998eda14cbcSMatt Macy 	 */
7999eda14cbcSMatt Macy 	if (cmd_type == POOL_INITIALIZE_START &&
8000eda14cbcSMatt Macy 	    (vd->vdev_initialize_thread != NULL ||
8001eda14cbcSMatt Macy 	    vd->vdev_top->vdev_removing || vd->vdev_top->vdev_rz_expanding)) {
8002eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_initialize_lock);
8003eda14cbcSMatt Macy 		return (SET_ERROR(EBUSY));
8004eda14cbcSMatt Macy 	} else if (cmd_type == POOL_INITIALIZE_CANCEL &&
8005eda14cbcSMatt Macy 	    (vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE &&
8006eda14cbcSMatt Macy 	    vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) {
8007eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_initialize_lock);
8008eda14cbcSMatt Macy 		return (SET_ERROR(ESRCH));
8009eda14cbcSMatt Macy 	} else if (cmd_type == POOL_INITIALIZE_SUSPEND &&
8010eda14cbcSMatt Macy 	    vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) {
8011eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_initialize_lock);
8012eda14cbcSMatt Macy 		return (SET_ERROR(ESRCH));
8013eda14cbcSMatt Macy 	} else if (cmd_type == POOL_INITIALIZE_UNINIT &&
8014eda14cbcSMatt Macy 	    vd->vdev_initialize_thread != NULL) {
8015eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_initialize_lock);
8016eda14cbcSMatt Macy 		return (SET_ERROR(EBUSY));
8017eda14cbcSMatt Macy 	}
8018eda14cbcSMatt Macy 
8019eda14cbcSMatt Macy 	switch (cmd_type) {
8020eda14cbcSMatt Macy 	case POOL_INITIALIZE_START:
8021eda14cbcSMatt Macy 		vdev_initialize(vd);
8022eda14cbcSMatt Macy 		break;
8023eda14cbcSMatt Macy 	case POOL_INITIALIZE_CANCEL:
8024eda14cbcSMatt Macy 		vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, vd_list);
8025eda14cbcSMatt Macy 		break;
8026eda14cbcSMatt Macy 	case POOL_INITIALIZE_SUSPEND:
8027eda14cbcSMatt Macy 		vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED, vd_list);
8028eda14cbcSMatt Macy 		break;
8029e716630dSMartin Matuska 	case POOL_INITIALIZE_UNINIT:
8030e716630dSMartin Matuska 		vdev_uninitialize(vd);
8031eda14cbcSMatt Macy 		break;
8032eda14cbcSMatt Macy 	default:
8033eda14cbcSMatt Macy 		panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
8034eda14cbcSMatt Macy 	}
8035eda14cbcSMatt Macy 	mutex_exit(&vd->vdev_initialize_lock);
8036eda14cbcSMatt Macy 
8037eda14cbcSMatt Macy 	return (0);
8038eda14cbcSMatt Macy }
8039eda14cbcSMatt Macy 
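/*
 * Initiate, cancel, suspend, or uninitialize (per 'cmd_type') manual
 * initialization of the leaf vdevs named by guid in 'nv'.  Per-vdev
 * failures are recorded in 'vdev_errlist', keyed by the guid rendered as
 * a decimal string, and the return value is the number of vdevs that
 * failed.
 */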
8040eda14cbcSMatt Macy int
8041eda14cbcSMatt Macy spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
8042eda14cbcSMatt Macy     nvlist_t *vdev_errlist)
8043eda14cbcSMatt Macy {
8044eda14cbcSMatt Macy 	int total_errors = 0;
8045eda14cbcSMatt Macy 	list_t vd_list;
8046eda14cbcSMatt Macy 
8047eda14cbcSMatt Macy 	list_create(&vd_list, sizeof (vdev_t),
8048eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_initialize_node));
8049eda14cbcSMatt Macy 
8050eda14cbcSMatt Macy 	/*
8051eda14cbcSMatt Macy 	 * We hold the namespace lock through the whole function
8052eda14cbcSMatt Macy 	 * to prevent any changes to the pool while we're starting or
8053eda14cbcSMatt Macy 	 * stopping initialization. The config and state locks are held so that
8054eda14cbcSMatt Macy 	 * we can properly assess the vdev state before we commit to
8055eda14cbcSMatt Macy 	 * the initializing operation.
8056eda14cbcSMatt Macy 	 */
8057eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
8058eda14cbcSMatt Macy 
8059eda14cbcSMatt Macy 	for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
8060eda14cbcSMatt Macy 	    pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
8061eda14cbcSMatt Macy 		uint64_t vdev_guid = fnvpair_value_uint64(pair);
8062eda14cbcSMatt Macy 
8063eda14cbcSMatt Macy 		int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type,
8064eda14cbcSMatt Macy 		    &vd_list);
8065eda14cbcSMatt Macy 		if (error != 0) {
8066eda14cbcSMatt Macy 			char guid_as_str[MAXNAMELEN];
8067eda14cbcSMatt Macy 
8068eda14cbcSMatt Macy 			(void) snprintf(guid_as_str, sizeof (guid_as_str),
8069eda14cbcSMatt Macy 			    "%llu", (unsigned long long)vdev_guid);
8070eda14cbcSMatt Macy 			fnvlist_add_int64(vdev_errlist, guid_as_str, error);
8071eda14cbcSMatt Macy 			total_errors++;
8072eda14cbcSMatt Macy 		}
8073eda14cbcSMatt Macy 	}
8074eda14cbcSMatt Macy 
8075eda14cbcSMatt Macy 	/* Wait for all initialize threads to stop. */
8076eda14cbcSMatt Macy 	vdev_initialize_stop_wait(spa, &vd_list);
8077eda14cbcSMatt Macy 
8078eda14cbcSMatt Macy 	/* Sync out the initializing state */
8079eda14cbcSMatt Macy 	txg_wait_synced(spa->spa_dsl_pool, 0);
8080eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
8081eda14cbcSMatt Macy 
8082eda14cbcSMatt Macy 	list_destroy(&vd_list);
8083eda14cbcSMatt Macy 
8084eda14cbcSMatt Macy 	return (total_errors);
8085eda14cbcSMatt Macy }
8086eda14cbcSMatt Macy 
8087eda14cbcSMatt Macy static int
8088eda14cbcSMatt Macy spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
8089eda14cbcSMatt Macy     uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list)
8090eda14cbcSMatt Macy {
8091eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
8092eda14cbcSMatt Macy 
8093eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
8094eda14cbcSMatt Macy 
8095eda14cbcSMatt Macy 	/* Look up vdev and ensure it's a leaf. */
8096eda14cbcSMatt Macy 	vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
8097eda14cbcSMatt Macy 	if (vd == NULL || vd->vdev_detached) {
8098eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8099eda14cbcSMatt Macy 		return (SET_ERROR(ENODEV));
8100eda14cbcSMatt Macy 	} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
8101eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8102eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
8103eda14cbcSMatt Macy 	} else if (!vdev_writeable(vd)) {
8104eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8105eda14cbcSMatt Macy 		return (SET_ERROR(EROFS));
8106eda14cbcSMatt Macy 	} else if (!vd->vdev_has_trim) {
8107eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8108eda14cbcSMatt Macy 		return (SET_ERROR(EOPNOTSUPP));
8109eda14cbcSMatt Macy 	} else if (secure && !vd->vdev_has_securetrim) {
8110eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8111eda14cbcSMatt Macy 		return (SET_ERROR(EOPNOTSUPP));
8112eda14cbcSMatt Macy 	}
8113eda14cbcSMatt Macy 	mutex_enter(&vd->vdev_trim_lock);
8114eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8115eda14cbcSMatt Macy 
8116eda14cbcSMatt Macy 	/*
8117eda14cbcSMatt Macy 	 * When we activate a TRIM action we check to see if the
8118a0b956f5SMartin Matuska 	 * vdev_trim_thread is NULL. We do this instead of using the
8119eda14cbcSMatt Macy 	 * vdev_trim_state since there might be a previous TRIM process
8120eda14cbcSMatt Macy 	 * which has completed but whose thread has not yet exited.
8121eda14cbcSMatt Macy 	 */
8122eda14cbcSMatt Macy 	if (cmd_type == POOL_TRIM_START &&
8123eda14cbcSMatt Macy 	    (vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing ||
8124eda14cbcSMatt Macy 	    vd->vdev_top->vdev_rz_expanding)) {
8125eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_trim_lock);
8126eda14cbcSMatt Macy 		return (SET_ERROR(EBUSY));
81272a58b312SMartin Matuska 	} else if (cmd_type == POOL_TRIM_CANCEL &&
8128eda14cbcSMatt Macy 	    (vd->vdev_trim_state != VDEV_TRIM_ACTIVE &&
8129eda14cbcSMatt Macy 	    vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) {
8130eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_trim_lock);
8131eda14cbcSMatt Macy 		return (SET_ERROR(ESRCH));
8132eda14cbcSMatt Macy 	} else if (cmd_type == POOL_TRIM_SUSPEND &&
8133eda14cbcSMatt Macy 	    vd->vdev_trim_state != VDEV_TRIM_ACTIVE) {
8134eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_trim_lock);
8135eda14cbcSMatt Macy 		return (SET_ERROR(ESRCH));
8136eda14cbcSMatt Macy 	}
8137eda14cbcSMatt Macy 
8138eda14cbcSMatt Macy 	switch (cmd_type) {
8139eda14cbcSMatt Macy 	case POOL_TRIM_START:
8140eda14cbcSMatt Macy 		vdev_trim(vd, rate, partial, secure);
8141eda14cbcSMatt Macy 		break;
8142eda14cbcSMatt Macy 	case POOL_TRIM_CANCEL:
8143eda14cbcSMatt Macy 		vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list);
8144eda14cbcSMatt Macy 		break;
8145eda14cbcSMatt Macy 	case POOL_TRIM_SUSPEND:
8146eda14cbcSMatt Macy 		vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list);
8147eda14cbcSMatt Macy 		break;
8148eda14cbcSMatt Macy 	default:
8149eda14cbcSMatt Macy 		panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
8150eda14cbcSMatt Macy 	}
8151eda14cbcSMatt Macy 	mutex_exit(&vd->vdev_trim_lock);
8152eda14cbcSMatt Macy 
8153eda14cbcSMatt Macy 	return (0);
8154eda14cbcSMatt Macy }
8155eda14cbcSMatt Macy 
8156eda14cbcSMatt Macy /*
8157eda14cbcSMatt Macy  * Initiates a manual TRIM for the requested vdevs. This kicks off individual
8158eda14cbcSMatt Macy  * TRIM threads for each child vdev.  These threads pass over all of the free
8159eda14cbcSMatt Macy  * space in the vdev's metaslabs and issue TRIM commands for that space.
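 *
 * A minimal caller sketch (hypothetical, for illustration only; nvpair
 * names are arbitrary, and a rate of 0 requests no explicit rate limit):
 *
 *	nvlist_t *nv = fnvlist_alloc();
 *	nvlist_t *errlist = fnvlist_alloc();
 *	fnvlist_add_uint64(nv, "0", vd->vdev_guid);
 *	int err = spa_vdev_trim(spa, nv, POOL_TRIM_START, 0, B_FALSE,
 *	    B_FALSE, errlist);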
8160eda14cbcSMatt Macy  */
8161eda14cbcSMatt Macy int
8162eda14cbcSMatt Macy spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
8163eda14cbcSMatt Macy     boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist)
8164eda14cbcSMatt Macy {
8165eda14cbcSMatt Macy 	int total_errors = 0;
8166eda14cbcSMatt Macy 	list_t vd_list;
8167eda14cbcSMatt Macy 
8168eda14cbcSMatt Macy 	list_create(&vd_list, sizeof (vdev_t),
8169eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_trim_node));
8170eda14cbcSMatt Macy 
8171eda14cbcSMatt Macy 	/*
8172eda14cbcSMatt Macy 	 * We hold the namespace lock through the whole function
8173eda14cbcSMatt Macy 	 * to prevent any changes to the pool while we're starting or
8174eda14cbcSMatt Macy 	 * stopping TRIM. The config and state locks are held so that
8175eda14cbcSMatt Macy 	 * we can properly assess the vdev state before we commit to
8176eda14cbcSMatt Macy 	 * the TRIM operation.
8177eda14cbcSMatt Macy 	 */
8178eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
8179eda14cbcSMatt Macy 
8180eda14cbcSMatt Macy 	for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
8181eda14cbcSMatt Macy 	    pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
8182eda14cbcSMatt Macy 		uint64_t vdev_guid = fnvpair_value_uint64(pair);
8183eda14cbcSMatt Macy 
8184eda14cbcSMatt Macy 		int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type,
8185eda14cbcSMatt Macy 		    rate, partial, secure, &vd_list);
8186eda14cbcSMatt Macy 		if (error != 0) {
8187eda14cbcSMatt Macy 			char guid_as_str[MAXNAMELEN];
8188eda14cbcSMatt Macy 
8189eda14cbcSMatt Macy 			(void) snprintf(guid_as_str, sizeof (guid_as_str),
8190eda14cbcSMatt Macy 			    "%llu", (unsigned long long)vdev_guid);
8191eda14cbcSMatt Macy 			fnvlist_add_int64(vdev_errlist, guid_as_str, error);
8192eda14cbcSMatt Macy 			total_errors++;
8193eda14cbcSMatt Macy 		}
8194eda14cbcSMatt Macy 	}
8195eda14cbcSMatt Macy 
8196eda14cbcSMatt Macy 	/* Wait for all TRIM threads to stop. */
8197eda14cbcSMatt Macy 	vdev_trim_stop_wait(spa, &vd_list);
8198eda14cbcSMatt Macy 
8199eda14cbcSMatt Macy 	/* Sync out the TRIM state */
8200eda14cbcSMatt Macy 	txg_wait_synced(spa->spa_dsl_pool, 0);
8201eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
8202eda14cbcSMatt Macy 
8203eda14cbcSMatt Macy 	list_destroy(&vd_list);
8204eda14cbcSMatt Macy 
8205eda14cbcSMatt Macy 	return (total_errors);
8206eda14cbcSMatt Macy }
8207eda14cbcSMatt Macy 
8208eda14cbcSMatt Macy /*
8209eda14cbcSMatt Macy  * Split a set of devices from their mirrors, and create a new pool from them.
8210eda14cbcSMatt Macy  */
8211eda14cbcSMatt Macy int
8212eda14cbcSMatt Macy spa_vdev_split_mirror(spa_t *spa, const char *newname, nvlist_t *config,
8213eda14cbcSMatt Macy     nvlist_t *props, boolean_t exp)
8214eda14cbcSMatt Macy {
8215eda14cbcSMatt Macy 	int error = 0;
8216eda14cbcSMatt Macy 	uint64_t txg, *glist;
8217eda14cbcSMatt Macy 	spa_t *newspa;
8218eda14cbcSMatt Macy 	uint_t c, children, lastlog;
8219eda14cbcSMatt Macy 	nvlist_t **child, *nvl, *tmp;
8220eda14cbcSMatt Macy 	dmu_tx_t *tx;
8221eda14cbcSMatt Macy 	const char *altroot = NULL;
8222eda14cbcSMatt Macy 	vdev_t *rvd, **vml = NULL;			/* vdev modify list */
8223eda14cbcSMatt Macy 	boolean_t activate_slog;
8224eda14cbcSMatt Macy 
8225eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
8226eda14cbcSMatt Macy 
8227eda14cbcSMatt Macy 	txg = spa_vdev_enter(spa);
8228eda14cbcSMatt Macy 
8229eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
8230eda14cbcSMatt Macy 	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
8231eda14cbcSMatt Macy 		error = (spa_has_checkpoint(spa)) ?
8232eda14cbcSMatt Macy 		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
8233eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, error));
8234eda14cbcSMatt Macy 	}
8235eda14cbcSMatt Macy 
8236eda14cbcSMatt Macy 	/* clear the log and flush everything up to now */
8237eda14cbcSMatt Macy 	activate_slog = spa_passivate_log(spa);
8238eda14cbcSMatt Macy 	(void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
8239eda14cbcSMatt Macy 	error = spa_reset_logs(spa);
8240eda14cbcSMatt Macy 	txg = spa_vdev_config_enter(spa);
8241eda14cbcSMatt Macy 
8242eda14cbcSMatt Macy 	if (activate_slog)
8243eda14cbcSMatt Macy 		spa_activate_log(spa);
8244eda14cbcSMatt Macy 
8245eda14cbcSMatt Macy 	if (error != 0)
8246eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, error));
8247eda14cbcSMatt Macy 
8248eda14cbcSMatt Macy 	/* check new spa name before going any further */
8249eda14cbcSMatt Macy 	if (spa_lookup(newname) != NULL)
825081b22a98SMartin Matuska 		return (spa_vdev_exit(spa, NULL, txg, EEXIST));
825181b22a98SMartin Matuska 
825281b22a98SMartin Matuska 	/*
825381b22a98SMartin Matuska 	 * scan through all the children to ensure they're all mirrors
825481b22a98SMartin Matuska 	 */
825581b22a98SMartin Matuska 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
825681b22a98SMartin Matuska 	    nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
825781b22a98SMartin Matuska 	    &children) != 0)
8258eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
8259eda14cbcSMatt Macy 
8260eda14cbcSMatt Macy 	/* first, check to ensure we've got the right child count */
8261eda14cbcSMatt Macy 	rvd = spa->spa_root_vdev;
8262eda14cbcSMatt Macy 	lastlog = 0;
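	/*
	 * 'lastlog' tracks where a trailing run of log/hole vdevs begins;
	 * the split config must supply one child per concrete top-level
	 * vdev ahead of that run.
	 */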
8263eda14cbcSMatt Macy 	for (c = 0; c < rvd->vdev_children; c++) {
8264eda14cbcSMatt Macy 		vdev_t *vd = rvd->vdev_child[c];
8265eda14cbcSMatt Macy 
8266eda14cbcSMatt Macy 		/* don't count the holes & logs as children */
8267eda14cbcSMatt Macy 		if (vd->vdev_islog || (vd->vdev_ops != &vdev_indirect_ops &&
8268eda14cbcSMatt Macy 		    !vdev_is_concrete(vd))) {
8269eda14cbcSMatt Macy 			if (lastlog == 0)
8270eda14cbcSMatt Macy 				lastlog = c;
8271eda14cbcSMatt Macy 			continue;
8272eda14cbcSMatt Macy 		}
8273eda14cbcSMatt Macy 
8274eda14cbcSMatt Macy 		lastlog = 0;
8275eda14cbcSMatt Macy 	}
8276eda14cbcSMatt Macy 	if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
8277eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
8278eda14cbcSMatt Macy 
8279eda14cbcSMatt Macy 	/* next, ensure no spare or cache devices are part of the split */
8280eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
8281eda14cbcSMatt Macy 	    nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
8282eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
8283eda14cbcSMatt Macy 
8284eda14cbcSMatt Macy 	vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
8285eda14cbcSMatt Macy 	glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
8286eda14cbcSMatt Macy 
828781b22a98SMartin Matuska 	/* then, loop over each vdev and validate it */
828881b22a98SMartin Matuska 	for (c = 0; c < children; c++) {
8289eda14cbcSMatt Macy 		uint64_t is_hole = 0;
8290eda14cbcSMatt Macy 
8291eda14cbcSMatt Macy 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
829281b22a98SMartin Matuska 		    &is_hole);
8293eda14cbcSMatt Macy 
8294eda14cbcSMatt Macy 		if (is_hole != 0) {
8295eda14cbcSMatt Macy 			if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
8296eda14cbcSMatt Macy 			    spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
8297eda14cbcSMatt Macy 				continue;
829881b22a98SMartin Matuska 			} else {
829981b22a98SMartin Matuska 				error = SET_ERROR(EINVAL);
830081b22a98SMartin Matuska 				break;
830181b22a98SMartin Matuska 			}
830281b22a98SMartin Matuska 		}
830381b22a98SMartin Matuska 
830481b22a98SMartin Matuska 		/* deal with indirect vdevs */
8305eda14cbcSMatt Macy 		if (spa->spa_root_vdev->vdev_child[c]->vdev_ops ==
8306eda14cbcSMatt Macy 		    &vdev_indirect_ops)
8307eda14cbcSMatt Macy 			continue;
8308eda14cbcSMatt Macy 
8309eda14cbcSMatt Macy 		/* which disk is going to be split? */
8310eda14cbcSMatt Macy 		if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
8311eda14cbcSMatt Macy 		    &glist[c]) != 0) {
8312eda14cbcSMatt Macy 			error = SET_ERROR(EINVAL);
8313eda14cbcSMatt Macy 			break;
8314eda14cbcSMatt Macy 		}
8315eda14cbcSMatt Macy 
8316eda14cbcSMatt Macy 		/* look it up in the spa */
8317eda14cbcSMatt Macy 		vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
8318eda14cbcSMatt Macy 		if (vml[c] == NULL) {
8319eda14cbcSMatt Macy 			error = SET_ERROR(ENODEV);
8320eda14cbcSMatt Macy 			break;
8321eda14cbcSMatt Macy 		}
8322eda14cbcSMatt Macy 
8323eda14cbcSMatt Macy 		/* make sure there's nothing stopping the split */
8324eda14cbcSMatt Macy 		if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
8325eda14cbcSMatt Macy 		    vml[c]->vdev_islog ||
8326eda14cbcSMatt Macy 		    !vdev_is_concrete(vml[c]) ||
8327eda14cbcSMatt Macy 		    vml[c]->vdev_isspare ||
8328eda14cbcSMatt Macy 		    vml[c]->vdev_isl2cache ||
8329eda14cbcSMatt Macy 		    !vdev_writeable(vml[c]) ||
8330eda14cbcSMatt Macy 		    vml[c]->vdev_children != 0 ||
8331eda14cbcSMatt Macy 		    vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
8332eda14cbcSMatt Macy 		    c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
8333eda14cbcSMatt Macy 			error = SET_ERROR(EINVAL);
8334eda14cbcSMatt Macy 			break;
8335eda14cbcSMatt Macy 		}
8336eda14cbcSMatt Macy 
8337eda14cbcSMatt Macy 		if (vdev_dtl_required(vml[c]) ||
8338eda14cbcSMatt Macy 		    vdev_resilver_needed(vml[c], NULL, NULL)) {
8339eda14cbcSMatt Macy 			error = SET_ERROR(EBUSY);
8340eda14cbcSMatt Macy 			break;
8341eda14cbcSMatt Macy 		}
8342eda14cbcSMatt Macy 
8343eda14cbcSMatt Macy 		/* we need certain info from the top level */
8344eda14cbcSMatt Macy 		fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
8345eda14cbcSMatt Macy 		    vml[c]->vdev_top->vdev_ms_array);
8346eda14cbcSMatt Macy 		fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
8347eda14cbcSMatt Macy 		    vml[c]->vdev_top->vdev_ms_shift);
8348eda14cbcSMatt Macy 		fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
8349eda14cbcSMatt Macy 		    vml[c]->vdev_top->vdev_asize);
8350eda14cbcSMatt Macy 		fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
8351eda14cbcSMatt Macy 		    vml[c]->vdev_top->vdev_ashift);
8352eda14cbcSMatt Macy 
8353eda14cbcSMatt Macy 		/* transfer per-vdev ZAPs */
8354eda14cbcSMatt Macy 		ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
8355eda14cbcSMatt Macy 		VERIFY0(nvlist_add_uint64(child[c],
8356eda14cbcSMatt Macy 		    ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
8357eda14cbcSMatt Macy 
8358eda14cbcSMatt Macy 		ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
8359eda14cbcSMatt Macy 		VERIFY0(nvlist_add_uint64(child[c],
8360eda14cbcSMatt Macy 		    ZPOOL_CONFIG_VDEV_TOP_ZAP,
8361eda14cbcSMatt Macy 		    vml[c]->vdev_parent->vdev_top_zap));
8362eda14cbcSMatt Macy 	}
8363eda14cbcSMatt Macy 
8364eda14cbcSMatt Macy 	if (error != 0) {
8365eda14cbcSMatt Macy 		kmem_free(vml, children * sizeof (vdev_t *));
836681b22a98SMartin Matuska 		kmem_free(glist, children * sizeof (uint64_t));
836781b22a98SMartin Matuska 		return (spa_vdev_exit(spa, NULL, txg, error));
836881b22a98SMartin Matuska 	}
8369eda14cbcSMatt Macy 
8370eda14cbcSMatt Macy 	/* stop writers from using the disks */
8371eda14cbcSMatt Macy 	for (c = 0; c < children; c++) {
8372eda14cbcSMatt Macy 		if (vml[c] != NULL)
8373eda14cbcSMatt Macy 			vml[c]->vdev_offline = B_TRUE;
8374eda14cbcSMatt Macy 	}
8375eda14cbcSMatt Macy 	vdev_reopen(spa->spa_root_vdev);
8376eda14cbcSMatt Macy 
8377eda14cbcSMatt Macy 	/*
8378eda14cbcSMatt Macy 	 * Temporarily record the splitting vdevs in the spa config.  This
8379eda14cbcSMatt Macy 	 * will disappear once the config is regenerated.
8380eda14cbcSMatt Macy 	 */
8381eda14cbcSMatt Macy 	nvl = fnvlist_alloc();
8382eda14cbcSMatt Macy 	fnvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, glist, children);
8383eda14cbcSMatt Macy 	kmem_free(glist, children * sizeof (uint64_t));
8384eda14cbcSMatt Macy 
8385eda14cbcSMatt Macy 	mutex_enter(&spa->spa_props_lock);
8386eda14cbcSMatt Macy 	fnvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, nvl);
8387eda14cbcSMatt Macy 	mutex_exit(&spa->spa_props_lock);
8388eda14cbcSMatt Macy 	spa->spa_config_splitting = nvl;
8389eda14cbcSMatt Macy 	vdev_config_dirty(spa->spa_root_vdev);
8390eda14cbcSMatt Macy 
8391eda14cbcSMatt Macy 	/* configure and create the new pool */
8392eda14cbcSMatt Macy 	fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname);
8393eda14cbcSMatt Macy 	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
8394eda14cbcSMatt Macy 	    exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE);
8395eda14cbcSMatt Macy 	fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, spa_version(spa));
8396eda14cbcSMatt Macy 	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, spa->spa_config_txg);
8397eda14cbcSMatt Macy 	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
8398eda14cbcSMatt Macy 	    spa_generate_guid(NULL));
8399eda14cbcSMatt Macy 	VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
8400eda14cbcSMatt Macy 	(void) nvlist_lookup_string(props,
8401eda14cbcSMatt Macy 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
8402eda14cbcSMatt Macy 
8403eda14cbcSMatt Macy 	/* add the new pool to the namespace */
8404eda14cbcSMatt Macy 	newspa = spa_add(newname, config, altroot);
8405eda14cbcSMatt Macy 	newspa->spa_avz_action = AVZ_ACTION_REBUILD;
8406eda14cbcSMatt Macy 	newspa->spa_config_txg = spa->spa_config_txg;
8407eda14cbcSMatt Macy 	spa_set_log_state(newspa, SPA_LOG_CLEAR);
8408eda14cbcSMatt Macy 
8409eda14cbcSMatt Macy 	/* release the spa config lock, retaining the namespace lock */
8410eda14cbcSMatt Macy 	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
8411eda14cbcSMatt Macy 
8412eda14cbcSMatt Macy 	if (zio_injection_enabled)
8413eda14cbcSMatt Macy 		zio_handle_panic_injection(spa, FTAG, 1);
8414eda14cbcSMatt Macy 
8415eda14cbcSMatt Macy 	spa_activate(newspa, spa_mode_global);
8416eda14cbcSMatt Macy 	spa_async_suspend(newspa);
8417eda14cbcSMatt Macy 
8418eda14cbcSMatt Macy 	/*
8419eda14cbcSMatt Macy 	 * Temporarily stop the initializing and TRIM activity.  We set the
8420eda14cbcSMatt Macy 	 * state to ACTIVE so that we know to resume initializing or TRIM
8421eda14cbcSMatt Macy 	 * once the split has completed.
8422eda14cbcSMatt Macy 	 */
8423eda14cbcSMatt Macy 	list_t vd_initialize_list;
8424eda14cbcSMatt Macy 	list_create(&vd_initialize_list, sizeof (vdev_t),
8425eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_initialize_node));
8426eda14cbcSMatt Macy 
8427eda14cbcSMatt Macy 	list_t vd_trim_list;
8428eda14cbcSMatt Macy 	list_create(&vd_trim_list, sizeof (vdev_t),
8429eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_trim_node));
8430eda14cbcSMatt Macy 
8431eda14cbcSMatt Macy 	for (c = 0; c < children; c++) {
8432eda14cbcSMatt Macy 		if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
8433eda14cbcSMatt Macy 			mutex_enter(&vml[c]->vdev_initialize_lock);
8434eda14cbcSMatt Macy 			vdev_initialize_stop(vml[c],
8435eda14cbcSMatt Macy 			    VDEV_INITIALIZE_ACTIVE, &vd_initialize_list);
8436eda14cbcSMatt Macy 			mutex_exit(&vml[c]->vdev_initialize_lock);
8437eda14cbcSMatt Macy 
8438eda14cbcSMatt Macy 			mutex_enter(&vml[c]->vdev_trim_lock);
8439eda14cbcSMatt Macy 			vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list);
8440eda14cbcSMatt Macy 			mutex_exit(&vml[c]->vdev_trim_lock);
8441eda14cbcSMatt Macy 		}
8442eda14cbcSMatt Macy 	}
8443eda14cbcSMatt Macy 
8444eda14cbcSMatt Macy 	vdev_initialize_stop_wait(spa, &vd_initialize_list);
8445eda14cbcSMatt Macy 	vdev_trim_stop_wait(spa, &vd_trim_list);
8446eda14cbcSMatt Macy 
8447eda14cbcSMatt Macy 	list_destroy(&vd_initialize_list);
8448eda14cbcSMatt Macy 	list_destroy(&vd_trim_list);
8449eda14cbcSMatt Macy 
8450eda14cbcSMatt Macy 	newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT;
8451eda14cbcSMatt Macy 	newspa->spa_is_splitting = B_TRUE;
8452eda14cbcSMatt Macy 
8453eda14cbcSMatt Macy 	/* create the new pool from the disks of the original pool */
8454eda14cbcSMatt Macy 	error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE);
8455eda14cbcSMatt Macy 	if (error)
8456eda14cbcSMatt Macy 		goto out;
8457eda14cbcSMatt Macy 
8458eda14cbcSMatt Macy 	/* if that worked, generate a real config for the new pool */
8459eda14cbcSMatt Macy 	if (newspa->spa_root_vdev != NULL) {
8460eda14cbcSMatt Macy 		newspa->spa_config_splitting = fnvlist_alloc();
8461eda14cbcSMatt Macy 		fnvlist_add_uint64(newspa->spa_config_splitting,
8462eda14cbcSMatt Macy 		    ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa));
8463eda14cbcSMatt Macy 		spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
8464eda14cbcSMatt Macy 		    B_TRUE));
8465eda14cbcSMatt Macy 	}
8466eda14cbcSMatt Macy 
8467eda14cbcSMatt Macy 	/* set the props */
8468eda14cbcSMatt Macy 	if (props != NULL) {
8469eda14cbcSMatt Macy 		spa_configfile_set(newspa, props, B_FALSE);
8470eda14cbcSMatt Macy 		error = spa_prop_set(newspa, props);
8471eda14cbcSMatt Macy 		if (error)
8472eda14cbcSMatt Macy 			goto out;
8473eda14cbcSMatt Macy 	}
8474eda14cbcSMatt Macy 
8475eda14cbcSMatt Macy 	/* flush everything */
8476eda14cbcSMatt Macy 	txg = spa_vdev_config_enter(newspa);
8477eda14cbcSMatt Macy 	vdev_config_dirty(newspa->spa_root_vdev);
8478eda14cbcSMatt Macy 	(void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
8479eda14cbcSMatt Macy 
8480eda14cbcSMatt Macy 	if (zio_injection_enabled)
8481eda14cbcSMatt Macy 		zio_handle_panic_injection(spa, FTAG, 2);
8482eda14cbcSMatt Macy 
8483eda14cbcSMatt Macy 	spa_async_resume(newspa);
8484eda14cbcSMatt Macy 
8485eda14cbcSMatt Macy 	/* finally, update the original pool's config */
8486eda14cbcSMatt Macy 	txg = spa_vdev_config_enter(spa);
8487eda14cbcSMatt Macy 	tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
8488eda14cbcSMatt Macy 	error = dmu_tx_assign(tx, TXG_WAIT);
8489eda14cbcSMatt Macy 	if (error != 0)
8490eda14cbcSMatt Macy 		dmu_tx_abort(tx);
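	/*
	 * If the assignment failed, the tx was aborted above; the split
	 * itself still proceeds, we just skip the per-vdev history records
	 * and the final commit.
	 */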
8491eda14cbcSMatt Macy 	for (c = 0; c < children; c++) {
8492eda14cbcSMatt Macy 		if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
8493eda14cbcSMatt Macy 			vdev_t *tvd = vml[c]->vdev_top;
8494eda14cbcSMatt Macy 
8495eda14cbcSMatt Macy 			/*
8496eda14cbcSMatt Macy 			 * Need to be sure the detachable VDEV is not
8497eda14cbcSMatt Macy 			 * on any *other* txg's DTL list to prevent it
8498eda14cbcSMatt Macy 			 * from being accessed after it's freed.
8499eda14cbcSMatt Macy 			 */
8500eda14cbcSMatt Macy 			for (int t = 0; t < TXG_SIZE; t++) {
8501eda14cbcSMatt Macy 				(void) txg_list_remove_this(
8502eda14cbcSMatt Macy 				    &tvd->vdev_dtl_list, vml[c], t);
8503eda14cbcSMatt Macy 			}
8504eda14cbcSMatt Macy 
8505eda14cbcSMatt Macy 			vdev_split(vml[c]);
8506eda14cbcSMatt Macy 			if (error == 0)
8507eda14cbcSMatt Macy 				spa_history_log_internal(spa, "detach", tx,
8508eda14cbcSMatt Macy 				    "vdev=%s", vml[c]->vdev_path);
8509eda14cbcSMatt Macy 
8510eda14cbcSMatt Macy 			vdev_free(vml[c]);
8511eda14cbcSMatt Macy 		}
8512eda14cbcSMatt Macy 	}
8513eda14cbcSMatt Macy 	spa->spa_avz_action = AVZ_ACTION_REBUILD;
8514eda14cbcSMatt Macy 	vdev_config_dirty(spa->spa_root_vdev);
8515eda14cbcSMatt Macy 	spa->spa_config_splitting = NULL;
8516eda14cbcSMatt Macy 	nvlist_free(nvl);
8517eda14cbcSMatt Macy 	if (error == 0)
8518eda14cbcSMatt Macy 		dmu_tx_commit(tx);
8519eda14cbcSMatt Macy 	(void) spa_vdev_exit(spa, NULL, txg, 0);
8520eda14cbcSMatt Macy 
8521eda14cbcSMatt Macy 	if (zio_injection_enabled)
8522eda14cbcSMatt Macy 		zio_handle_panic_injection(spa, FTAG, 3);
8523eda14cbcSMatt Macy 
8524eda14cbcSMatt Macy 	/* split is complete; log a history record */
8525eda14cbcSMatt Macy 	spa_history_log_internal(newspa, "split", NULL,
8526eda14cbcSMatt Macy 	    "from pool %s", spa_name(spa));
8527eda14cbcSMatt Macy 
8528eda14cbcSMatt Macy 	newspa->spa_is_splitting = B_FALSE;
8529eda14cbcSMatt Macy 	kmem_free(vml, children * sizeof (vdev_t *));
8530eda14cbcSMatt Macy 
8531eda14cbcSMatt Macy 	/* if we're not going to mount the filesystems in userland, export */
8532eda14cbcSMatt Macy 	if (exp)
8533eda14cbcSMatt Macy 		error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
8534eda14cbcSMatt Macy 		    B_FALSE, B_FALSE);
8535eda14cbcSMatt Macy 
8536eda14cbcSMatt Macy 	return (error);
8537eda14cbcSMatt Macy 
8538eda14cbcSMatt Macy out:
8539eda14cbcSMatt Macy 	spa_unload(newspa);
8540eda14cbcSMatt Macy 	spa_deactivate(newspa);
8541eda14cbcSMatt Macy 	spa_remove(newspa);
8542eda14cbcSMatt Macy 
8543eda14cbcSMatt Macy 	txg = spa_vdev_config_enter(spa);
8544eda14cbcSMatt Macy 
8545eda14cbcSMatt Macy 	/* re-online all offlined disks */
8546eda14cbcSMatt Macy 	for (c = 0; c < children; c++) {
8547eda14cbcSMatt Macy 		if (vml[c] != NULL)
8548eda14cbcSMatt Macy 			vml[c]->vdev_offline = B_FALSE;
8549eda14cbcSMatt Macy 	}
8550eda14cbcSMatt Macy 
8551eda14cbcSMatt Macy 	/* restart initializing or trimming disks as necessary */
8552eda14cbcSMatt Macy 	spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
8553eda14cbcSMatt Macy 	spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
8554eda14cbcSMatt Macy 	spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
8555eda14cbcSMatt Macy 
8556eda14cbcSMatt Macy 	vdev_reopen(spa->spa_root_vdev);
8557eda14cbcSMatt Macy 
8558eda14cbcSMatt Macy 	nvlist_free(spa->spa_config_splitting);
8559eda14cbcSMatt Macy 	spa->spa_config_splitting = NULL;
8560eda14cbcSMatt Macy 	(void) spa_vdev_exit(spa, NULL, txg, error);
8561eda14cbcSMatt Macy 
8562eda14cbcSMatt Macy 	kmem_free(vml, children * sizeof (vdev_t *));
8563eda14cbcSMatt Macy 	return (error);
8564eda14cbcSMatt Macy }
8565eda14cbcSMatt Macy 
8566eda14cbcSMatt Macy /*
8567eda14cbcSMatt Macy  * Find any device that's done replacing, or a vdev marked 'unspare' that's
8568eda14cbcSMatt Macy  * currently spared, so we can detach it.
8569eda14cbcSMatt Macy  */
8570eda14cbcSMatt Macy static vdev_t *
8571eda14cbcSMatt Macy spa_vdev_resilver_done_hunt(vdev_t *vd)
8572eda14cbcSMatt Macy {
8573eda14cbcSMatt Macy 	vdev_t *newvd, *oldvd;
8574eda14cbcSMatt Macy 
8575eda14cbcSMatt Macy 	for (int c = 0; c < vd->vdev_children; c++) {
8576eda14cbcSMatt Macy 		oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
8577eda14cbcSMatt Macy 		if (oldvd != NULL)
8578eda14cbcSMatt Macy 			return (oldvd);
8579eda14cbcSMatt Macy 	}
8580eda14cbcSMatt Macy 
8581eda14cbcSMatt Macy 	/*
8582eda14cbcSMatt Macy 	 * Check for a completed replacement.  We always consider the first
8583eda14cbcSMatt Macy 	 * vdev in the list to be the oldest vdev, and the last one to be
8584eda14cbcSMatt Macy 	 * the newest (see spa_vdev_attach() for how that works).  In
8585eda14cbcSMatt Macy 	 * the case where the newest vdev is faulted, we will not automatically
8586eda14cbcSMatt Macy 	 * remove it after a resilver completes.  This is OK as it will require
8587eda14cbcSMatt Macy 	 * user intervention to determine which disk the admin wishes to keep.
8588eda14cbcSMatt Macy 	 */
8589eda14cbcSMatt Macy 	if (vd->vdev_ops == &vdev_replacing_ops) {
8590eda14cbcSMatt Macy 		ASSERT(vd->vdev_children > 1);
8591eda14cbcSMatt Macy 
8592eda14cbcSMatt Macy 		newvd = vd->vdev_child[vd->vdev_children - 1];
8593eda14cbcSMatt Macy 		oldvd = vd->vdev_child[0];
8594eda14cbcSMatt Macy 
8595eda14cbcSMatt Macy 		if (vdev_dtl_empty(newvd, DTL_MISSING) &&
8596eda14cbcSMatt Macy 		    vdev_dtl_empty(newvd, DTL_OUTAGE) &&
8597eda14cbcSMatt Macy 		    !vdev_dtl_required(oldvd))
8598eda14cbcSMatt Macy 			return (oldvd);
8599eda14cbcSMatt Macy 	}
8600eda14cbcSMatt Macy 
8601eda14cbcSMatt Macy 	/*
8602eda14cbcSMatt Macy 	 * Check for a completed resilver with the 'unspare' flag set.
8603eda14cbcSMatt Macy 	 * Also potentially update faulted state.
8604eda14cbcSMatt Macy 	 */
8605eda14cbcSMatt Macy 	if (vd->vdev_ops == &vdev_spare_ops) {
8606eda14cbcSMatt Macy 		vdev_t *first = vd->vdev_child[0];
8607eda14cbcSMatt Macy 		vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
8608eda14cbcSMatt Macy 
8609eda14cbcSMatt Macy 		if (last->vdev_unspare) {
8610eda14cbcSMatt Macy 			oldvd = first;
8611eda14cbcSMatt Macy 			newvd = last;
8612eda14cbcSMatt Macy 		} else if (first->vdev_unspare) {
8613eda14cbcSMatt Macy 			oldvd = last;
8614eda14cbcSMatt Macy 			newvd = first;
8615eda14cbcSMatt Macy 		} else {
8616eda14cbcSMatt Macy 			oldvd = NULL;
8617eda14cbcSMatt Macy 		}
8618eda14cbcSMatt Macy 
8619eda14cbcSMatt Macy 		if (oldvd != NULL &&
8620eda14cbcSMatt Macy 		    vdev_dtl_empty(newvd, DTL_MISSING) &&
8621eda14cbcSMatt Macy 		    vdev_dtl_empty(newvd, DTL_OUTAGE) &&
8622eda14cbcSMatt Macy 		    !vdev_dtl_required(oldvd))
8623eda14cbcSMatt Macy 			return (oldvd);
8624eda14cbcSMatt Macy 
8625eda14cbcSMatt Macy 		vdev_propagate_state(vd);
8626eda14cbcSMatt Macy 
8627eda14cbcSMatt Macy 		/*
8628eda14cbcSMatt Macy 		 * If there is more than one spare attached to a disk,
8629eda14cbcSMatt Macy 		 * and those spares are not required, then we want to
8630eda14cbcSMatt Macy 		 * attempt to free them up now so that they can be used
8631eda14cbcSMatt Macy 		 * by other pools.  Once we're back down to a single
8632eda14cbcSMatt Macy 		 * disk+spare, we stop removing them.
8633eda14cbcSMatt Macy 		 */
8634eda14cbcSMatt Macy 		if (vd->vdev_children > 2) {
8635eda14cbcSMatt Macy 			newvd = vd->vdev_child[1];
8636eda14cbcSMatt Macy 
8637eda14cbcSMatt Macy 			if (newvd->vdev_isspare && last->vdev_isspare &&
8638eda14cbcSMatt Macy 			    vdev_dtl_empty(last, DTL_MISSING) &&
8639eda14cbcSMatt Macy 			    vdev_dtl_empty(last, DTL_OUTAGE) &&
8640eda14cbcSMatt Macy 			    !vdev_dtl_required(newvd))
8641eda14cbcSMatt Macy 				return (newvd);
8642eda14cbcSMatt Macy 		}
8643eda14cbcSMatt Macy 	}
8644eda14cbcSMatt Macy 
8645eda14cbcSMatt Macy 	return (NULL);
8646eda14cbcSMatt Macy }
8647eda14cbcSMatt Macy 
8648eda14cbcSMatt Macy static void
8649eda14cbcSMatt Macy spa_vdev_resilver_done(spa_t *spa)
8650eda14cbcSMatt Macy {
8651eda14cbcSMatt Macy 	vdev_t *vd, *pvd, *ppvd;
8652eda14cbcSMatt Macy 	uint64_t guid, sguid, pguid, ppguid;
8653eda14cbcSMatt Macy 
8654eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
8655eda14cbcSMatt Macy 
8656eda14cbcSMatt Macy 	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
8657eda14cbcSMatt Macy 		pvd = vd->vdev_parent;
8658eda14cbcSMatt Macy 		ppvd = pvd->vdev_parent;
8659eda14cbcSMatt Macy 		guid = vd->vdev_guid;
8660eda14cbcSMatt Macy 		pguid = pvd->vdev_guid;
8661eda14cbcSMatt Macy 		ppguid = ppvd->vdev_guid;
8662eda14cbcSMatt Macy 		sguid = 0;
8663eda14cbcSMatt Macy 		/*
8664eda14cbcSMatt Macy 		 * If we have just finished replacing a hot spared device, then
8665eda14cbcSMatt Macy 		 * we need to detach the parent's first child (the original hot
8666eda14cbcSMatt Macy 		 * spare) as well.
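		 * That is, the tree looks like spare(replacing(old, new),
		 * hot-spare): detaching 'old' collapses the replacing vdev,
		 * after which the hot spare itself is detached below.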
8667eda14cbcSMatt Macy 		 */
8668eda14cbcSMatt Macy 		if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
8669eda14cbcSMatt Macy 		    ppvd->vdev_children == 2) {
8670eda14cbcSMatt Macy 			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
8671eda14cbcSMatt Macy 			sguid = ppvd->vdev_child[1]->vdev_guid;
8672c0a83fe0SMartin Matuska 		}
8673eda14cbcSMatt Macy 		ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
8674eda14cbcSMatt Macy 
8675eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
8676eda14cbcSMatt Macy 		if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
8677eda14cbcSMatt Macy 			return;
8678eda14cbcSMatt Macy 		if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
8679eda14cbcSMatt Macy 			return;
8680eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
8681eda14cbcSMatt Macy 	}
8682eda14cbcSMatt Macy 
8683eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
8684eda14cbcSMatt Macy 
8685eda14cbcSMatt Macy 	/*
8686eda14cbcSMatt Macy 	 * If a detach was not performed above, replace waiters will not
8687eda14cbcSMatt Macy 	 * have been notified, in which case we must do so now.
8688eda14cbcSMatt Macy 	 */
8689eda14cbcSMatt Macy 	spa_notify_waiters(spa);
8690eda14cbcSMatt Macy }
8691eda14cbcSMatt Macy 
8692eda14cbcSMatt Macy /*
8693eda14cbcSMatt Macy  * Update the stored path or FRU for this vdev.
8694eda14cbcSMatt Macy  */
8695eda14cbcSMatt Macy static int
8696eda14cbcSMatt Macy spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
8697eda14cbcSMatt Macy     boolean_t ispath)
8698c0a83fe0SMartin Matuska {
8699c0a83fe0SMartin Matuska 	vdev_t *vd;
8700c0a83fe0SMartin Matuska 	boolean_t sync = B_FALSE;
8701c0a83fe0SMartin Matuska 
8702eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
8703eda14cbcSMatt Macy 
8704eda14cbcSMatt Macy 	spa_vdev_state_enter(spa, SCL_ALL);
8705eda14cbcSMatt Macy 
8706eda14cbcSMatt Macy 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
8707eda14cbcSMatt Macy 		return (spa_vdev_state_exit(spa, NULL, ENOENT));
8708eda14cbcSMatt Macy 
8709eda14cbcSMatt Macy 	if (!vd->vdev_ops->vdev_op_leaf)
8710eda14cbcSMatt Macy 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
8711eda14cbcSMatt Macy 
8712eda14cbcSMatt Macy 	if (ispath) {
8713eda14cbcSMatt Macy 		if (strcmp(value, vd->vdev_path) != 0) {
8714eda14cbcSMatt Macy 			spa_strfree(vd->vdev_path);
8715eda14cbcSMatt Macy 			vd->vdev_path = spa_strdup(value);
8716eda14cbcSMatt Macy 			sync = B_TRUE;
8717eda14cbcSMatt Macy 		}
8718eda14cbcSMatt Macy 	} else {
8719eda14cbcSMatt Macy 		if (vd->vdev_fru == NULL) {
8720eda14cbcSMatt Macy 			vd->vdev_fru = spa_strdup(value);
8721eda14cbcSMatt Macy 			sync = B_TRUE;
8722eda14cbcSMatt Macy 		} else if (strcmp(value, vd->vdev_fru) != 0) {
8723eda14cbcSMatt Macy 			spa_strfree(vd->vdev_fru);
8724eda14cbcSMatt Macy 			vd->vdev_fru = spa_strdup(value);
8725eda14cbcSMatt Macy 			sync = B_TRUE;
8726eda14cbcSMatt Macy 		}
8727eda14cbcSMatt Macy 	}
8728eda14cbcSMatt Macy 
8729eda14cbcSMatt Macy 	return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
87307877fdebSMatt Macy }
87317877fdebSMatt Macy 
87327877fdebSMatt Macy int
8733eda14cbcSMatt Macy spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
8734eda14cbcSMatt Macy {
8735eda14cbcSMatt Macy 	return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
8736eda14cbcSMatt Macy }
8737eda14cbcSMatt Macy 
8738eda14cbcSMatt Macy int
8739eda14cbcSMatt Macy spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
8740eda14cbcSMatt Macy {
8741eda14cbcSMatt Macy 	return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
8742eda14cbcSMatt Macy }
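
/*
 * A minimal usage sketch (illustrative, not part of the original file):
 * both wrappers funnel into spa_vdev_set_common(), which takes and drops
 * the vdev state lock itself, returning ENOENT for an unknown guid and
 * ENOTSUP for a non-leaf vdev.
 */
static int
example_update_vdev_path(spa_t *spa, uint64_t guid, const char *path)
{
	int error = spa_vdev_setpath(spa, guid, path);

	if (error == ENOENT)
		return (error);		/* no vdev with this guid */
	if (error == ENOTSUP)
		return (error);		/* guid is not a leaf vdev */
	return (error);			/* 0: path change will be synced */
}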
8743eda14cbcSMatt Macy 
8744eda14cbcSMatt Macy /*
8745eda14cbcSMatt Macy  * ==========================================================================
8746eda14cbcSMatt Macy  * SPA Scanning
8747eda14cbcSMatt Macy  * ==========================================================================
8748eda14cbcSMatt Macy  */
8749eda14cbcSMatt Macy int
8750eda14cbcSMatt Macy spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
8751eda14cbcSMatt Macy {
8752eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
8753eda14cbcSMatt Macy 
8754eda14cbcSMatt Macy 	if (dsl_scan_resilvering(spa->spa_dsl_pool))
8755eda14cbcSMatt Macy 		return (SET_ERROR(EBUSY));
8756eda14cbcSMatt Macy 
8757eda14cbcSMatt Macy 	return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd));
8758eda14cbcSMatt Macy }
8759eda14cbcSMatt Macy 
8760eda14cbcSMatt Macy int
8761eda14cbcSMatt Macy spa_scan_stop(spa_t *spa)
8762eda14cbcSMatt Macy {
8763eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
8764eda14cbcSMatt Macy 	if (dsl_scan_resilvering(spa->spa_dsl_pool))
8765eda14cbcSMatt Macy 		return (SET_ERROR(EBUSY));
8766eda14cbcSMatt Macy 
8767eda14cbcSMatt Macy 	return (dsl_scan_cancel(spa->spa_dsl_pool));
8768da5137abSMartin Matuska }
8769eda14cbcSMatt Macy 
8770eda14cbcSMatt Macy int
8771eda14cbcSMatt Macy spa_scan(spa_t *spa, pool_scan_func_t func)
8772eda14cbcSMatt Macy {
8773eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
8774eda14cbcSMatt Macy 
8775eda14cbcSMatt Macy 	if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
8776eda14cbcSMatt Macy 		return (SET_ERROR(ENOTSUP));
8777eda14cbcSMatt Macy 
8778eda14cbcSMatt Macy 	if (func == POOL_SCAN_RESILVER &&
8779eda14cbcSMatt Macy 	    !spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
8780eda14cbcSMatt Macy 		return (SET_ERROR(ENOTSUP));
8781eda14cbcSMatt Macy 
8782eda14cbcSMatt Macy 	/*
8783eda14cbcSMatt Macy 	 * If a resilver was requested, but there is no DTL on a
8784eda14cbcSMatt Macy 	 * writeable leaf device, we have nothing to do.
8785eda14cbcSMatt Macy 	 */
8786eda14cbcSMatt Macy 	if (func == POOL_SCAN_RESILVER &&
8787eda14cbcSMatt Macy 	    !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
8788eda14cbcSMatt Macy 		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
8789eda14cbcSMatt Macy 		return (0);
8790eda14cbcSMatt Macy 	}
8791eda14cbcSMatt Macy 
8792184c1b94SMartin Matuska 	if (func == POOL_SCAN_ERRORSCRUB &&
8793184c1b94SMartin Matuska 	    !spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG))
8794eda14cbcSMatt Macy 		return (SET_ERROR(ENOTSUP));
8795eda14cbcSMatt Macy 
8796eda14cbcSMatt Macy 	return (dsl_scan(spa->spa_dsl_pool, func));
8797eda14cbcSMatt Macy }
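
/*
 * A hypothetical caller sketch (not part of the original file), showing
 * how the error codes produced by the checks above might be interpreted:
 * ENOTSUP for an unknown scan function or a missing feature, and 0 when
 * the scan (or the short-circuit resilver-done path) was started.
 */
static int
example_start_scrub(spa_t *spa)
{
	int error = spa_scan(spa, POOL_SCAN_SCRUB);

	if (error == ENOTSUP) {
		/* invalid func, or a required feature is not enabled */
		return (error);
	}
	return (error);		/* 0: dsl_scan() has been started */
}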
8798eda14cbcSMatt Macy 
8799eda14cbcSMatt Macy /*
8800184c1b94SMartin Matuska  * ==========================================================================
8801184c1b94SMartin Matuska  * SPA async task processing
8802eda14cbcSMatt Macy  * ==========================================================================
8803eda14cbcSMatt Macy  */
8804eda14cbcSMatt Macy 
8805eda14cbcSMatt Macy static void
8806eda14cbcSMatt Macy spa_async_remove(spa_t *spa, vdev_t *vd)
8807eda14cbcSMatt Macy {
8808eda14cbcSMatt Macy 	if (vd->vdev_remove_wanted) {
8809eda14cbcSMatt Macy 		vd->vdev_remove_wanted = B_FALSE;
8810eda14cbcSMatt Macy 		vd->vdev_delayed_close = B_FALSE;
8811eda14cbcSMatt Macy 		vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
8812eda14cbcSMatt Macy 
8813eda14cbcSMatt Macy 		/*
8814eda14cbcSMatt Macy 		 * We want to clear the stats, but we don't want to do a full
8815eda14cbcSMatt Macy 		 * vdev_clear() as that will cause us to throw away
8816eda14cbcSMatt Macy 		 * degraded/faulted state as well as attempt to reopen the
8817eda14cbcSMatt Macy 		 * device, all of which is a waste.
8818eda14cbcSMatt Macy 		 */
8819eda14cbcSMatt Macy 		vd->vdev_stat.vs_read_errors = 0;
8820eda14cbcSMatt Macy 		vd->vdev_stat.vs_write_errors = 0;
8821eda14cbcSMatt Macy 		vd->vdev_stat.vs_checksum_errors = 0;
8822eda14cbcSMatt Macy 
8823eda14cbcSMatt Macy 		vdev_state_dirty(vd->vdev_top);
8824eda14cbcSMatt Macy 
8825eda14cbcSMatt Macy 		/* Tell userspace that the vdev is gone. */
8826eda14cbcSMatt Macy 		zfs_post_remove(spa, vd);
8827eda14cbcSMatt Macy 	}
8828eda14cbcSMatt Macy 
8829eda14cbcSMatt Macy 	for (int c = 0; c < vd->vdev_children; c++)
8830eda14cbcSMatt Macy 		spa_async_remove(spa, vd->vdev_child[c]);
8831eda14cbcSMatt Macy }
8832eda14cbcSMatt Macy 
8833eda14cbcSMatt Macy static void
8834eda14cbcSMatt Macy spa_async_fault_vdev(spa_t *spa, vdev_t *vd)
8835eda14cbcSMatt Macy {
8836eda14cbcSMatt Macy 	if (vd->vdev_fault_wanted) {
8837eda14cbcSMatt Macy 		vd->vdev_fault_wanted = B_FALSE;
8838eda14cbcSMatt Macy 		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
8839eda14cbcSMatt Macy 		    VDEV_AUX_ERR_EXCEEDED);
8840eda14cbcSMatt Macy 	}
8841eda14cbcSMatt Macy 
8842eda14cbcSMatt Macy 	for (int c = 0; c < vd->vdev_children; c++)
8843eda14cbcSMatt Macy 		spa_async_fault_vdev(spa, vd->vdev_child[c]);
8844eda14cbcSMatt Macy }
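
/*
 * An illustrative sketch (not part of the original file) of the
 * traversal pattern shared by spa_async_remove() and
 * spa_async_fault_vdev(): handle the current vdev first, then recurse
 * depth-first over all of its children.
 */
static void
example_visit_vdevs(vdev_t *vd, void (*func)(vdev_t *))
{
	func(vd);
	for (int c = 0; c < vd->vdev_children; c++)
		example_visit_vdevs(vd->vdev_child[c], func);
}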
8845eda14cbcSMatt Macy 
8846eda14cbcSMatt Macy static void
88477877fdebSMatt Macy spa_async_autoexpand(spa_t *spa, vdev_t *vd)
8848d411c1d6SMartin Matuska {
8849d411c1d6SMartin Matuska 	if (!spa->spa_autoexpand)
8850eda14cbcSMatt Macy 		return;
8851eda14cbcSMatt Macy 
8852eda14cbcSMatt Macy 	for (int c = 0; c < vd->vdev_children; c++) {
8853eda14cbcSMatt Macy 		vdev_t *cvd = vd->vdev_child[c];
8854eda14cbcSMatt Macy 		spa_async_autoexpand(spa, cvd);
8855eda14cbcSMatt Macy 	}
8856eda14cbcSMatt Macy 
8857eda14cbcSMatt Macy 	if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
8858eda14cbcSMatt Macy 		return;
8859eda14cbcSMatt Macy 
8860eda14cbcSMatt Macy 	spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND);
8861eda14cbcSMatt Macy }
8862eda14cbcSMatt Macy 
8863eda14cbcSMatt Macy static __attribute__((noreturn)) void
8864eda14cbcSMatt Macy spa_async_thread(void *arg)
8865eda14cbcSMatt Macy {
8866eda14cbcSMatt Macy 	spa_t *spa = (spa_t *)arg;
8867eda14cbcSMatt Macy 	dsl_pool_t *dp = spa->spa_dsl_pool;
8868eda14cbcSMatt Macy 	int tasks;
8869eda14cbcSMatt Macy 
8870eda14cbcSMatt Macy 	ASSERT(spa->spa_sync_on);
8871eda14cbcSMatt Macy 
8872eda14cbcSMatt Macy 	mutex_enter(&spa->spa_async_lock);
8873eda14cbcSMatt Macy 	tasks = spa->spa_async_tasks;
8874eda14cbcSMatt Macy 	spa->spa_async_tasks = 0;
8875eda14cbcSMatt Macy 	mutex_exit(&spa->spa_async_lock);
8876eda14cbcSMatt Macy 
8877eda14cbcSMatt Macy 	/*
8878eda14cbcSMatt Macy 	 * See if the config needs to be updated.
8879eda14cbcSMatt Macy 	 */
8880eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
8881eda14cbcSMatt Macy 		uint64_t old_space, new_space;
8882eda14cbcSMatt Macy 
8883eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
8884eda14cbcSMatt Macy 		old_space = metaslab_class_get_space(spa_normal_class(spa));
8885eda14cbcSMatt Macy 		old_space += metaslab_class_get_space(spa_special_class(spa));
8886eda14cbcSMatt Macy 		old_space += metaslab_class_get_space(spa_dedup_class(spa));
8887eda14cbcSMatt Macy 		old_space += metaslab_class_get_space(
8888eda14cbcSMatt Macy 		    spa_embedded_log_class(spa));
8889eda14cbcSMatt Macy 
8890eda14cbcSMatt Macy 		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
8891eda14cbcSMatt Macy 
8892eda14cbcSMatt Macy 		new_space = metaslab_class_get_space(spa_normal_class(spa));
8893eda14cbcSMatt Macy 		new_space += metaslab_class_get_space(spa_special_class(spa));
8894eda14cbcSMatt Macy 		new_space += metaslab_class_get_space(spa_dedup_class(spa));
8895eda14cbcSMatt Macy 		new_space += metaslab_class_get_space(
8896eda14cbcSMatt Macy 		    spa_embedded_log_class(spa));
8897eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
8898eda14cbcSMatt Macy 
8899eda14cbcSMatt Macy 		/*
8900eda14cbcSMatt Macy 		 * If the pool grew as a result of the config update,
8901eda14cbcSMatt Macy 		 * then log an internal history event.
8902eda14cbcSMatt Macy 		 */
8903eda14cbcSMatt Macy 		if (new_space != old_space) {
8904eda14cbcSMatt Macy 			spa_history_log_internal(spa, "vdev online", NULL,
8905eda14cbcSMatt Macy 			    "pool '%s' size: %llu(+%llu)",
8906eda14cbcSMatt Macy 			    spa_name(spa), (u_longlong_t)new_space,
8907eda14cbcSMatt Macy 			    (u_longlong_t)(new_space - old_space));
8908eda14cbcSMatt Macy 		}
8909eda14cbcSMatt Macy 	}
8910eda14cbcSMatt Macy 
8911eda14cbcSMatt Macy 	/*
8912eda14cbcSMatt Macy 	 * See if any devices need to be marked REMOVED.
8913eda14cbcSMatt Macy 	 */
8914eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_REMOVE) {
8915eda14cbcSMatt Macy 		spa_vdev_state_enter(spa, SCL_NONE);
8916eda14cbcSMatt Macy 		spa_async_remove(spa, spa->spa_root_vdev);
8917eda14cbcSMatt Macy 		for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
8918eda14cbcSMatt Macy 			spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
8919eda14cbcSMatt Macy 		for (int i = 0; i < spa->spa_spares.sav_count; i++)
8920eda14cbcSMatt Macy 			spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
8921eda14cbcSMatt Macy 		(void) spa_vdev_state_exit(spa, NULL, 0);
8922eda14cbcSMatt Macy 	}
8923eda14cbcSMatt Macy 
8924eda14cbcSMatt Macy 	if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
8925eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8926eda14cbcSMatt Macy 		spa_async_autoexpand(spa, spa->spa_root_vdev);
8927eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
8928eda14cbcSMatt Macy 	}
8929eda14cbcSMatt Macy 
8930eda14cbcSMatt Macy 	/*
8931eda14cbcSMatt Macy 	 * See if any devices need to be marked faulted.
8932eda14cbcSMatt Macy 	 */
8933e716630dSMartin Matuska 	if (tasks & SPA_ASYNC_FAULT_VDEV) {
8934e716630dSMartin Matuska 		spa_vdev_state_enter(spa, SCL_NONE);
8935e716630dSMartin Matuska 		spa_async_fault_vdev(spa, spa->spa_root_vdev);
8936e716630dSMartin Matuska 		(void) spa_vdev_state_exit(spa, NULL, 0);
8937eda14cbcSMatt Macy 	}
8938eda14cbcSMatt Macy 
8939eda14cbcSMatt Macy 	/*
8940eda14cbcSMatt Macy 	 * If any devices are done replacing, detach them.
8941eda14cbcSMatt Macy 	 */
8942eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_RESILVER_DONE ||
8943eda14cbcSMatt Macy 	    tasks & SPA_ASYNC_REBUILD_DONE ||
8944eda14cbcSMatt Macy 	    tasks & SPA_ASYNC_DETACH_SPARE) {
8945eda14cbcSMatt Macy 		spa_vdev_resilver_done(spa);
8946eda14cbcSMatt Macy 	}
8947eda14cbcSMatt Macy 
8948eda14cbcSMatt Macy 	/*
8949eda14cbcSMatt Macy 	 * Kick off a resilver.
8950eda14cbcSMatt Macy 	 */
8951eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_RESILVER &&
8952eda14cbcSMatt Macy 	    !vdev_rebuild_active(spa->spa_root_vdev) &&
8953eda14cbcSMatt Macy 	    (!dsl_scan_resilvering(dp) ||
8954eda14cbcSMatt Macy 	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)))
8955eda14cbcSMatt Macy 		dsl_scan_restart_resilver(dp, 0);
8956eda14cbcSMatt Macy 
8957eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_INITIALIZE_RESTART) {
8958eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
8959eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8960eda14cbcSMatt Macy 		vdev_initialize_restart(spa->spa_root_vdev);
8961eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
8962eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
8963e716630dSMartin Matuska 	}
8964e716630dSMartin Matuska 
8965e716630dSMartin Matuska 	if (tasks & SPA_ASYNC_TRIM_RESTART) {
8966e716630dSMartin Matuska 		mutex_enter(&spa_namespace_lock);
8967eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8968eda14cbcSMatt Macy 		vdev_trim_restart(spa->spa_root_vdev);
8969eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
8970eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
8971eda14cbcSMatt Macy 	}
8972eda14cbcSMatt Macy 
8973eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) {
8974eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
8975eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8976eda14cbcSMatt Macy 		vdev_autotrim_restart(spa);
8977eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
8978eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
8979eda14cbcSMatt Macy 	}
8980eda14cbcSMatt Macy 
8981eda14cbcSMatt Macy 	/*
8982eda14cbcSMatt Macy 	 * Kick off L2 cache whole device TRIM.
8983eda14cbcSMatt Macy 	 */
8984eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_L2CACHE_TRIM) {
8985eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
8986eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8987eda14cbcSMatt Macy 		vdev_trim_l2arc(spa);
8988eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
8989eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
8990eda14cbcSMatt Macy 	}
8991eda14cbcSMatt Macy 
8992eda14cbcSMatt Macy 	/*
8993eda14cbcSMatt Macy 	 * Kick off L2 cache rebuilding.
8994eda14cbcSMatt Macy 	 */
8995eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_L2CACHE_REBUILD) {
8996eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
8997eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER);
8998eda14cbcSMatt Macy 		l2arc_spa_rebuild_start(spa);
8999eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_L2ARC, FTAG);
9000eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
9001eda14cbcSMatt Macy 	}
9002eda14cbcSMatt Macy 
9003eda14cbcSMatt Macy 	/*
9004eda14cbcSMatt Macy 	 * Let the world know that we're done.
9005eda14cbcSMatt Macy 	 */
9006eda14cbcSMatt Macy 	mutex_enter(&spa->spa_async_lock);
9007eda14cbcSMatt Macy 	spa->spa_async_thread = NULL;
9008eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_async_cv);
9009eda14cbcSMatt Macy 	mutex_exit(&spa->spa_async_lock);
9010eda14cbcSMatt Macy 	thread_exit();
9011eda14cbcSMatt Macy }
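
/*
 * An illustrative sketch (not part of the original file) of the
 * snapshot-and-clear pattern used at the top of spa_async_thread():
 * the pending task bits are consumed atomically under spa_async_lock,
 * so a request arriving while the thread is running is never lost --
 * it simply triggers a later dispatch.
 */
static int
example_consume_async_tasks(spa_t *spa)
{
	int tasks;

	mutex_enter(&spa->spa_async_lock);
	tasks = spa->spa_async_tasks;
	spa->spa_async_tasks = 0;
	mutex_exit(&spa->spa_async_lock);
	return (tasks);
}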
9012eda14cbcSMatt Macy 
9013eda14cbcSMatt Macy void
9014eda14cbcSMatt Macy spa_async_suspend(spa_t *spa)
9015eda14cbcSMatt Macy {
9016eda14cbcSMatt Macy 	mutex_enter(&spa->spa_async_lock);
9017eda14cbcSMatt Macy 	spa->spa_async_suspended++;
9018eda14cbcSMatt Macy 	while (spa->spa_async_thread != NULL)
9019eda14cbcSMatt Macy 		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
9020eda14cbcSMatt Macy 	mutex_exit(&spa->spa_async_lock);
9021eda14cbcSMatt Macy 
9022eda14cbcSMatt Macy 	spa_vdev_remove_suspend(spa);
9023eda14cbcSMatt Macy 
9024eda14cbcSMatt Macy 	zthr_t *condense_thread = spa->spa_condense_zthr;
9025eda14cbcSMatt Macy 	if (condense_thread != NULL)
9026eda14cbcSMatt Macy 		zthr_cancel(condense_thread);
9027eda14cbcSMatt Macy 
9028eda14cbcSMatt Macy 	zthr_t *raidz_expand_thread = spa->spa_raidz_expand_zthr;
9029eda14cbcSMatt Macy 	if (raidz_expand_thread != NULL)
9030eda14cbcSMatt Macy 		zthr_cancel(raidz_expand_thread);
9031eda14cbcSMatt Macy 
9032eda14cbcSMatt Macy 	zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
9033eda14cbcSMatt Macy 	if (discard_thread != NULL)
9034eda14cbcSMatt Macy 		zthr_cancel(discard_thread);
9035eda14cbcSMatt Macy 
9036eda14cbcSMatt Macy 	zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
9037eda14cbcSMatt Macy 	if (ll_delete_thread != NULL)
9038eda14cbcSMatt Macy 		zthr_cancel(ll_delete_thread);
9039eda14cbcSMatt Macy 
9040eda14cbcSMatt Macy 	zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
9041eda14cbcSMatt Macy 	if (ll_condense_thread != NULL)
9042eda14cbcSMatt Macy 		zthr_cancel(ll_condense_thread);
9043eda14cbcSMatt Macy }
9044eda14cbcSMatt Macy 
9045eda14cbcSMatt Macy void
9046eda14cbcSMatt Macy spa_async_resume(spa_t *spa)
9047eda14cbcSMatt Macy {
9048eda14cbcSMatt Macy 	mutex_enter(&spa->spa_async_lock);
9049eda14cbcSMatt Macy 	ASSERT(spa->spa_async_suspended != 0);
9050eda14cbcSMatt Macy 	spa->spa_async_suspended--;
9051eda14cbcSMatt Macy 	mutex_exit(&spa->spa_async_lock);
9052eda14cbcSMatt Macy 	spa_restart_removal(spa);
9053eda14cbcSMatt Macy 
9054eda14cbcSMatt Macy 	zthr_t *condense_thread = spa->spa_condense_zthr;
9055eda14cbcSMatt Macy 	if (condense_thread != NULL)
9056eda14cbcSMatt Macy 		zthr_resume(condense_thread);
9057eda14cbcSMatt Macy 
9058eda14cbcSMatt Macy 	zthr_t *raidz_expand_thread = spa->spa_raidz_expand_zthr;
9059eda14cbcSMatt Macy 	if (raidz_expand_thread != NULL)
9060eda14cbcSMatt Macy 		zthr_resume(raidz_expand_thread);
9061eda14cbcSMatt Macy 
9062eda14cbcSMatt Macy 	zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
9063eda14cbcSMatt Macy 	if (discard_thread != NULL)
9064eda14cbcSMatt Macy 		zthr_resume(discard_thread);
9065eda14cbcSMatt Macy 
9066eda14cbcSMatt Macy 	zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
9067eda14cbcSMatt Macy 	if (ll_delete_thread != NULL)
9068eda14cbcSMatt Macy 		zthr_resume(ll_delete_thread);
9069eda14cbcSMatt Macy 
9070eda14cbcSMatt Macy 	zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
9071eda14cbcSMatt Macy 	if (ll_condense_thread != NULL)
9072eda14cbcSMatt Macy 		zthr_resume(ll_condense_thread);
9073eda14cbcSMatt Macy }
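
/*
 * A hypothetical usage sketch (not part of the original file):
 * suspend/resume calls are counted and may nest, so an operation that
 * must not race with the async thread can be bracketed like this.
 */
static void
example_with_async_suspended(spa_t *spa, void (*op)(spa_t *))
{
	spa_async_suspend(spa);		/* waits out any running thread */
	op(spa);
	spa_async_resume(spa);		/* drops the suspend count */
}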
9074eda14cbcSMatt Macy 
9075eda14cbcSMatt Macy static boolean_t
9076eda14cbcSMatt Macy spa_async_tasks_pending(spa_t *spa)
9077eda14cbcSMatt Macy {
9078eda14cbcSMatt Macy 	uint_t non_config_tasks;
9079eda14cbcSMatt Macy 	uint_t config_task;
9080eda14cbcSMatt Macy 	boolean_t config_task_suspended;
9081eda14cbcSMatt Macy 
9082eda14cbcSMatt Macy 	non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
9083eda14cbcSMatt Macy 	config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
9084eda14cbcSMatt Macy 	if (spa->spa_ccw_fail_time == 0) {
9085eda14cbcSMatt Macy 		config_task_suspended = B_FALSE;
9086eda14cbcSMatt Macy 	} else {
9087eda14cbcSMatt Macy 		config_task_suspended =
9088eda14cbcSMatt Macy 		    (gethrtime() - spa->spa_ccw_fail_time) <
9089eda14cbcSMatt Macy 		    ((hrtime_t)zfs_ccw_retry_interval * NANOSEC);
9090eda14cbcSMatt Macy 	}
9091eda14cbcSMatt Macy 
9092eda14cbcSMatt Macy 	return (non_config_tasks || (config_task && !config_task_suspended));
9093eda14cbcSMatt Macy }
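
/*
 * An illustrative sketch (not part of the original file) isolating the
 * back-off test above: a config-cache write failure recorded at
 * spa_ccw_fail_time suppresses the SPA_ASYNC_CONFIG_UPDATE task until
 * zfs_ccw_retry_interval seconds have elapsed (hence the NANOSEC
 * scaling against gethrtime()).
 */
static boolean_t
example_ccw_suppressed(hrtime_t fail_time, uint64_t retry_interval_sec)
{
	if (fail_time == 0)
		return (B_FALSE);
	return ((gethrtime() - fail_time) <
	    ((hrtime_t)retry_interval_sec * NANOSEC));
}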
9094eda14cbcSMatt Macy 
9095eda14cbcSMatt Macy static void
9096eda14cbcSMatt Macy spa_async_dispatch(spa_t *spa)
9097eda14cbcSMatt Macy {
9098eda14cbcSMatt Macy 	mutex_enter(&spa->spa_async_lock);
9099eda14cbcSMatt Macy 	if (spa_async_tasks_pending(spa) &&
9100eda14cbcSMatt Macy 	    !spa->spa_async_suspended &&
9101eda14cbcSMatt Macy 	    spa->spa_async_thread == NULL)
9102eda14cbcSMatt Macy 		spa->spa_async_thread = thread_create(NULL, 0,
9103eda14cbcSMatt Macy 		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
9104eda14cbcSMatt Macy 	mutex_exit(&spa->spa_async_lock);
9105eda14cbcSMatt Macy }
9106eda14cbcSMatt Macy 
9107eda14cbcSMatt Macy void
9108eda14cbcSMatt Macy spa_async_request(spa_t *spa, int task)
9109eda14cbcSMatt Macy {
9110eda14cbcSMatt Macy 	zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
9111eda14cbcSMatt Macy 	mutex_enter(&spa->spa_async_lock);
9112eda14cbcSMatt Macy 	spa->spa_async_tasks |= task;
9113eda14cbcSMatt Macy 	mutex_exit(&spa->spa_async_lock);
9114eda14cbcSMatt Macy }
9115eda14cbcSMatt Macy 
9116eda14cbcSMatt Macy int
9117eda14cbcSMatt Macy spa_async_tasks(spa_t *spa)
9118eda14cbcSMatt Macy {
9119eda14cbcSMatt Macy 	return (spa->spa_async_tasks);
9120eda14cbcSMatt Macy }
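
/*
 * A hypothetical producer sketch (not part of the original file):
 * spa_async_request() only ORs a bit into spa_async_tasks; nothing runs
 * until spa_async_dispatch() is called (normally from spa_sync()), and
 * it spawns spa_async_thread() only when work is pending and the spa is
 * not suspended.
 */
static void
example_request_autotrim_restart(spa_t *spa)
{
	spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
	spa_async_dispatch(spa);
}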
9121eda14cbcSMatt Macy 
9122eda14cbcSMatt Macy /*
9123eda14cbcSMatt Macy  * ==========================================================================
9124eda14cbcSMatt Macy  * SPA syncing routines
9125eda14cbcSMatt Macy  * ==========================================================================
9126eda14cbcSMatt Macy  */
9127eda14cbcSMatt Macy 
9128eda14cbcSMatt Macy 
9129eda14cbcSMatt Macy static int
9130eda14cbcSMatt Macy bpobj_enqueue_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
9131eda14cbcSMatt Macy     dmu_tx_t *tx)
9132eda14cbcSMatt Macy {
9133da5137abSMartin Matuska 	bpobj_t *bpo = arg;
9134eda14cbcSMatt Macy 	bpobj_enqueue(bpo, bp, bp_freed, tx);
9135eda14cbcSMatt Macy 	return (0);
9136eda14cbcSMatt Macy }
9137eda14cbcSMatt Macy 
9138eda14cbcSMatt Macy int
9139eda14cbcSMatt Macy bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
9140eda14cbcSMatt Macy {
9141eda14cbcSMatt Macy 	return (bpobj_enqueue_cb(arg, bp, B_FALSE, tx));
9142eda14cbcSMatt Macy }
9143eda14cbcSMatt Macy 
9144eda14cbcSMatt Macy int
9145eda14cbcSMatt Macy bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
9146eda14cbcSMatt Macy {
9147eda14cbcSMatt Macy 	return (bpobj_enqueue_cb(arg, bp, B_TRUE, tx));
9148eda14cbcSMatt Macy }
9149eda14cbcSMatt Macy 
9150eda14cbcSMatt Macy static int
9151eda14cbcSMatt Macy spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
9152eda14cbcSMatt Macy {
9153eda14cbcSMatt Macy 	zio_t *pio = arg;
9154eda14cbcSMatt Macy 
9155eda14cbcSMatt Macy 	zio_nowait(zio_free_sync(pio, pio->io_spa, dmu_tx_get_txg(tx), bp,
9156eda14cbcSMatt Macy 	    pio->io_flags));
9157eda14cbcSMatt Macy 	return (0);
9158eda14cbcSMatt Macy }
9159eda14cbcSMatt Macy 
9160eda14cbcSMatt Macy static int
9161eda14cbcSMatt Macy bpobj_spa_free_sync_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
9162eda14cbcSMatt Macy     dmu_tx_t *tx)
9163eda14cbcSMatt Macy {
9164eda14cbcSMatt Macy 	ASSERT(!bp_freed);
9165eda14cbcSMatt Macy 	return (spa_free_sync_cb(arg, bp, tx));
9166eda14cbcSMatt Macy }
9167eda14cbcSMatt Macy 
9168eda14cbcSMatt Macy /*
9169eda14cbcSMatt Macy  * Note: this simple function is not inlined to make it easier to dtrace the
917081b22a98SMartin Matuska  * amount of time spent syncing frees.
9171eda14cbcSMatt Macy  */
9172681ce946SMartin Matuska static void
9173681ce946SMartin Matuska spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
9174eda14cbcSMatt Macy {
9175eda14cbcSMatt Macy 	zio_t *zio = zio_root(spa, NULL, NULL, 0);
9176eda14cbcSMatt Macy 	bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
9177eda14cbcSMatt Macy 	VERIFY(zio_wait(zio) == 0);
9178eda14cbcSMatt Macy }
9179681ce946SMartin Matuska 
9180681ce946SMartin Matuska /*
9181eda14cbcSMatt Macy  * Note: this simple function is not inlined to make it easier to dtrace the
9182eda14cbcSMatt Macy  * amount of time spent syncing deferred frees.
9183eda14cbcSMatt Macy  */
9184eda14cbcSMatt Macy static void
9185eda14cbcSMatt Macy spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
9186eda14cbcSMatt Macy {
9187eda14cbcSMatt Macy 	if (spa_sync_pass(spa) != 1)
9188eda14cbcSMatt Macy 		return;
9189eda14cbcSMatt Macy 
9190eda14cbcSMatt Macy 	/*
9191eda14cbcSMatt Macy 	 * Note:
9192eda14cbcSMatt Macy 	 * If the log space map feature is active, we stop deferring
9193eda14cbcSMatt Macy 	 * frees to the next TXG and therefore running this function
9194eda14cbcSMatt Macy 	 * would be considered a no-op as spa_deferred_bpobj should
9195eda14cbcSMatt Macy 	 * not have any entries.
9196eda14cbcSMatt Macy 	 *
9197eda14cbcSMatt Macy 	 * That said we run this function anyway (instead of returning
9198eda14cbcSMatt Macy 	 * immediately) for the edge-case scenario where we just
9199eda14cbcSMatt Macy 	 * activated the log space map feature in this TXG but we have
9200eda14cbcSMatt Macy 	 * deferred frees from the previous TXG.
9201d411c1d6SMartin Matuska 	 */
9202d411c1d6SMartin Matuska 	zio_t *zio = zio_root(spa, NULL, NULL, 0);
9203d411c1d6SMartin Matuska 	VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
9204d411c1d6SMartin Matuska 	    bpobj_spa_free_sync_cb, zio, tx), ==, 0);
9205d411c1d6SMartin Matuska 	VERIFY0(zio_wait(zio));
9206eda14cbcSMatt Macy }
9207eda14cbcSMatt Macy 
9208eda14cbcSMatt Macy static void
9209eda14cbcSMatt Macy spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
9210eda14cbcSMatt Macy {
9211eda14cbcSMatt Macy 	char *packed = NULL;
9212eda14cbcSMatt Macy 	size_t bufsize;
9213eda14cbcSMatt Macy 	size_t nvsize = 0;
9214eda14cbcSMatt Macy 	dmu_buf_t *db;
9215eda14cbcSMatt Macy 
9216eda14cbcSMatt Macy 	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
9217eda14cbcSMatt Macy 
9218eda14cbcSMatt Macy 	/*
9219eda14cbcSMatt Macy 	 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
9220eda14cbcSMatt Macy 	 * information.  This avoids the dmu_buf_will_dirty() path and
9221eda14cbcSMatt Macy 	 * saves us a pre-read to get data we don't actually care about.
9222eda14cbcSMatt Macy 	 */
9223eda14cbcSMatt Macy 	bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
9224eda14cbcSMatt Macy 	packed = vmem_alloc(bufsize, KM_SLEEP);
9225eda14cbcSMatt Macy 
9226eda14cbcSMatt Macy 	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
9227eda14cbcSMatt Macy 	    KM_SLEEP) == 0);
9228eda14cbcSMatt Macy 	memset(packed + nvsize, 0, bufsize - nvsize);
9229eda14cbcSMatt Macy 
9230eda14cbcSMatt Macy 	dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
9231eda14cbcSMatt Macy 
9232eda14cbcSMatt Macy 	vmem_free(packed, bufsize);
9233eda14cbcSMatt Macy 
9234eda14cbcSMatt Macy 	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
9235eda14cbcSMatt Macy 	dmu_buf_will_dirty(db, tx);
9236eda14cbcSMatt Macy 	*(uint64_t *)db->db_data = nvsize;
9237eda14cbcSMatt Macy 	dmu_buf_rele(db, FTAG);
9238eda14cbcSMatt Macy }
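
/*
 * An illustrative sketch (not part of the original file) of the rounding
 * used above.  Assuming SPA_CONFIG_BLOCKSIZE is 16 KiB, a 9000-byte
 * packed nvlist is written as a single zero-padded 16 KiB block, so the
 * write always covers whole blocks and avoids the read-modify-write
 * path mentioned in the comment above.
 */
static size_t
example_config_bufsize(size_t nvsize)
{
	/* P2ROUNDUP(x, align) rounds x up to the next multiple of align. */
	return (P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE));
}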
9239eda14cbcSMatt Macy 
9240eda14cbcSMatt Macy static void
9241eda14cbcSMatt Macy spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
9242eda14cbcSMatt Macy     const char *config, const char *entry)
9243eda14cbcSMatt Macy {
9244eda14cbcSMatt Macy 	nvlist_t *nvroot;
9245eda14cbcSMatt Macy 	nvlist_t **list;
9246eda14cbcSMatt Macy 	int i;
9247eda14cbcSMatt Macy 
9248eda14cbcSMatt Macy 	if (!sav->sav_sync)
9249eda14cbcSMatt Macy 		return;
9250eda14cbcSMatt Macy 
9251eda14cbcSMatt Macy 	/*
9252eda14cbcSMatt Macy 	 * Update the MOS nvlist describing the list of available devices.
9253eda14cbcSMatt Macy 	 * spa_validate_aux() will have already made sure this nvlist is
9254eda14cbcSMatt Macy 	 * valid and the vdevs are labeled appropriately.
9255eda14cbcSMatt Macy 	 */
9256eda14cbcSMatt Macy 	if (sav->sav_object == 0) {
9257eda14cbcSMatt Macy 		sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
9258eda14cbcSMatt Macy 		    DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
9259eda14cbcSMatt Macy 		    sizeof (uint64_t), tx);
9260eda14cbcSMatt Macy 		VERIFY(zap_update(spa->spa_meta_objset,
9261eda14cbcSMatt Macy 		    DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
9262eda14cbcSMatt Macy 		    &sav->sav_object, tx) == 0);
9263eda14cbcSMatt Macy 	}
9264eda14cbcSMatt Macy 
9265eda14cbcSMatt Macy 	nvroot = fnvlist_alloc();
9266eda14cbcSMatt Macy 	if (sav->sav_count == 0) {
9267eda14cbcSMatt Macy 		fnvlist_add_nvlist_array(nvroot, config,
9268eda14cbcSMatt Macy 		    (const nvlist_t * const *)NULL, 0);
9269eda14cbcSMatt Macy 	} else {
9270eda14cbcSMatt Macy 		list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP);
9271eda14cbcSMatt Macy 		for (i = 0; i < sav->sav_count; i++)
9272eda14cbcSMatt Macy 			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
9273eda14cbcSMatt Macy 			    B_FALSE, VDEV_CONFIG_L2CACHE);
9274eda14cbcSMatt Macy 		fnvlist_add_nvlist_array(nvroot, config,
9275eda14cbcSMatt Macy 		    (const nvlist_t * const *)list, sav->sav_count);
9276eda14cbcSMatt Macy 		for (i = 0; i < sav->sav_count; i++)
9277eda14cbcSMatt Macy 			nvlist_free(list[i]);
9278eda14cbcSMatt Macy 		kmem_free(list, sav->sav_count * sizeof (void *));
9279eda14cbcSMatt Macy 	}
9280eda14cbcSMatt Macy 
9281eda14cbcSMatt Macy 	spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
9282eda14cbcSMatt Macy 	nvlist_free(nvroot);
9283eda14cbcSMatt Macy 
9284eda14cbcSMatt Macy 	sav->sav_sync = B_FALSE;
9285eda14cbcSMatt Macy }
9286eda14cbcSMatt Macy 
9287eda14cbcSMatt Macy /*
9288eda14cbcSMatt Macy  * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
9289eda14cbcSMatt Macy  * The all-vdev ZAP must be empty.
9290eda14cbcSMatt Macy  */
9291eda14cbcSMatt Macy static void
9292eda14cbcSMatt Macy spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
9293eda14cbcSMatt Macy {
9294eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
9295eda14cbcSMatt Macy 
9296eda14cbcSMatt Macy 	if (vd->vdev_root_zap != 0 &&
9297eda14cbcSMatt Macy 	    spa_feature_is_active(spa, SPA_FEATURE_AVZ_V2)) {
9298eda14cbcSMatt Macy 		VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
9299eda14cbcSMatt Macy 		    vd->vdev_root_zap, tx));
9300eda14cbcSMatt Macy 	}
9301eda14cbcSMatt Macy 	if (vd->vdev_top_zap != 0) {
9302eda14cbcSMatt Macy 		VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
9303eda14cbcSMatt Macy 		    vd->vdev_top_zap, tx));
9304eda14cbcSMatt Macy 	}
9305eda14cbcSMatt Macy 	if (vd->vdev_leaf_zap != 0) {
9306eda14cbcSMatt Macy 		VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
9307eda14cbcSMatt Macy 		    vd->vdev_leaf_zap, tx));
9308eda14cbcSMatt Macy 	}
9309eda14cbcSMatt Macy 	for (uint64_t i = 0; i < vd->vdev_children; i++) {
9310eda14cbcSMatt Macy 		spa_avz_build(vd->vdev_child[i], avz, tx);
9311eda14cbcSMatt Macy 	}
9312eda14cbcSMatt Macy }
9313eda14cbcSMatt Macy 
9314eda14cbcSMatt Macy static void
9315eda14cbcSMatt Macy spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
9316eda14cbcSMatt Macy {
9317eda14cbcSMatt Macy 	nvlist_t *config;
9318eda14cbcSMatt Macy 
9319eda14cbcSMatt Macy 	/*
9320eda14cbcSMatt Macy 	 * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
9321eda14cbcSMatt Macy 	 * its config may not be dirty but we still need to build per-vdev ZAPs.
9322eda14cbcSMatt Macy 	 * Similarly, if the pool is being assembled (e.g. after a split), we
9323eda14cbcSMatt Macy 	 * need to rebuild the AVZ although the config may not be dirty.
9324eda14cbcSMatt Macy 	 */
9325eda14cbcSMatt Macy 	if (list_is_empty(&spa->spa_config_dirty_list) &&
9326eda14cbcSMatt Macy 	    spa->spa_avz_action == AVZ_ACTION_NONE)
9327eda14cbcSMatt Macy 		return;
9328eda14cbcSMatt Macy 
9329eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
9330eda14cbcSMatt Macy 
9331eda14cbcSMatt Macy 	ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
9332eda14cbcSMatt Macy 	    spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
9333eda14cbcSMatt Macy 	    spa->spa_all_vdev_zaps != 0);
9334eda14cbcSMatt Macy 
9335eda14cbcSMatt Macy 	if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
9336eda14cbcSMatt Macy 		/* Make and build the new AVZ */
9337eda14cbcSMatt Macy 		uint64_t new_avz = zap_create(spa->spa_meta_objset,
9338eda14cbcSMatt Macy 		    DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
9339eda14cbcSMatt Macy 		spa_avz_build(spa->spa_root_vdev, new_avz, tx);
9340eda14cbcSMatt Macy 
9341eda14cbcSMatt Macy 		/* Diff old AVZ with new one */
9342eda14cbcSMatt Macy 		zap_cursor_t zc;
9343eda14cbcSMatt Macy 		zap_attribute_t za;
9344eda14cbcSMatt Macy 
9345eda14cbcSMatt Macy 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
9346eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps);
9347eda14cbcSMatt Macy 		    zap_cursor_retrieve(&zc, &za) == 0;
9348eda14cbcSMatt Macy 		    zap_cursor_advance(&zc)) {
9349eda14cbcSMatt Macy 			uint64_t vdzap = za.za_first_integer;
9350eda14cbcSMatt Macy 			if (zap_lookup_int(spa->spa_meta_objset, new_avz,
9351eda14cbcSMatt Macy 			    vdzap) == ENOENT) {
9352eda14cbcSMatt Macy 				/*
9353eda14cbcSMatt Macy 				 * ZAP is listed in old AVZ but not in new one;
9354eda14cbcSMatt Macy 				 * destroy it
9355eda14cbcSMatt Macy 				 */
9356eda14cbcSMatt Macy 				VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
9357eda14cbcSMatt Macy 				    tx));
9358eda14cbcSMatt Macy 			}
9359eda14cbcSMatt Macy 		}
9360eda14cbcSMatt Macy 
9361eda14cbcSMatt Macy 		zap_cursor_fini(&zc);
9362eda14cbcSMatt Macy 
9363eda14cbcSMatt Macy 		/* Destroy the old AVZ */
9364eda14cbcSMatt Macy 		VERIFY0(zap_destroy(spa->spa_meta_objset,
9365eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps, tx));
93662a58b312SMartin Matuska 
9367eda14cbcSMatt Macy 		/* Replace the old AVZ in the dir obj with the new one */
9368eda14cbcSMatt Macy 		VERIFY0(zap_update(spa->spa_meta_objset,
9369c98ecfceSAllan Jude 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
9370eda14cbcSMatt Macy 		    sizeof (new_avz), 1, &new_avz, tx));
9371eda14cbcSMatt Macy 
9372eda14cbcSMatt Macy 		spa->spa_all_vdev_zaps = new_avz;
9373c98ecfceSAllan Jude 	} else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
9374eda14cbcSMatt Macy 		zap_cursor_t zc;
9375eda14cbcSMatt Macy 		zap_attribute_t za;
9376eda14cbcSMatt Macy 
9377eda14cbcSMatt Macy 		/* Walk through the AVZ and destroy all listed ZAPs */
9378eda14cbcSMatt Macy 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
9379eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps);
9380eda14cbcSMatt Macy 		    zap_cursor_retrieve(&zc, &za) == 0;
9381eda14cbcSMatt Macy 		    zap_cursor_advance(&zc)) {
9382eda14cbcSMatt Macy 			uint64_t zap = za.za_first_integer;
9383eda14cbcSMatt Macy 			VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
9384eda14cbcSMatt Macy 		}
9385eda14cbcSMatt Macy 
9386eda14cbcSMatt Macy 		zap_cursor_fini(&zc);
9387eda14cbcSMatt Macy 
9388eda14cbcSMatt Macy 		/* Destroy and unlink the AVZ itself */
9389eda14cbcSMatt Macy 		VERIFY0(zap_destroy(spa->spa_meta_objset,
9390eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps, tx));
9391eda14cbcSMatt Macy 		VERIFY0(zap_remove(spa->spa_meta_objset,
9392eda14cbcSMatt Macy 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
9393eda14cbcSMatt Macy 		spa->spa_all_vdev_zaps = 0;
9394eda14cbcSMatt Macy 	}
9395eda14cbcSMatt Macy 
9396eda14cbcSMatt Macy 	if (spa->spa_all_vdev_zaps == 0) {
9397eda14cbcSMatt Macy 		spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
9398eda14cbcSMatt Macy 		    DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
9399eda14cbcSMatt Macy 		    DMU_POOL_VDEV_ZAP_MAP, tx);
9400eda14cbcSMatt Macy 	}
9401eda14cbcSMatt Macy 	spa->spa_avz_action = AVZ_ACTION_NONE;
9402eda14cbcSMatt Macy 
9403eda14cbcSMatt Macy 	/* Create ZAPs for vdevs that don't have them. */
9404eda14cbcSMatt Macy 	vdev_construct_zaps(spa->spa_root_vdev, tx);
940533b8c039SMartin Matuska 
940633b8c039SMartin Matuska 	config = spa_config_generate(spa, spa->spa_root_vdev,
940733b8c039SMartin Matuska 	    dmu_tx_get_txg(tx), B_FALSE);
940833b8c039SMartin Matuska 
940933b8c039SMartin Matuska 	/*
9410eda14cbcSMatt Macy 	 * If we're upgrading the spa version then make sure that
941133b8c039SMartin Matuska 	 * the config object gets updated with the correct version.
9412eda14cbcSMatt Macy 	 */
941333b8c039SMartin Matuska 	if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
941433b8c039SMartin Matuska 		fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
9415eda14cbcSMatt Macy 		    spa->spa_uberblock.ub_version);
9416c98ecfceSAllan Jude 
9417eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_STATE, FTAG);
9418ee36e25aSMartin Matuska 
9419ee36e25aSMartin Matuska 	nvlist_free(spa->spa_config_syncing);
9420ee36e25aSMartin Matuska 	spa->spa_config_syncing = config;
9421ee36e25aSMartin Matuska 
9422ee36e25aSMartin Matuska 	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
9423ee36e25aSMartin Matuska }
9424ee36e25aSMartin Matuska 
9425ee36e25aSMartin Matuska static void
942633b8c039SMartin Matuska spa_sync_version(void *arg, dmu_tx_t *tx)
9427ee36e25aSMartin Matuska {
942833b8c039SMartin Matuska 	uint64_t *versionp = arg;
942933b8c039SMartin Matuska 	uint64_t version = *versionp;
943033b8c039SMartin Matuska 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
9431ee36e25aSMartin Matuska 
9432ee36e25aSMartin Matuska 	/*
9433ee36e25aSMartin Matuska 	 * Setting the version is special cased when first creating the pool.
9434ee36e25aSMartin Matuska 	 */
9435c98ecfceSAllan Jude 	ASSERT(tx->tx_txg != TXG_INITIAL);
9436c98ecfceSAllan Jude 
9437c98ecfceSAllan Jude 	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
9438c98ecfceSAllan Jude 	ASSERT(version >= spa_version(spa));
9439c98ecfceSAllan Jude 
9440c98ecfceSAllan Jude 	spa->spa_uberblock.ub_version = version;
9441c98ecfceSAllan Jude 	vdev_config_dirty(spa->spa_root_vdev);
9442c98ecfceSAllan Jude 	spa_history_log_internal(spa, "set", tx, "version=%lld",
9443c98ecfceSAllan Jude 	    (longlong_t)version);
9444c98ecfceSAllan Jude }
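
/*
 * A hypothetical dispatch sketch (not part of the original file; the
 * real caller lives in spa_prop_set()): spa_sync_version() runs as a
 * DSL sync task so the uberblock version bump, the dirtied vdev config,
 * and the history entry all land in the same txg.
 */
static int
example_upgrade_pool_version(spa_t *spa, uint64_t version)
{
	return (dsl_sync_task(spa_name(spa), NULL, spa_sync_version,
	    &version, 6, ZFS_SPACE_CHECK_RESERVED));
}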
9445c98ecfceSAllan Jude 
9446c98ecfceSAllan Jude /*
9447c98ecfceSAllan Jude  * Set zpool properties.
9448c98ecfceSAllan Jude  */
9449eda14cbcSMatt Macy static void
9450eda14cbcSMatt Macy spa_sync_props(void *arg, dmu_tx_t *tx)
9451eda14cbcSMatt Macy {
9452eda14cbcSMatt Macy 	nvlist_t *nvp = arg;
9453eda14cbcSMatt Macy 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
9454eda14cbcSMatt Macy 	objset_t *mos = spa->spa_meta_objset;
9455eda14cbcSMatt Macy 	nvpair_t *elem = NULL;
9456eda14cbcSMatt Macy 
9457eda14cbcSMatt Macy 	mutex_enter(&spa->spa_props_lock);
9458eda14cbcSMatt Macy 
9459eda14cbcSMatt Macy 	while ((elem = nvlist_next_nvpair(nvp, elem))) {
9460eda14cbcSMatt Macy 		uint64_t intval;
9461c0a83fe0SMartin Matuska 		const char *strval, *fname;
9462c98ecfceSAllan Jude 		zpool_prop_t prop;
9463c98ecfceSAllan Jude 		const char *propname;
9464c0a83fe0SMartin Matuska 		const char *elemname = nvpair_name(elem);
9465c0a83fe0SMartin Matuska 		zprop_type_t proptype;
9466c0a83fe0SMartin Matuska 		spa_feature_t fid;
9467c98ecfceSAllan Jude 
9468eda14cbcSMatt Macy 		switch (prop = zpool_name_to_prop(elemname)) {
9469eda14cbcSMatt Macy 		case ZPOOL_PROP_VERSION:
9470eda14cbcSMatt Macy 			intval = fnvpair_value_uint64(elem);
9471eda14cbcSMatt Macy 			/*
9472eda14cbcSMatt Macy 			 * The version is synced separately before other
9473eda14cbcSMatt Macy 			 * properties and should be correct by now.
9474eda14cbcSMatt Macy 			 */
9475eda14cbcSMatt Macy 			ASSERT3U(spa_version(spa), >=, intval);
9476c98ecfceSAllan Jude 			break;
9477eda14cbcSMatt Macy 
9478eda14cbcSMatt Macy 		case ZPOOL_PROP_ALTROOT:
9479eda14cbcSMatt Macy 			/*
9480eda14cbcSMatt Macy 			 * 'altroot' is a non-persistent property. It should
9481eda14cbcSMatt Macy 			 * have been set temporarily at creation or import time.
9482eda14cbcSMatt Macy 			 */
9483eda14cbcSMatt Macy 			ASSERT(spa->spa_root != NULL);
9484eda14cbcSMatt Macy 			break;
9485eda14cbcSMatt Macy 
9486eda14cbcSMatt Macy 		case ZPOOL_PROP_READONLY:
9487eda14cbcSMatt Macy 		case ZPOOL_PROP_CACHEFILE:
9488eda14cbcSMatt Macy 			/*
9489c98ecfceSAllan Jude 			 * 'readonly' and 'cachefile' are also non-persistent
9490eda14cbcSMatt Macy 			 * properties.
9491eda14cbcSMatt Macy 			 */
9492eda14cbcSMatt Macy 			break;
9493eda14cbcSMatt Macy 		case ZPOOL_PROP_COMMENT:
9494eda14cbcSMatt Macy 			strval = fnvpair_value_string(elem);
9495eda14cbcSMatt Macy 			if (spa->spa_comment != NULL)
9496eda14cbcSMatt Macy 				spa_strfree(spa->spa_comment);
9497eda14cbcSMatt Macy 			spa->spa_comment = spa_strdup(strval);
9498eda14cbcSMatt Macy 			/*
9499eda14cbcSMatt Macy 			 * We need to dirty the configuration on all the vdevs
9500eda14cbcSMatt Macy 			 * so that their labels get updated.  We also need to
9501eda14cbcSMatt Macy 			 * update the cache file to keep it in sync with the
9502eda14cbcSMatt Macy 			 * MOS version. It's unnecessary to do this for pool
9503eda14cbcSMatt Macy 			 * creation since the vdev's configuration has already
9504eda14cbcSMatt Macy 			 * been dirtied.
9505eda14cbcSMatt Macy 			 */
9506eda14cbcSMatt Macy 			if (tx->tx_txg != TXG_INITIAL) {
9507eda14cbcSMatt Macy 				vdev_config_dirty(spa->spa_root_vdev);
9508eda14cbcSMatt Macy 				spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
9509eda14cbcSMatt Macy 			}
9510eda14cbcSMatt Macy 			spa_history_log_internal(spa, "set", tx,
9511eda14cbcSMatt Macy 			    "%s=%s", elemname, strval);
9512eda14cbcSMatt Macy 			break;
9513eda14cbcSMatt Macy 		case ZPOOL_PROP_COMPATIBILITY:
9514eda14cbcSMatt Macy 			strval = fnvpair_value_string(elem);
9515eda14cbcSMatt Macy 			if (spa->spa_compatibility != NULL)
9516eda14cbcSMatt Macy 				spa_strfree(spa->spa_compatibility);
9517eda14cbcSMatt Macy 			spa->spa_compatibility = spa_strdup(strval);
9518eda14cbcSMatt Macy 			/*
9519dbd5678dSMartin Matuska 			 * Dirty the configuration on vdevs as above.
9520dbd5678dSMartin Matuska 			 */
9521dbd5678dSMartin Matuska 			if (tx->tx_txg != TXG_INITIAL) {
9522eda14cbcSMatt Macy 				vdev_config_dirty(spa->spa_root_vdev);
9523eda14cbcSMatt Macy 				spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
9524eda14cbcSMatt Macy 			}
9525eda14cbcSMatt Macy 
9526eda14cbcSMatt Macy 			spa_history_log_internal(spa, "set", tx,
9527eda14cbcSMatt Macy 			    "%s=%s", nvpair_name(elem), strval);
9528eda14cbcSMatt Macy 			break;
9529eda14cbcSMatt Macy 
9530eda14cbcSMatt Macy 		case ZPOOL_PROP_INVAL:
9531eda14cbcSMatt Macy 			if (zpool_prop_feature(elemname)) {
9532eda14cbcSMatt Macy 				fname = strchr(elemname, '@') + 1;
9533eda14cbcSMatt Macy 				VERIFY0(zfeature_lookup_name(fname, &fid));
9534eda14cbcSMatt Macy 
9535eda14cbcSMatt Macy 				spa_feature_enable(spa, fid, tx);
9536eda14cbcSMatt Macy 				spa_history_log_internal(spa, "set", tx,
9537eda14cbcSMatt Macy 				    "%s=enabled", elemname);
9538eda14cbcSMatt Macy 				break;
9539eda14cbcSMatt Macy 			} else if (!zfs_prop_user(elemname)) {
9540eda14cbcSMatt Macy 				ASSERT(zpool_prop_feature(elemname));
9541eda14cbcSMatt Macy 				break;
9542eda14cbcSMatt Macy 			}
9543eda14cbcSMatt Macy 			zfs_fallthrough;
9544eda14cbcSMatt Macy 		default:
9545eda14cbcSMatt Macy 			/*
9546eda14cbcSMatt Macy 			 * Set pool property values in the poolprops mos object.
9547eda14cbcSMatt Macy 			 */
9548eda14cbcSMatt Macy 			if (spa->spa_pool_props_object == 0) {
9549eda14cbcSMatt Macy 				spa->spa_pool_props_object =
9550eda14cbcSMatt Macy 				    zap_create_link(mos, DMU_OT_POOL_PROPS,
9551eda14cbcSMatt Macy 				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
9552eda14cbcSMatt Macy 				    tx);
9553eda14cbcSMatt Macy 			}
9554eda14cbcSMatt Macy 
9555eda14cbcSMatt Macy 			/* normalize the property name */
9556eda14cbcSMatt Macy 			if (prop == ZPOOL_PROP_INVAL) {
9557eda14cbcSMatt Macy 				propname = elemname;
9558eda14cbcSMatt Macy 				proptype = PROP_TYPE_STRING;
9559eda14cbcSMatt Macy 			} else {
9560eda14cbcSMatt Macy 				propname = zpool_prop_to_name(prop);
9561eda14cbcSMatt Macy 				proptype = zpool_prop_get_type(prop);
9562eda14cbcSMatt Macy 			}
9563eda14cbcSMatt Macy 
9564eda14cbcSMatt Macy 			if (nvpair_type(elem) == DATA_TYPE_STRING) {
9565eda14cbcSMatt Macy 				ASSERT(proptype == PROP_TYPE_STRING);
9566eda14cbcSMatt Macy 				strval = fnvpair_value_string(elem);
9567eda14cbcSMatt Macy 				VERIFY0(zap_update(mos,
9568eda14cbcSMatt Macy 				    spa->spa_pool_props_object, propname,
9569eda14cbcSMatt Macy 				    1, strlen(strval) + 1, strval, tx));
9570eda14cbcSMatt Macy 				spa_history_log_internal(spa, "set", tx,
9571eda14cbcSMatt Macy 				    "%s=%s", elemname, strval);
9572eda14cbcSMatt Macy 			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
9573eda14cbcSMatt Macy 				intval = fnvpair_value_uint64(elem);
9574eda14cbcSMatt Macy 
9575eda14cbcSMatt Macy 				if (proptype == PROP_TYPE_INDEX) {
9576eda14cbcSMatt Macy 					const char *unused;
9577eda14cbcSMatt Macy 					VERIFY0(zpool_prop_index_to_string(
9578eda14cbcSMatt Macy 					    prop, intval, &unused));
9579eda14cbcSMatt Macy 				}
9580eda14cbcSMatt Macy 				VERIFY0(zap_update(mos,
9581eda14cbcSMatt Macy 				    spa->spa_pool_props_object, propname,
9582eda14cbcSMatt Macy 				    8, 1, &intval, tx));
9583eda14cbcSMatt Macy 				spa_history_log_internal(spa, "set", tx,
9584eda14cbcSMatt Macy 				    "%s=%lld", elemname,
9585eda14cbcSMatt Macy 				    (longlong_t)intval);
9586eda14cbcSMatt Macy 
9587eda14cbcSMatt Macy 				switch (prop) {
9588eda14cbcSMatt Macy 				case ZPOOL_PROP_DELEGATION:
9589eda14cbcSMatt Macy 					spa->spa_delegation = intval;
9590eda14cbcSMatt Macy 					break;
9591eda14cbcSMatt Macy 				case ZPOOL_PROP_BOOTFS:
9592eda14cbcSMatt Macy 					spa->spa_bootfs = intval;
9593eda14cbcSMatt Macy 					break;
9594eda14cbcSMatt Macy 				case ZPOOL_PROP_FAILUREMODE:
9595eda14cbcSMatt Macy 					spa->spa_failmode = intval;
9596eda14cbcSMatt Macy 					break;
9597eda14cbcSMatt Macy 				case ZPOOL_PROP_AUTOTRIM:
9598eda14cbcSMatt Macy 					spa->spa_autotrim = intval;
9599eda14cbcSMatt Macy 					spa_async_request(spa,
9600eda14cbcSMatt Macy 					    SPA_ASYNC_AUTOTRIM_RESTART);
9601eda14cbcSMatt Macy 					break;
9602eda14cbcSMatt Macy 				case ZPOOL_PROP_AUTOEXPAND:
9603eda14cbcSMatt Macy 					spa->spa_autoexpand = intval;
9604eda14cbcSMatt Macy 					if (tx->tx_txg != TXG_INITIAL)
9605eda14cbcSMatt Macy 						spa_async_request(spa,
9606eda14cbcSMatt Macy 						    SPA_ASYNC_AUTOEXPAND);
9607eda14cbcSMatt Macy 					break;
9608eda14cbcSMatt Macy 				case ZPOOL_PROP_MULTIHOST:
9609eda14cbcSMatt Macy 					spa->spa_multihost = intval;
9610eda14cbcSMatt Macy 					break;
9611eda14cbcSMatt Macy 				default:
9612eda14cbcSMatt Macy 					break;
9613eda14cbcSMatt Macy 				}
9614eda14cbcSMatt Macy 			} else {
9615eda14cbcSMatt Macy 				ASSERT(0); /* not allowed */
9616eda14cbcSMatt Macy 			}
9617eda14cbcSMatt Macy 		}
9618eda14cbcSMatt Macy 
9619eda14cbcSMatt Macy 	}
9620eda14cbcSMatt Macy 
9621eda14cbcSMatt Macy 	mutex_exit(&spa->spa_props_lock);
9622eda14cbcSMatt Macy }
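
/*
 * A hypothetical usage sketch (not part of the original file): the
 * nvlist handed to spa_sync_props() simply maps property names to
 * values, so setting a single property could look like this, assuming
 * the same sync-task dispatch that spa_prop_set() uses.
 */
static int
example_set_pool_comment(spa_t *spa, const char *comment)
{
	nvlist_t *props = fnvlist_alloc();
	int error;

	fnvlist_add_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_COMMENT), comment);
	error = dsl_sync_task(spa_name(spa), NULL, spa_sync_props,
	    props, 6, ZFS_SPACE_CHECK_RESERVED);
	fnvlist_free(props);
	return (error);
}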
9623eda14cbcSMatt Macy 
9624eda14cbcSMatt Macy /*
9625eda14cbcSMatt Macy  * Perform one-time upgrade on-disk changes.  spa_version() does not
9626eda14cbcSMatt Macy  * reflect the new version this txg, so there must be no changes this
9627eda14cbcSMatt Macy  * txg to anything that the upgrade code depends on after it executes.
9628eda14cbcSMatt Macy  * Therefore this must be called after dsl_pool_sync() does the sync
9629eda14cbcSMatt Macy  * tasks.
9630eda14cbcSMatt Macy  */
9631eda14cbcSMatt Macy static void
9632eda14cbcSMatt Macy spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
9633eda14cbcSMatt Macy {
9634eda14cbcSMatt Macy 	if (spa_sync_pass(spa) != 1)
9635eda14cbcSMatt Macy 		return;
9636eda14cbcSMatt Macy 
9637eda14cbcSMatt Macy 	dsl_pool_t *dp = spa->spa_dsl_pool;
9638eda14cbcSMatt Macy 	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
9639eda14cbcSMatt Macy 
9640eda14cbcSMatt Macy 	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
9641eda14cbcSMatt Macy 	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
9642eda14cbcSMatt Macy 		dsl_pool_create_origin(dp, tx);
9643eda14cbcSMatt Macy 
9644eda14cbcSMatt Macy 		/* Keeping the origin open increases spa_minref */
9645eda14cbcSMatt Macy 		spa->spa_minref += 3;
9646eda14cbcSMatt Macy 	}
9647eda14cbcSMatt Macy 
9648eda14cbcSMatt Macy 	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
9649eda14cbcSMatt Macy 	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
9650eda14cbcSMatt Macy 		dsl_pool_upgrade_clones(dp, tx);
9651eda14cbcSMatt Macy 	}
9652eda14cbcSMatt Macy 
9653eda14cbcSMatt Macy 	if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
9654eda14cbcSMatt Macy 	    spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
9655eda14cbcSMatt Macy 		dsl_pool_upgrade_dir_clones(dp, tx);
9656eda14cbcSMatt Macy 
9657eda14cbcSMatt Macy 		/* Keeping the freedir open increases spa_minref */
9658eda14cbcSMatt Macy 		spa->spa_minref += 3;
9659eda14cbcSMatt Macy 	}
9660eda14cbcSMatt Macy 
9661eda14cbcSMatt Macy 	if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
9662eda14cbcSMatt Macy 	    spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
9663eda14cbcSMatt Macy 		spa_feature_create_zap_objects(spa, tx);
9664eda14cbcSMatt Macy 	}
9665eda14cbcSMatt Macy 
9666eda14cbcSMatt Macy 	/*
9667eda14cbcSMatt Macy 	 * The LZ4_COMPRESS feature's behaviour was changed to
9668eda14cbcSMatt Macy 	 * activate_on_enable when the ability to use lz4 compression for
9669eda14cbcSMatt Macy 	 * metadata was added.  Old pools that have this feature enabled
9670eda14cbcSMatt Macy 	 * must be upgraded to have this feature active.
9671eda14cbcSMatt Macy 	 */
9672eda14cbcSMatt Macy 	if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
9673eda14cbcSMatt Macy 		boolean_t lz4_en = spa_feature_is_enabled(spa,
9674eda14cbcSMatt Macy 		    SPA_FEATURE_LZ4_COMPRESS);
9675eda14cbcSMatt Macy 		boolean_t lz4_ac = spa_feature_is_active(spa,
9676eda14cbcSMatt Macy 		    SPA_FEATURE_LZ4_COMPRESS);
9677eda14cbcSMatt Macy 
9678eda14cbcSMatt Macy 		if (lz4_en && !lz4_ac)
9679eda14cbcSMatt Macy 			spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
9680eda14cbcSMatt Macy 	}
9681eda14cbcSMatt Macy 
9682eda14cbcSMatt Macy 	/*
9683eda14cbcSMatt Macy 	 * If we haven't written the salt, do so now.  Note that the
9684eda14cbcSMatt Macy 	 * feature may not be activated yet, but that's fine since
96857877fdebSMatt Macy 	 * the presence of this ZAP entry is backwards compatible.
96867877fdebSMatt Macy 	 */
96877877fdebSMatt Macy 	if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
96887877fdebSMatt Macy 	    DMU_POOL_CHECKSUM_SALT) == ENOENT) {
96897877fdebSMatt Macy 		VERIFY0(zap_add(spa->spa_meta_objset,
96907877fdebSMatt Macy 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
96917877fdebSMatt Macy 		    sizeof (spa->spa_cksum_salt.zcs_bytes),
96927877fdebSMatt Macy 		    spa->spa_cksum_salt.zcs_bytes, tx));
96937877fdebSMatt Macy 	}
96947877fdebSMatt Macy 
96957877fdebSMatt Macy 	rrw_exit(&dp->dp_config_rwlock, FTAG);
96967877fdebSMatt Macy }
9697eda14cbcSMatt Macy 
9698eda14cbcSMatt Macy static void
9699eda14cbcSMatt Macy vdev_indirect_state_sync_verify(vdev_t *vd)
9700eda14cbcSMatt Macy {
9701eda14cbcSMatt Macy 	vdev_indirect_mapping_t *vim __maybe_unused = vd->vdev_indirect_mapping;
9702eda14cbcSMatt Macy 	vdev_indirect_births_t *vib __maybe_unused = vd->vdev_indirect_births;
9703eda14cbcSMatt Macy 
9704eda14cbcSMatt Macy 	if (vd->vdev_ops == &vdev_indirect_ops) {
9705eda14cbcSMatt Macy 		ASSERT(vim != NULL);
9706eda14cbcSMatt Macy 		ASSERT(vib != NULL);
9707eda14cbcSMatt Macy 	}
9708eda14cbcSMatt Macy 
9709eda14cbcSMatt Macy 	uint64_t obsolete_sm_object = 0;
9710eda14cbcSMatt Macy 	ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
9711eda14cbcSMatt Macy 	if (obsolete_sm_object != 0) {
9712eda14cbcSMatt Macy 		ASSERT(vd->vdev_obsolete_sm != NULL);
9713eda14cbcSMatt Macy 		ASSERT(vd->vdev_removing ||
9714eda14cbcSMatt Macy 		    vd->vdev_ops == &vdev_indirect_ops);
9715eda14cbcSMatt Macy 		ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
9716eda14cbcSMatt Macy 		ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
9717eda14cbcSMatt Macy 		ASSERT3U(obsolete_sm_object, ==,
9718eda14cbcSMatt Macy 		    space_map_object(vd->vdev_obsolete_sm));
9719eda14cbcSMatt Macy 		ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
9720eda14cbcSMatt Macy 		    space_map_allocated(vd->vdev_obsolete_sm));
9721eda14cbcSMatt Macy 	}
9722eda14cbcSMatt Macy 	ASSERT(vd->vdev_obsolete_segments != NULL);
9723eda14cbcSMatt Macy 
9724eda14cbcSMatt Macy 	/*
9725eda14cbcSMatt Macy 	 * Since frees / remaps to an indirect vdev can only
9726eda14cbcSMatt Macy 	 * happen in syncing context, the obsolete segments
9727eda14cbcSMatt Macy 	 * tree must be empty when we start syncing.
9728eda14cbcSMatt Macy 	 */
9729eda14cbcSMatt Macy 	ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
9730eda14cbcSMatt Macy }
9731eda14cbcSMatt Macy 
9732eda14cbcSMatt Macy /*
9733eda14cbcSMatt Macy  * Set the top-level vdev's max queue depth. Evaluate each top-level's
9734eda14cbcSMatt Macy  * async write queue depth in case it changed. The max queue depth will
9735eda14cbcSMatt Macy  * not change in the middle of syncing out this txg.
9736eda14cbcSMatt Macy  */
9737eda14cbcSMatt Macy static void
9738eda14cbcSMatt Macy spa_sync_adjust_vdev_max_queue_depth(spa_t *spa)
9739eda14cbcSMatt Macy {
9740eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
9741eda14cbcSMatt Macy 
9742eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
9743eda14cbcSMatt Macy 	uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
9744eda14cbcSMatt Macy 	    zfs_vdev_queue_depth_pct / 100;
9745eda14cbcSMatt Macy 	metaslab_class_t *normal = spa_normal_class(spa);
9746eda14cbcSMatt Macy 	metaslab_class_t *special = spa_special_class(spa);
9747eda14cbcSMatt Macy 	metaslab_class_t *dedup = spa_dedup_class(spa);
9748eda14cbcSMatt Macy 
9749eda14cbcSMatt Macy 	uint64_t slots_per_allocator = 0;
9750eda14cbcSMatt Macy 	for (int c = 0; c < rvd->vdev_children; c++) {
9751eda14cbcSMatt Macy 		vdev_t *tvd = rvd->vdev_child[c];
9752eda14cbcSMatt Macy 
9753eda14cbcSMatt Macy 		metaslab_group_t *mg = tvd->vdev_mg;
9754eda14cbcSMatt Macy 		if (mg == NULL || !metaslab_group_initialized(mg))
9755eda14cbcSMatt Macy 			continue;
9756eda14cbcSMatt Macy 
9757eda14cbcSMatt Macy 		metaslab_class_t *mc = mg->mg_class;
97582a58b312SMartin Matuska 		if (mc != normal && mc != special && mc != dedup)
9759eda14cbcSMatt Macy 			continue;
9760eda14cbcSMatt Macy 
9761c0a83fe0SMartin Matuska 		/*
9762eda14cbcSMatt Macy 		 * It is safe to do a lock-free check here because only async
9763eda14cbcSMatt Macy 		 * allocations look at mg_max_alloc_queue_depth, and async
9764eda14cbcSMatt Macy 		 * allocations all happen from spa_sync().
9765eda14cbcSMatt Macy 		 */
9766eda14cbcSMatt Macy 		for (int i = 0; i < mg->mg_allocators; i++) {
9767eda14cbcSMatt Macy 			ASSERT0(zfs_refcount_count(
9768eda14cbcSMatt Macy 			    &(mg->mg_allocator[i].mga_alloc_queue_depth)));
9769eda14cbcSMatt Macy 		}
9770eda14cbcSMatt Macy 		mg->mg_max_alloc_queue_depth = max_queue_depth;
9771eda14cbcSMatt Macy 
9772e716630dSMartin Matuska 		for (int i = 0; i < mg->mg_allocators; i++) {
9773e716630dSMartin Matuska 			mg->mg_allocator[i].mga_cur_max_alloc_queue_depth =
9774e716630dSMartin Matuska 			    zfs_vdev_def_queue_depth;
9775e716630dSMartin Matuska 		}
9776e716630dSMartin Matuska 		slots_per_allocator += zfs_vdev_def_queue_depth;
9777e716630dSMartin Matuska 	}
9778e716630dSMartin Matuska 
9779e716630dSMartin Matuska 	for (int i = 0; i < spa->spa_alloc_count; i++) {
9780e716630dSMartin Matuska 		ASSERT0(zfs_refcount_count(&normal->mc_allocator[i].
9781e716630dSMartin Matuska 		    mca_alloc_slots));
9782e716630dSMartin Matuska 		ASSERT0(zfs_refcount_count(&special->mc_allocator[i].
9783e716630dSMartin Matuska 		    mca_alloc_slots));
9784e716630dSMartin Matuska 		ASSERT0(zfs_refcount_count(&dedup->mc_allocator[i].
9785e716630dSMartin Matuska 		    mca_alloc_slots));
9786e716630dSMartin Matuska 		normal->mc_allocator[i].mca_alloc_max_slots =
9787e716630dSMartin Matuska 		    slots_per_allocator;
9788e716630dSMartin Matuska 		special->mc_allocator[i].mca_alloc_max_slots =
9789e716630dSMartin Matuska 		    slots_per_allocator;
9790e716630dSMartin Matuska 		dedup->mc_allocator[i].mca_alloc_max_slots =
9791e716630dSMartin Matuska 		    slots_per_allocator;
9792e716630dSMartin Matuska 	}
9793eda14cbcSMatt Macy 	normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
9794eda14cbcSMatt Macy 	special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
9795eda14cbcSMatt Macy 	dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
9796eda14cbcSMatt Macy }
9797eda14cbcSMatt Macy 
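/*
 * Illustrative sketch (kept out of the build with #if 0): the queue-depth
 * arithmetic used by spa_sync_adjust_vdev_max_queue_depth() above, as a
 * standalone userland program. The tunable values below are assumptions
 * chosen for the example, not a claim about any system's defaults.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* Assumed example values for the two tunables. */
	uint32_t zfs_vdev_async_write_max_active = 10;
	uint32_t zfs_vdev_queue_depth_pct = 1000;

	/* Same expression as in spa_sync_adjust_vdev_max_queue_depth(). */
	uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
	    zfs_vdev_queue_depth_pct / 100;

	printf("max_queue_depth = %u\n", max_queue_depth);	/* 100 */
	return (0);
}
#endif
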
9798eda14cbcSMatt Macy static void
9799eda14cbcSMatt Macy spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx)
9800eda14cbcSMatt Macy {
9801eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
9802eda14cbcSMatt Macy 
9803eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
9804eda14cbcSMatt Macy 	for (int c = 0; c < rvd->vdev_children; c++) {
9805eda14cbcSMatt Macy 		vdev_t *vd = rvd->vdev_child[c];
9806eda14cbcSMatt Macy 		vdev_indirect_state_sync_verify(vd);
9807eda14cbcSMatt Macy 
9808eda14cbcSMatt Macy 		if (vdev_indirect_should_condense(vd)) {
9809eda14cbcSMatt Macy 			spa_condense_indirect_start_sync(vd, tx);
9810eda14cbcSMatt Macy 			break;
9811eda14cbcSMatt Macy 		}
9812eda14cbcSMatt Macy 	}
9813eda14cbcSMatt Macy }
9814eda14cbcSMatt Macy 
9815eda14cbcSMatt Macy static void
9816eda14cbcSMatt Macy spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx)
9817eda14cbcSMatt Macy {
9818eda14cbcSMatt Macy 	objset_t *mos = spa->spa_meta_objset;
9819eda14cbcSMatt Macy 	dsl_pool_t *dp = spa->spa_dsl_pool;
9820eda14cbcSMatt Macy 	uint64_t txg = tx->tx_txg;
9821eda14cbcSMatt Macy 	bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
9822eda14cbcSMatt Macy 
9823eda14cbcSMatt Macy 	do {
9824eda14cbcSMatt Macy 		int pass = ++spa->spa_sync_pass;
9825eda14cbcSMatt Macy 
9826eda14cbcSMatt Macy 		spa_sync_config_object(spa, tx);
9827eda14cbcSMatt Macy 		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
9828eda14cbcSMatt Macy 		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
9829eda14cbcSMatt Macy 		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
9830eda14cbcSMatt Macy 		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
9831eda14cbcSMatt Macy 		spa_errlog_sync(spa, txg);
9832eda14cbcSMatt Macy 		dsl_pool_sync(dp, txg);
9833eda14cbcSMatt Macy 
9834eda14cbcSMatt Macy 		if (pass < zfs_sync_pass_deferred_free ||
9835eda14cbcSMatt Macy 		    spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
9836eda14cbcSMatt Macy 			/*
9837eda14cbcSMatt Macy 			 * If the log space map feature is active we don't
9838eda14cbcSMatt Macy 			 * care about deferred frees and the deferred bpobj
9839eda14cbcSMatt Macy 			 * as the log space map should effectively have the
9840eda14cbcSMatt Macy 			 * same results (i.e. appending only to one object).
9841eda14cbcSMatt Macy 			 */
9842eda14cbcSMatt Macy 			spa_sync_frees(spa, free_bpl, tx);
9843eda14cbcSMatt Macy 		} else {
9844eda14cbcSMatt Macy 			/*
9845eda14cbcSMatt Macy 			 * We can not defer frees in pass 1, because
9846eda14cbcSMatt Macy 			 * We cannot defer frees in pass 1, because
9847eda14cbcSMatt Macy 			 */
9848eda14cbcSMatt Macy 			ASSERT3U(pass, >, 1);
984933b8c039SMartin Matuska 			bplist_iterate(free_bpl, bpobj_enqueue_alloc_cb,
9850eda14cbcSMatt Macy 			    &spa->spa_deferred_bpobj, tx);
9851eda14cbcSMatt Macy 		}
9852eda14cbcSMatt Macy 
9853eda14cbcSMatt Macy 		brt_sync(spa, txg);
9854eda14cbcSMatt Macy 		ddt_sync(spa, txg);
9855eda14cbcSMatt Macy 		dsl_scan_sync(dp, tx);
9856eda14cbcSMatt Macy 		dsl_errorscrub_sync(dp, tx);
9857eda14cbcSMatt Macy 		svr_sync(spa, tx);
9858eda14cbcSMatt Macy 		spa_sync_upgrades(spa, tx);
9859eda14cbcSMatt Macy 
9860eda14cbcSMatt Macy 		spa_flush_metaslabs(spa, tx);
9861eda14cbcSMatt Macy 
9862eda14cbcSMatt Macy 		vdev_t *vd = NULL;
9863eda14cbcSMatt Macy 		while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
9864eda14cbcSMatt Macy 		    != NULL)
9865eda14cbcSMatt Macy 			vdev_sync(vd, txg);
9866eda14cbcSMatt Macy 
9867eda14cbcSMatt Macy 		if (pass == 1) {
9868eda14cbcSMatt Macy 			/*
9869eda14cbcSMatt Macy 			 * dsl_pool_sync() -> dp_sync_tasks may have dirtied
9870eda14cbcSMatt Macy 			 * the config. If that happens, this txg should not
9871eda14cbcSMatt Macy 			 * be a no-op. So we must sync the config to the MOS
9872eda14cbcSMatt Macy 			 * before checking for no-op.
9873eda14cbcSMatt Macy 			 *
9874eda14cbcSMatt Macy 			 * Note that when the config is dirty, it will
9875eda14cbcSMatt Macy 			 * be written to the MOS (i.e. the MOS will be
9876eda14cbcSMatt Macy 			 * dirtied) every time we call spa_sync_config_object()
9877eda14cbcSMatt Macy 			 * in this txg.  Therefore we can't call this after
9878eda14cbcSMatt Macy 			 * dsl_pool_sync() every pass, because it would
9879eda14cbcSMatt Macy 			 * prevent us from converging, since we'd dirty
9880eda14cbcSMatt Macy 			 * the MOS every pass.
9881eda14cbcSMatt Macy 			 *
9882eda14cbcSMatt Macy 			 * Sync tasks can only be processed in pass 1, so
9883eda14cbcSMatt Macy 			 * there's no need to do this in later passes.
9884eda14cbcSMatt Macy 			 */
9885eda14cbcSMatt Macy 			spa_sync_config_object(spa, tx);
9886eda14cbcSMatt Macy 		}
9887eda14cbcSMatt Macy 
9888eda14cbcSMatt Macy 		/*
9889eda14cbcSMatt Macy 		 * Note: We need to check if the MOS is dirty because we could
9890eda14cbcSMatt Macy 		 * have marked the MOS dirty without updating the uberblock
9891eda14cbcSMatt Macy 		 * (e.g. if we have sync tasks but no dirty user data). We need
9892eda14cbcSMatt Macy 		 * to check the uberblock's rootbp because it is updated if we
9893eda14cbcSMatt Macy 		 * have synced out dirty data (though in this case the MOS will
9894eda14cbcSMatt Macy 		 * most likely also be dirty due to second order effects, we
9895eda14cbcSMatt Macy 		 * don't want to rely on that here).
9896eda14cbcSMatt Macy 		 */
9897eda14cbcSMatt Macy 		if (pass == 1 &&
9898eda14cbcSMatt Macy 		    BP_GET_LOGICAL_BIRTH(&spa->spa_uberblock.ub_rootbp) < txg &&
9899eda14cbcSMatt Macy 		    !dmu_objset_is_dirty(mos, txg)) {
9900eda14cbcSMatt Macy 			/*
9901eda14cbcSMatt Macy 			 * Nothing changed on the first pass, therefore this
9902eda14cbcSMatt Macy 			 * TXG is a no-op. Avoid syncing deferred frees, so
9903eda14cbcSMatt Macy 			 * that we can keep this TXG as a no-op.
9904eda14cbcSMatt Macy 			 */
9905eda14cbcSMatt Macy 			ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
99062a58b312SMartin Matuska 			ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
99072a58b312SMartin Matuska 			ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
99082a58b312SMartin Matuska 			ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg));
99092a58b312SMartin Matuska 			break;
99102a58b312SMartin Matuska 		}
99112a58b312SMartin Matuska 
99122a58b312SMartin Matuska 		spa_sync_deferred_frees(spa, tx);
9913eda14cbcSMatt Macy 	} while (dmu_objset_is_dirty(mos, txg));
9914eda14cbcSMatt Macy }
9915eda14cbcSMatt Macy 
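/*
 * Illustrative sketch (kept out of the build with #if 0): the shape of the
 * convergence loop above, reduced to a userland toy. Each pass of syncing
 * may dirty more state, so we iterate until a pass leaves nothing dirty,
 * mirroring the do { ... } while (dmu_objset_is_dirty(mos, txg)) structure.
 * The halving of "dirty" below is an arbitrary stand-in for real work.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int dirty = 5;		/* stand-in for dirty MOS state */
	int pass = 0;

	do {
		pass++;
		dirty /= 2;	/* a pass retires most, not all, dirty state */
		printf("pass %d: %d dirty items remain\n", pass, dirty);
	} while (dirty > 0);

	printf("converged after %d passes\n", pass);
	return (0);
}
#endif
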
9916eda14cbcSMatt Macy /*
9917eda14cbcSMatt Macy  * Rewrite the vdev configuration (which includes the uberblock) to
9918eda14cbcSMatt Macy  * commit the transaction group.
9919eda14cbcSMatt Macy  *
9920eda14cbcSMatt Macy  * If there are no dirty vdevs, we sync the uberblock to a few random
99213f9d360cSMartin Matuska  * top-level vdevs that are known to be visible in the config cache
99223f9d360cSMartin Matuska  * (see spa_vdev_add() for a complete description). If there *are* dirty
99233f9d360cSMartin Matuska  * vdevs, sync the uberblock to all vdevs.
9924eda14cbcSMatt Macy  */
9925eda14cbcSMatt Macy static void
9926eda14cbcSMatt Macy spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx)
9927eda14cbcSMatt Macy {
9928eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
9929eda14cbcSMatt Macy 	uint64_t txg = tx->tx_txg;
9930eda14cbcSMatt Macy 
99312a58b312SMartin Matuska 	for (;;) {
99322a58b312SMartin Matuska 		int error = 0;
99332a58b312SMartin Matuska 
99342a58b312SMartin Matuska 		/*
99352a58b312SMartin Matuska 		 * We hold SCL_STATE to prevent vdev open/close/etc.
99362a58b312SMartin Matuska 		 * while we're attempting to write the vdev labels.
99372a58b312SMartin Matuska 		 */
9938eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
9939eda14cbcSMatt Macy 
9940eda14cbcSMatt Macy 		if (list_is_empty(&spa->spa_config_dirty_list)) {
9941eda14cbcSMatt Macy 			vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
9942eda14cbcSMatt Macy 			int svdcount = 0;
9943eda14cbcSMatt Macy 			int children = rvd->vdev_children;
9944eda14cbcSMatt Macy 			int c0 = random_in_range(children);
9945eda14cbcSMatt Macy 
9946eda14cbcSMatt Macy 			for (int c = 0; c < children; c++) {
9947eda14cbcSMatt Macy 				vdev_t *vd =
9948eda14cbcSMatt Macy 				    rvd->vdev_child[(c0 + c) % children];
9949eda14cbcSMatt Macy 
9950eda14cbcSMatt Macy 				/* Stop when revisiting the first vdev */
9951eda14cbcSMatt Macy 				if (c > 0 && svd[0] == vd)
9952eda14cbcSMatt Macy 					break;
9953eda14cbcSMatt Macy 
9954eda14cbcSMatt Macy 				if (vd->vdev_ms_array == 0 ||
9955eda14cbcSMatt Macy 				    vd->vdev_islog ||
9956eda14cbcSMatt Macy 				    !vdev_is_concrete(vd))
9957eda14cbcSMatt Macy 					continue;
9958eda14cbcSMatt Macy 
9959eda14cbcSMatt Macy 				svd[svdcount++] = vd;
9960eda14cbcSMatt Macy 				if (svdcount == SPA_SYNC_MIN_VDEVS)
9961eda14cbcSMatt Macy 					break;
9962eda14cbcSMatt Macy 			}
9963eda14cbcSMatt Macy 			error = vdev_config_sync(svd, svdcount, txg);
9964eda14cbcSMatt Macy 		} else {
9965eda14cbcSMatt Macy 			error = vdev_config_sync(rvd->vdev_child,
9966eda14cbcSMatt Macy 			    rvd->vdev_children, txg);
9967eda14cbcSMatt Macy 		}
9968eda14cbcSMatt Macy 
9969eda14cbcSMatt Macy 		if (error == 0)
9970eda14cbcSMatt Macy 			spa->spa_last_synced_guid = rvd->vdev_guid;
9971eda14cbcSMatt Macy 
9972eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_STATE, FTAG);
9973eda14cbcSMatt Macy 
9974eda14cbcSMatt Macy 		if (error == 0)
9975eda14cbcSMatt Macy 			break;
9976eda14cbcSMatt Macy 		zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
9977eda14cbcSMatt Macy 		zio_resume_wait(spa);
9978eda14cbcSMatt Macy 	}
9979eda14cbcSMatt Macy }
9980eda14cbcSMatt Macy 
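/*
 * Illustrative sketch (kept out of the build with #if 0): picking up to N
 * eligible children starting from a random offset and wrapping around, as
 * the SPA_SYNC_MIN_VDEVS selection above does. The eligibility test and
 * the value of N here are assumptions made up for the example.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

#define	EXAMPLE_MIN_VDEVS	3

int
main(void)
{
	int children = 8;
	int picked[EXAMPLE_MIN_VDEVS];
	int npicked = 0;
	int c0 = rand() % children;	/* random starting child */

	for (int c = 0; c < children && npicked < EXAMPLE_MIN_VDEVS; c++) {
		int child = (c0 + c) % children;

		if (child % 2 != 0)	/* assumed eligibility test */
			continue;
		picked[npicked++] = child;
	}

	for (int i = 0; i < npicked; i++)
		printf("picked child %d\n", picked[i]);
	return (0);
}
#endif
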
9981eda14cbcSMatt Macy /*
9982eda14cbcSMatt Macy  * Sync the specified transaction group.  New blocks may be dirtied as
9983eda14cbcSMatt Macy  * part of the process, so we iterate until it converges.
9984eda14cbcSMatt Macy  */
9985eda14cbcSMatt Macy void
9986eda14cbcSMatt Macy spa_sync(spa_t *spa, uint64_t txg)
9987eda14cbcSMatt Macy {
9988eda14cbcSMatt Macy 	vdev_t *vd = NULL;
9989eda14cbcSMatt Macy 
9990eda14cbcSMatt Macy 	VERIFY(spa_writeable(spa));
9991eda14cbcSMatt Macy 
9992eda14cbcSMatt Macy 	/*
9993eda14cbcSMatt Macy 	 * Wait for i/os issued in open context that need to complete
9994eda14cbcSMatt Macy 	 * before this txg syncs.
9995eda14cbcSMatt Macy 	 */
9996eda14cbcSMatt Macy 	(void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]);
9997eda14cbcSMatt Macy 	spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL,
9998eda14cbcSMatt Macy 	    ZIO_FLAG_CANFAIL);
9999eda14cbcSMatt Macy 
10000eda14cbcSMatt Macy 	/*
10001eda14cbcSMatt Macy 	 * Now that there can be no more cloning in this transaction group,
10002eda14cbcSMatt Macy 	 * but we are still before issuing frees, we can process pending BRT
10003eda14cbcSMatt Macy 	 * updates.
10004eda14cbcSMatt Macy 	 */
10005eda14cbcSMatt Macy 	brt_pending_apply(spa, txg);
10006eda14cbcSMatt Macy 
10007eda14cbcSMatt Macy 	/*
10008eda14cbcSMatt Macy 	 * Lock out configuration changes.
10009eda14cbcSMatt Macy 	 */
10010eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
10011eda14cbcSMatt Macy 
10012eda14cbcSMatt Macy 	spa->spa_syncing_txg = txg;
10013eda14cbcSMatt Macy 	spa->spa_sync_pass = 0;
10014eda14cbcSMatt Macy 
10015eda14cbcSMatt Macy 	for (int i = 0; i < spa->spa_alloc_count; i++) {
10016eda14cbcSMatt Macy 		mutex_enter(&spa->spa_allocs[i].spaa_lock);
10017eda14cbcSMatt Macy 		VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
10018eda14cbcSMatt Macy 		mutex_exit(&spa->spa_allocs[i].spaa_lock);
10019eda14cbcSMatt Macy 	}
10020eda14cbcSMatt Macy 
10021eda14cbcSMatt Macy 	/*
10022eda14cbcSMatt Macy 	 * If there are any pending vdev state changes, convert them
10023eda14cbcSMatt Macy 	 * into config changes that go out with this transaction group.
10024eda14cbcSMatt Macy 	 */
10025eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
10026eda14cbcSMatt Macy 	while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
10027eda14cbcSMatt Macy 		/* Avoid holding the write lock unless actually necessary */
10028eda14cbcSMatt Macy 		if (vd->vdev_aux == NULL) {
10029eda14cbcSMatt Macy 			vdev_state_clean(vd);
10030eda14cbcSMatt Macy 			vdev_config_dirty(vd);
10031eda14cbcSMatt Macy 			continue;
10032eda14cbcSMatt Macy 		}
10033eda14cbcSMatt Macy 		/*
10034eda14cbcSMatt Macy 		 * We need the write lock here because, for aux vdevs,
10035eda14cbcSMatt Macy 		 * calling vdev_config_dirty() modifies sav_config.
10036eda14cbcSMatt Macy 		 * This is ugly and will become unnecessary when we
10037eda14cbcSMatt Macy 		 * eliminate the aux vdev wart by integrating all vdevs
10038eda14cbcSMatt Macy 		 * into the root vdev tree.
100393f9d360cSMartin Matuska 		 */
100403f9d360cSMartin Matuska 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
100413f9d360cSMartin Matuska 		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
10042eda14cbcSMatt Macy 		while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
10043eda14cbcSMatt Macy 			vdev_state_clean(vd);
10044eda14cbcSMatt Macy 			vdev_config_dirty(vd);
10045eda14cbcSMatt Macy 		}
10046eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
10047eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
10048eda14cbcSMatt Macy 	}
10049eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_STATE, FTAG);
10050eda14cbcSMatt Macy 
10051eda14cbcSMatt Macy 	dsl_pool_t *dp = spa->spa_dsl_pool;
10052eda14cbcSMatt Macy 	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
10053eda14cbcSMatt Macy 
10054eda14cbcSMatt Macy 	spa->spa_sync_starttime = gethrtime();
10055eda14cbcSMatt Macy 	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
10056eda14cbcSMatt Macy 	spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
10057eda14cbcSMatt Macy 	    spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
100582a58b312SMartin Matuska 	    NSEC_TO_TICK(spa->spa_deadman_synctime));
100592a58b312SMartin Matuska 
100602a58b312SMartin Matuska 	/*
10061eda14cbcSMatt Macy 	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
10062eda14cbcSMatt Macy 	 * set spa_deflate if we have no raid-z vdevs.
10063eda14cbcSMatt Macy 	 */
10064eda14cbcSMatt Macy 	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
10065eda14cbcSMatt Macy 	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
10066eda14cbcSMatt Macy 		vdev_t *rvd = spa->spa_root_vdev;
10067eda14cbcSMatt Macy 
10068eda14cbcSMatt Macy 		int i;
10069eda14cbcSMatt Macy 		for (i = 0; i < rvd->vdev_children; i++) {
10070eda14cbcSMatt Macy 			vd = rvd->vdev_child[i];
10071eda14cbcSMatt Macy 			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
10072eda14cbcSMatt Macy 				break;
10073eda14cbcSMatt Macy 		}
10074eda14cbcSMatt Macy 		if (i == rvd->vdev_children) {
10075eda14cbcSMatt Macy 			spa->spa_deflate = TRUE;
10076eda14cbcSMatt Macy 			VERIFY0(zap_add(spa->spa_meta_objset,
10077eda14cbcSMatt Macy 			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
10078eda14cbcSMatt Macy 			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
10079eda14cbcSMatt Macy 		}
10080eda14cbcSMatt Macy 	}
10081eda14cbcSMatt Macy 
10082eda14cbcSMatt Macy 	spa_sync_adjust_vdev_max_queue_depth(spa);
10083eda14cbcSMatt Macy 
10084eda14cbcSMatt Macy 	spa_sync_condense_indirect(spa, tx);
10085eda14cbcSMatt Macy 
10086eda14cbcSMatt Macy 	spa_sync_iterate_to_convergence(spa, tx);
10087eda14cbcSMatt Macy 
10088eda14cbcSMatt Macy #ifdef ZFS_DEBUG
10089eda14cbcSMatt Macy 	if (!list_is_empty(&spa->spa_config_dirty_list)) {
10090eda14cbcSMatt Macy 		/*
10091eda14cbcSMatt Macy 		 * Make sure that the number of ZAPs for all the vdevs matches
10092eda14cbcSMatt Macy 		 * the number of ZAPs in the per-vdev ZAP list. This only gets
10093eda14cbcSMatt Macy 		 * called if the config is dirty; otherwise there may be
10094eda14cbcSMatt Macy 		 * outstanding AVZ operations that weren't completed in
10095eda14cbcSMatt Macy 		 * spa_sync_config_object.
10096eda14cbcSMatt Macy 		 */
10097eda14cbcSMatt Macy 		uint64_t all_vdev_zap_entry_count;
10098eda14cbcSMatt Macy 		ASSERT0(zap_count(spa->spa_meta_objset,
10099eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
10100eda14cbcSMatt Macy 		ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
10101eda14cbcSMatt Macy 		    all_vdev_zap_entry_count);
10102eda14cbcSMatt Macy 	}
10103eda14cbcSMatt Macy #endif
10104eda14cbcSMatt Macy 
10105eda14cbcSMatt Macy 	if (spa->spa_vdev_removal != NULL) {
10106eda14cbcSMatt Macy 		ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);
10107eda14cbcSMatt Macy 	}
10108eda14cbcSMatt Macy 
10109eda14cbcSMatt Macy 	spa_sync_rewrite_vdev_config(spa, tx);
10110eda14cbcSMatt Macy 	dmu_tx_commit(tx);
10111eda14cbcSMatt Macy 
10112eda14cbcSMatt Macy 	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
10113eda14cbcSMatt Macy 	spa->spa_deadman_tqid = 0;
1011414c2e0a0SMartin Matuska 
1011514c2e0a0SMartin Matuska 	/*
1011614c2e0a0SMartin Matuska 	 * Clear the dirty config list.
1011714c2e0a0SMartin Matuska 	 */
1011814c2e0a0SMartin Matuska 	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
1011914c2e0a0SMartin Matuska 		vdev_config_clean(vd);
1012014c2e0a0SMartin Matuska 
1012114c2e0a0SMartin Matuska 	/*
1012214c2e0a0SMartin Matuska 	 * Now that the new config has synced transactionally,
1012314c2e0a0SMartin Matuska 	 * let it become visible to the config cache.
1012414c2e0a0SMartin Matuska 	 */
1012514c2e0a0SMartin Matuska 	if (spa->spa_config_syncing != NULL) {
1012614c2e0a0SMartin Matuska 		spa_config_set(spa, spa->spa_config_syncing);
1012714c2e0a0SMartin Matuska 		spa->spa_config_txg = txg;
1012814c2e0a0SMartin Matuska 		spa->spa_config_syncing = NULL;
1012914c2e0a0SMartin Matuska 	}
1013014c2e0a0SMartin Matuska 
1013114c2e0a0SMartin Matuska 	dsl_pool_sync_done(dp, txg);
1013214c2e0a0SMartin Matuska 
1013314c2e0a0SMartin Matuska 	for (int i = 0; i < spa->spa_alloc_count; i++) {
1013414c2e0a0SMartin Matuska 		mutex_enter(&spa->spa_allocs[i].spaa_lock);
1013514c2e0a0SMartin Matuska 		VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
1013614c2e0a0SMartin Matuska 		mutex_exit(&spa->spa_allocs[i].spaa_lock);
1013714c2e0a0SMartin Matuska 	}
1013814c2e0a0SMartin Matuska 
1013914c2e0a0SMartin Matuska 	/*
1014014c2e0a0SMartin Matuska 	 * Update usable space statistics.
1014114c2e0a0SMartin Matuska 	 */
1014214c2e0a0SMartin Matuska 	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
1014314c2e0a0SMartin Matuska 	    != NULL)
1014414c2e0a0SMartin Matuska 		vdev_sync_done(vd, txg);
1014514c2e0a0SMartin Matuska 
1014614c2e0a0SMartin Matuska 	metaslab_class_evict_old(spa->spa_normal_class, txg);
1014714c2e0a0SMartin Matuska 	metaslab_class_evict_old(spa->spa_log_class, txg);
1014814c2e0a0SMartin Matuska 
1014914c2e0a0SMartin Matuska 	spa_sync_close_syncing_log_sm(spa);
1015014c2e0a0SMartin Matuska 
1015114c2e0a0SMartin Matuska 	spa_update_dspace(spa);
1015214c2e0a0SMartin Matuska 
1015314c2e0a0SMartin Matuska 	if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON)
1015414c2e0a0SMartin Matuska 		vdev_autotrim_kick(spa);
1015514c2e0a0SMartin Matuska 
1015614c2e0a0SMartin Matuska 	/*
1015714c2e0a0SMartin Matuska 	 * It had better be the case that we didn't dirty anything
1015814c2e0a0SMartin Matuska 	 * since vdev_config_sync().
1015914c2e0a0SMartin Matuska 	 */
1016014c2e0a0SMartin Matuska 	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
1016114c2e0a0SMartin Matuska 	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
1016214c2e0a0SMartin Matuska 	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
1016314c2e0a0SMartin Matuska 
1016414c2e0a0SMartin Matuska 	while (zfs_pause_spa_sync)
1016514c2e0a0SMartin Matuska 		delay(1);
1016614c2e0a0SMartin Matuska 
1016714c2e0a0SMartin Matuska 	spa->spa_sync_pass = 0;
1016814c2e0a0SMartin Matuska 
1016914c2e0a0SMartin Matuska 	/*
1017014c2e0a0SMartin Matuska 	 * Update the last synced uberblock here. We want to do this at
1017114c2e0a0SMartin Matuska 	 * the end of spa_sync() so that consumers of spa_last_synced_txg()
1017214c2e0a0SMartin Matuska 	 * will be guaranteed that all the processing associated with
1017314c2e0a0SMartin Matuska 	 * that txg has been completed.
1017414c2e0a0SMartin Matuska 	 */
1017514c2e0a0SMartin Matuska 	spa->spa_ubsync = spa->spa_uberblock;
1017614c2e0a0SMartin Matuska 	spa_config_exit(spa, SCL_CONFIG, FTAG);
1017714c2e0a0SMartin Matuska 
1017814c2e0a0SMartin Matuska 	spa_handle_ignored_writes(spa);
1017914c2e0a0SMartin Matuska 
1018014c2e0a0SMartin Matuska 	/*
1018114c2e0a0SMartin Matuska 	 * If any async tasks have been requested, kick them off.
1018214c2e0a0SMartin Matuska 	 */
1018314c2e0a0SMartin Matuska 	spa_async_dispatch(spa);
1018414c2e0a0SMartin Matuska }
1018514c2e0a0SMartin Matuska 
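/*
 * Illustrative sketch (kept out of the build with #if 0): the deadman
 * pattern used in spa_sync() above -- arm a delayed watchdog before a
 * potentially long operation and disarm it on completion -- rendered with
 * alarm(2) as a userland analogue. The 5-second timeout is an assumption
 * for the example.
 */
#if 0
#include <stdio.h>
#include <signal.h>
#include <unistd.h>

static void
watchdog(int sig)
{
	(void) sig;
	/* The kernel analogue is spa_deadman() firing. */
	write(STDERR_FILENO, "deadman fired\n", 14);
}

int
main(void)
{
	signal(SIGALRM, watchdog);
	alarm(5);	/* arm, like taskq_dispatch_delay() above */

	sleep(1);	/* the "sync" work; fast enough not to trip it */

	alarm(0);	/* disarm, like taskq_cancel_id() above */
	printf("work finished before the deadman\n");
	return (0);
}
#endif
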
1018614c2e0a0SMartin Matuska /*
1018714c2e0a0SMartin Matuska  * Sync all pools.  We don't want to hold the namespace lock across these
1018814c2e0a0SMartin Matuska  * operations, so we take a reference on the spa_t and drop the lock during the
1018914c2e0a0SMartin Matuska  * sync.
1019014c2e0a0SMartin Matuska  */
1019114c2e0a0SMartin Matuska void
1019214c2e0a0SMartin Matuska spa_sync_allpools(void)
1019314c2e0a0SMartin Matuska {
1019414c2e0a0SMartin Matuska 	spa_t *spa = NULL;
1019514c2e0a0SMartin Matuska 	mutex_enter(&spa_namespace_lock);
1019614c2e0a0SMartin Matuska 	while ((spa = spa_next(spa)) != NULL) {
1019714c2e0a0SMartin Matuska 		if (spa_state(spa) != POOL_STATE_ACTIVE ||
1019814c2e0a0SMartin Matuska 		    !spa_writeable(spa) || spa_suspended(spa))
1019914c2e0a0SMartin Matuska 			continue;
1020014c2e0a0SMartin Matuska 		spa_open_ref(spa, FTAG);
1020114c2e0a0SMartin Matuska 		mutex_exit(&spa_namespace_lock);
1020214c2e0a0SMartin Matuska 		txg_wait_synced(spa_get_dsl(spa), 0);
1020314c2e0a0SMartin Matuska 		mutex_enter(&spa_namespace_lock);
1020414c2e0a0SMartin Matuska 		spa_close(spa, FTAG);
1020514c2e0a0SMartin Matuska 	}
1020614c2e0a0SMartin Matuska 	mutex_exit(&spa_namespace_lock);
1020714c2e0a0SMartin Matuska }
1020814c2e0a0SMartin Matuska 
1020914c2e0a0SMartin Matuska taskq_t *
1021014c2e0a0SMartin Matuska spa_sync_tq_create(spa_t *spa, const char *name)
1021114c2e0a0SMartin Matuska {
10212eda14cbcSMatt Macy 	kthread_t **kthreads;
10213eda14cbcSMatt Macy 
10214eda14cbcSMatt Macy 	ASSERT(spa->spa_sync_tq == NULL);
10215eda14cbcSMatt Macy 	ASSERT3S(spa->spa_alloc_count, <=, boot_ncpus);
10216eda14cbcSMatt Macy 
10217eda14cbcSMatt Macy 	/*
10218eda14cbcSMatt Macy 	 * - do not allow more allocators than cpus.
10219eda14cbcSMatt Macy 	 * - there may be more cpus than allocators.
10220eda14cbcSMatt Macy 	 * - do not allow more sync taskq threads than allocators or cpus.
10221eda14cbcSMatt Macy 	 */
10222eda14cbcSMatt Macy 	int nthreads = spa->spa_alloc_count;
10223eda14cbcSMatt Macy 	spa->spa_syncthreads = kmem_zalloc(sizeof (spa_syncthread_info_t) *
10224eda14cbcSMatt Macy 	    nthreads, KM_SLEEP);
10225eda14cbcSMatt Macy 
10226eda14cbcSMatt Macy 	spa->spa_sync_tq = taskq_create_synced(name, nthreads, minclsyspri,
10227eda14cbcSMatt Macy 	    nthreads, INT_MAX, TASKQ_PREPOPULATE, &kthreads);
10228eda14cbcSMatt Macy 	VERIFY(spa->spa_sync_tq != NULL);
10229eda14cbcSMatt Macy 	VERIFY(kthreads != NULL);
10230eda14cbcSMatt Macy 
10231eda14cbcSMatt Macy 	spa_syncthread_info_t *ti = spa->spa_syncthreads;
10232eda14cbcSMatt Macy 	for (int i = 0; i < nthreads; i++, ti++) {
10233eda14cbcSMatt Macy 		ti->sti_thread = kthreads[i];
10234eda14cbcSMatt Macy 		ti->sti_allocator = i;
10235eda14cbcSMatt Macy 	}
10236eda14cbcSMatt Macy 
10237eda14cbcSMatt Macy 	kmem_free(kthreads, sizeof (*kthreads) * nthreads);
10238eda14cbcSMatt Macy 	return (spa->spa_sync_tq);
10239eda14cbcSMatt Macy }
10240eda14cbcSMatt Macy 
10241eda14cbcSMatt Macy void
10242eda14cbcSMatt Macy spa_sync_tq_destroy(spa_t *spa)
10243eda14cbcSMatt Macy {
10244eda14cbcSMatt Macy 	ASSERT(spa->spa_sync_tq != NULL);
10245eda14cbcSMatt Macy 
10246eda14cbcSMatt Macy 	taskq_wait(spa->spa_sync_tq);
10247eda14cbcSMatt Macy 	taskq_destroy(spa->spa_sync_tq);
10248eda14cbcSMatt Macy 	kmem_free(spa->spa_syncthreads,
10249eda14cbcSMatt Macy 	    sizeof (spa_syncthread_info_t) * spa->spa_alloc_count);
10250eda14cbcSMatt Macy 	spa->spa_sync_tq = NULL;
10251eda14cbcSMatt Macy }
10252eda14cbcSMatt Macy 
10253eda14cbcSMatt Macy uint_t
10254eda14cbcSMatt Macy spa_acq_allocator(spa_t *spa)
10255eda14cbcSMatt Macy {
10256eda14cbcSMatt Macy 	int i;
10257eda14cbcSMatt Macy 
10258eda14cbcSMatt Macy 	if (spa->spa_alloc_count == 1)
10259eda14cbcSMatt Macy 		return (0);
10260eda14cbcSMatt Macy 
10261eda14cbcSMatt Macy 	mutex_enter(&spa->spa_allocs_use->sau_lock);
10262eda14cbcSMatt Macy 	uint_t r = spa->spa_allocs_use->sau_rotor;
10263eda14cbcSMatt Macy 	do {
10264eda14cbcSMatt Macy 		if (++r == spa->spa_alloc_count)
10265eda14cbcSMatt Macy 			r = 0;
10266eda14cbcSMatt Macy 	} while (spa->spa_allocs_use->sau_inuse[r]);
10267eda14cbcSMatt Macy 	spa->spa_allocs_use->sau_inuse[r] = B_TRUE;
10268eda14cbcSMatt Macy 	spa->spa_allocs_use->sau_rotor = r;
10269eda14cbcSMatt Macy 	mutex_exit(&spa->spa_allocs_use->sau_lock);
10270eda14cbcSMatt Macy 
10271eda14cbcSMatt Macy 	spa_syncthread_info_t *ti = spa->spa_syncthreads;
10272eda14cbcSMatt Macy 	for (i = 0; i < spa->spa_alloc_count; i++, ti++) {
10273eda14cbcSMatt Macy 		if (ti->sti_thread == curthread) {
10274eda14cbcSMatt Macy 			ti->sti_allocator = r;
10275eda14cbcSMatt Macy 			break;
10276eda14cbcSMatt Macy 		}
10277eda14cbcSMatt Macy 	}
10278eda14cbcSMatt Macy 	ASSERT3S(i, <, spa->spa_alloc_count);
10279eda14cbcSMatt Macy 	return (r);
10280eda14cbcSMatt Macy }
10281eda14cbcSMatt Macy 
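/*
 * Illustrative sketch (kept out of the build with #if 0): the rotor scan
 * in spa_acq_allocator() above, which starts after the last handed-out
 * slot and wraps until it finds a free one. Like the kernel code, this
 * toy assumes a free slot always exists (no more claimants than slots);
 * unlike the kernel code it is single-threaded, so the lock is omitted.
 */
#if 0
#include <stdio.h>
#include <stdbool.h>

#define	NALLOC	4

static bool inuse[NALLOC];
static unsigned rotor;

static unsigned
acquire(void)
{
	unsigned r = rotor;

	do {
		if (++r == NALLOC)
			r = 0;
	} while (inuse[r]);
	inuse[r] = true;
	rotor = r;
	return (r);
}

int
main(void)
{
	printf("got %u\n", acquire());	/* 1 */
	printf("got %u\n", acquire());	/* 2 */
	inuse[1] = false;		/* release slot 1 */
	printf("got %u\n", acquire());	/* 3: scan resumes after the rotor */
	return (0);
}
#endif
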
10282eda14cbcSMatt Macy void
10283eda14cbcSMatt Macy spa_rel_allocator(spa_t *spa, uint_t allocator)
10284eda14cbcSMatt Macy {
10285eda14cbcSMatt Macy 	if (spa->spa_alloc_count > 1)
10286eda14cbcSMatt Macy 		spa->spa_allocs_use->sau_inuse[allocator] = B_FALSE;
10287eda14cbcSMatt Macy }
10288eda14cbcSMatt Macy 
10289eda14cbcSMatt Macy void
10290eda14cbcSMatt Macy spa_select_allocator(zio_t *zio)
10291eda14cbcSMatt Macy {
10292eda14cbcSMatt Macy 	zbookmark_phys_t *bm = &zio->io_bookmark;
10293eda14cbcSMatt Macy 	spa_t *spa = zio->io_spa;
10294eda14cbcSMatt Macy 
10295eda14cbcSMatt Macy 	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
10296eda14cbcSMatt Macy 
10297eda14cbcSMatt Macy 	/*
10298eda14cbcSMatt Macy 	 * A gang block (for example) may have inherited its parent's
10299eda14cbcSMatt Macy 	 * allocator, in which case there is nothing further to do here.
10300eda14cbcSMatt Macy 	 */
10301dae17134SMartin Matuska 	if (ZIO_HAS_ALLOCATOR(zio))
10302dae17134SMartin Matuska 		return;
10303eda14cbcSMatt Macy 
10304e92ffd9bSMartin Matuska 	ASSERT(spa != NULL);
10305eda14cbcSMatt Macy 	ASSERT(bm != NULL);
10306dae17134SMartin Matuska 
10307eda14cbcSMatt Macy 	/*
10308eda14cbcSMatt Macy 	 * First try to use an allocator assigned to the syncthread, and set
10309eda14cbcSMatt Macy 	 * the corresponding write issue taskq for the allocator.
10310eda14cbcSMatt Macy 	 * Note, we must have an open pool to do this.
10311eda14cbcSMatt Macy 	 */
10312eda14cbcSMatt Macy 	if (spa->spa_sync_tq != NULL) {
10313eda14cbcSMatt Macy 		spa_syncthread_info_t *ti = spa->spa_syncthreads;
10314dae17134SMartin Matuska 		for (int i = 0; i < spa->spa_alloc_count; i++, ti++) {
10315eda14cbcSMatt Macy 			if (ti->sti_thread == curthread) {
10316eda14cbcSMatt Macy 				zio->io_allocator = ti->sti_allocator;
10317eda14cbcSMatt Macy 				return;
10318eda14cbcSMatt Macy 			}
10319eda14cbcSMatt Macy 		}
10320eda14cbcSMatt Macy 	}
10321dae17134SMartin Matuska 
10322dae17134SMartin Matuska 	/*
10323dae17134SMartin Matuska 	 * We want to try to use as many allocators as possible to help improve
10324dae17134SMartin Matuska 	 * performance, but we also want logically adjacent IOs to be physically
10325dae17134SMartin Matuska 	 * adjacent to improve sequential read performance. We chunk each object
10326dae17134SMartin Matuska 	 * into 2^20 block regions, and then hash based on the objset, object,
10327dae17134SMartin Matuska 	 * level, and region to accomplish both of these goals.
10328dae17134SMartin Matuska 	 */
10329dae17134SMartin Matuska 	uint64_t hv = cityhash4(bm->zb_objset, bm->zb_object, bm->zb_level,
10330dae17134SMartin Matuska 	    bm->zb_blkid >> 20);
10331dae17134SMartin Matuska 
10332dae17134SMartin Matuska 	zio->io_allocator = (uint_t)hv % spa->spa_alloc_count;
10333eda14cbcSMatt Macy }
10334eda14cbcSMatt Macy 
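/*
 * Illustrative sketch (kept out of the build with #if 0): the region-hash
 * idea above. Blocks are grouped into 2^20-block regions so logically
 * adjacent I/Os land on the same allocator, while distinct (objset,
 * object, level, region) tuples spread across allocators. The mixing
 * function below is a made-up stand-in, not the cityhash4() the kernel
 * uses.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint64_t
mix(uint64_t a, uint64_t b, uint64_t c, uint64_t d)
{
	uint64_t h = a * 0x9e3779b97f4a7c15ULL;

	h ^= b + 0x9e3779b97f4a7c15ULL + (h << 6) + (h >> 2);
	h ^= c + 0x9e3779b97f4a7c15ULL + (h << 6) + (h >> 2);
	h ^= d + 0x9e3779b97f4a7c15ULL + (h << 6) + (h >> 2);
	return (h);
}

int
main(void)
{
	unsigned alloc_count = 4;
	uint64_t objset = 21, object = 7, level = 0;

	/* Adjacent blkids share a 2^20 region, hence an allocator. */
	for (uint64_t blkid = 0; blkid < 3; blkid++) {
		uint64_t hv = mix(objset, object, level, blkid >> 20);

		printf("blkid %llu -> allocator %u\n",
		    (unsigned long long)blkid, (unsigned)(hv % alloc_count));
	}
	return (0);
}
#endif
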
10335eda14cbcSMatt Macy /*
10336eda14cbcSMatt Macy  * ==========================================================================
10337eda14cbcSMatt Macy  * Miscellaneous routines
10338eda14cbcSMatt Macy  * ==========================================================================
10339eda14cbcSMatt Macy  */
10340eda14cbcSMatt Macy 
10341eda14cbcSMatt Macy /*
10342eda14cbcSMatt Macy  * Remove all pools in the system.
10343eda14cbcSMatt Macy  */
10344eda14cbcSMatt Macy void
10345eda14cbcSMatt Macy spa_evict_all(void)
10346eda14cbcSMatt Macy {
10347eda14cbcSMatt Macy 	spa_t *spa;
10348eda14cbcSMatt Macy 
10349eda14cbcSMatt Macy 	/*
10350eda14cbcSMatt Macy 	 * Remove all cached state.  All pools should be closed now,
10351eda14cbcSMatt Macy 	 * so every spa in the AVL tree should be unreferenced.
10352eda14cbcSMatt Macy 	 */
10353eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
10354eda14cbcSMatt Macy 	while ((spa = spa_next(NULL)) != NULL) {
10355eda14cbcSMatt Macy 		/*
10356eda14cbcSMatt Macy 		 * Stop async tasks.  The async thread may need to detach
10357eda14cbcSMatt Macy 		 * a device that's been replaced, which requires grabbing
10358eda14cbcSMatt Macy 		 * spa_namespace_lock, so we must drop it here.
10359eda14cbcSMatt Macy 		 */
10360eda14cbcSMatt Macy 		spa_open_ref(spa, FTAG);
10361eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
10362eda14cbcSMatt Macy 		spa_async_suspend(spa);
10363eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
10364eda14cbcSMatt Macy 		spa_close(spa, FTAG);
10365eda14cbcSMatt Macy 
10366eda14cbcSMatt Macy 		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
10367eda14cbcSMatt Macy 			spa_unload(spa);
10368eda14cbcSMatt Macy 			spa_deactivate(spa);
10369eda14cbcSMatt Macy 		}
10370eda14cbcSMatt Macy 		spa_remove(spa);
10371eda14cbcSMatt Macy 	}
10372eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
10373eda14cbcSMatt Macy }
10374eda14cbcSMatt Macy 
10375eda14cbcSMatt Macy vdev_t *
10376eda14cbcSMatt Macy spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
10377eda14cbcSMatt Macy {
10378eda14cbcSMatt Macy 	vdev_t *vd;
10379eda14cbcSMatt Macy 	int i;
10380eda14cbcSMatt Macy 
10381eda14cbcSMatt Macy 	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
10382eda14cbcSMatt Macy 		return (vd);
10383eda14cbcSMatt Macy 
10384eda14cbcSMatt Macy 	if (aux) {
10385eda14cbcSMatt Macy 		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
10386eda14cbcSMatt Macy 			vd = spa->spa_l2cache.sav_vdevs[i];
10387eda14cbcSMatt Macy 			if (vd->vdev_guid == guid)
10388eda14cbcSMatt Macy 				return (vd);
10389eda14cbcSMatt Macy 		}
10390eda14cbcSMatt Macy 
10391eda14cbcSMatt Macy 		for (i = 0; i < spa->spa_spares.sav_count; i++) {
10392eda14cbcSMatt Macy 			vd = spa->spa_spares.sav_vdevs[i];
10393eda14cbcSMatt Macy 			if (vd->vdev_guid == guid)
10394eda14cbcSMatt Macy 				return (vd);
10395eda14cbcSMatt Macy 		}
10396eda14cbcSMatt Macy 	}
10397eda14cbcSMatt Macy 
10398eda14cbcSMatt Macy 	return (NULL);
10399eda14cbcSMatt Macy }
10400eda14cbcSMatt Macy 
10401eda14cbcSMatt Macy void
10402eda14cbcSMatt Macy spa_upgrade(spa_t *spa, uint64_t version)
10403eda14cbcSMatt Macy {
10404eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
10405eda14cbcSMatt Macy 
10406eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
10407eda14cbcSMatt Macy 
10408eda14cbcSMatt Macy 	/*
10409eda14cbcSMatt Macy 	 * This should only be called for a non-faulted pool, and since a
10410eda14cbcSMatt Macy 	 * future version would result in an unopenable pool, this shouldn't be
10411eda14cbcSMatt Macy 	 * possible.
10412eda14cbcSMatt Macy 	 */
10413eda14cbcSMatt Macy 	ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
10414eda14cbcSMatt Macy 	ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
10415eda14cbcSMatt Macy 
10416eda14cbcSMatt Macy 	spa->spa_uberblock.ub_version = version;
10417eda14cbcSMatt Macy 	vdev_config_dirty(spa->spa_root_vdev);
10418eda14cbcSMatt Macy 
10419eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
10420eda14cbcSMatt Macy 
10421eda14cbcSMatt Macy 	txg_wait_synced(spa_get_dsl(spa), 0);
10422eda14cbcSMatt Macy }
10423eda14cbcSMatt Macy 
10424eda14cbcSMatt Macy static boolean_t
10425eda14cbcSMatt Macy spa_has_aux_vdev(spa_t *spa, uint64_t guid, spa_aux_vdev_t *sav)
10426eda14cbcSMatt Macy {
10427eda14cbcSMatt Macy 	(void) spa;
10428eda14cbcSMatt Macy 	int i;
10429eda14cbcSMatt Macy 	uint64_t vdev_guid;
10430eda14cbcSMatt Macy 
10431eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_count; i++)
10432eda14cbcSMatt Macy 		if (sav->sav_vdevs[i]->vdev_guid == guid)
10433eda14cbcSMatt Macy 			return (B_TRUE);
10434eda14cbcSMatt Macy 
10435eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_npending; i++) {
10436eda14cbcSMatt Macy 		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
10437eda14cbcSMatt Macy 		    &vdev_guid) == 0 && vdev_guid == guid)
10438eda14cbcSMatt Macy 			return (B_TRUE);
10439eda14cbcSMatt Macy 	}
10440eda14cbcSMatt Macy 
10441eda14cbcSMatt Macy 	return (B_FALSE);
10442eda14cbcSMatt Macy }
10443eda14cbcSMatt Macy 
10444eda14cbcSMatt Macy boolean_t
10445eda14cbcSMatt Macy spa_has_l2cache(spa_t *spa, uint64_t guid)
10446eda14cbcSMatt Macy {
10447eda14cbcSMatt Macy 	return (spa_has_aux_vdev(spa, guid, &spa->spa_l2cache));
10448eda14cbcSMatt Macy }
10449eda14cbcSMatt Macy 
10450eda14cbcSMatt Macy boolean_t
10451eda14cbcSMatt Macy spa_has_spare(spa_t *spa, uint64_t guid)
10452eda14cbcSMatt Macy {
10453eda14cbcSMatt Macy 	return (spa_has_aux_vdev(spa, guid, &spa->spa_spares));
10454eda14cbcSMatt Macy }
10455eda14cbcSMatt Macy 
10456eda14cbcSMatt Macy /*
10457eda14cbcSMatt Macy  * Check if a pool has an active shared spare device.
10458eda14cbcSMatt Macy  * Note: an active spare's reference count is 2 (as a spare and as a replacement).
10459eda14cbcSMatt Macy  */
10460eda14cbcSMatt Macy static boolean_t
10461eda14cbcSMatt Macy spa_has_active_shared_spare(spa_t *spa)
10462eda14cbcSMatt Macy {
10463eda14cbcSMatt Macy 	int i, refcnt;
10464eda14cbcSMatt Macy 	uint64_t pool;
10465eda14cbcSMatt Macy 	spa_aux_vdev_t *sav = &spa->spa_spares;
10466eda14cbcSMatt Macy 
10467eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_count; i++) {
10468eda14cbcSMatt Macy 		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
10469eda14cbcSMatt Macy 		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
10470eda14cbcSMatt Macy 		    refcnt > 2)
10471eda14cbcSMatt Macy 			return (B_TRUE);
10472eda14cbcSMatt Macy 	}
10473eda14cbcSMatt Macy 
10474eda14cbcSMatt Macy 	return (B_FALSE);
10475eda14cbcSMatt Macy }
10476eda14cbcSMatt Macy 
10477eda14cbcSMatt Macy uint64_t
10478eda14cbcSMatt Macy spa_total_metaslabs(spa_t *spa)
10479eda14cbcSMatt Macy {
10480eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
10481eda14cbcSMatt Macy 
10482eda14cbcSMatt Macy 	uint64_t m = 0;
10483eda14cbcSMatt Macy 	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
10484eda14cbcSMatt Macy 		vdev_t *vd = rvd->vdev_child[c];
10485eda14cbcSMatt Macy 		if (!vdev_is_concrete(vd))
10486eda14cbcSMatt Macy 			continue;
10487eda14cbcSMatt Macy 		m += vd->vdev_ms_count;
10488eda14cbcSMatt Macy 	}
10489eda14cbcSMatt Macy 	return (m);
10490eda14cbcSMatt Macy }
10491eda14cbcSMatt Macy 
10492eda14cbcSMatt Macy /*
10493eda14cbcSMatt Macy  * Notify any waiting threads that some activity has switched from being in-
10494eda14cbcSMatt Macy  * progress to not-in-progress so that the thread can wake up and determine
10495eda14cbcSMatt Macy  * whether it is finished waiting.
10496eda14cbcSMatt Macy  */
10497eda14cbcSMatt Macy void
10498eda14cbcSMatt Macy spa_notify_waiters(spa_t *spa)
10499eda14cbcSMatt Macy {
10500eda14cbcSMatt Macy 	/*
10501eda14cbcSMatt Macy 	 * Acquiring spa_activities_lock here prevents the cv_broadcast from
10502eda14cbcSMatt Macy 	 * happening between the waiting thread's check and cv_wait.
10503eda14cbcSMatt Macy 	 */
10504eda14cbcSMatt Macy 	mutex_enter(&spa->spa_activities_lock);
10505eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_activities_cv);
10506eda14cbcSMatt Macy 	mutex_exit(&spa->spa_activities_lock);
10507eda14cbcSMatt Macy }
10508eda14cbcSMatt Macy 
10509eda14cbcSMatt Macy /*
10510eda14cbcSMatt Macy  * Notify any waiting threads that the pool is exporting, and then block until
10511eda14cbcSMatt Macy  * they are finished using the spa_t.
10512eda14cbcSMatt Macy  */
10513eda14cbcSMatt Macy void
10514eda14cbcSMatt Macy spa_wake_waiters(spa_t *spa)
10515eda14cbcSMatt Macy {
10516eda14cbcSMatt Macy 	mutex_enter(&spa->spa_activities_lock);
10517eda14cbcSMatt Macy 	spa->spa_waiters_cancel = B_TRUE;
10518eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_activities_cv);
10519eda14cbcSMatt Macy 	while (spa->spa_waiters != 0)
10520eda14cbcSMatt Macy 		cv_wait(&spa->spa_waiters_cv, &spa->spa_activities_lock);
10521eda14cbcSMatt Macy 	spa->spa_waiters_cancel = B_FALSE;
10522eda14cbcSMatt Macy 	mutex_exit(&spa->spa_activities_lock);
10523eda14cbcSMatt Macy }
10524eda14cbcSMatt Macy 
10525eda14cbcSMatt Macy /* Whether the vdev or any of its descendants are being initialized/trimmed. */
10526eda14cbcSMatt Macy static boolean_t
10527eda14cbcSMatt Macy spa_vdev_activity_in_progress_impl(vdev_t *vd, zpool_wait_activity_t activity)
10528eda14cbcSMatt Macy {
10529eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
10530eda14cbcSMatt Macy 
10531eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER));
10532eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
10533eda14cbcSMatt Macy 	ASSERT(activity == ZPOOL_WAIT_INITIALIZE ||
10534eda14cbcSMatt Macy 	    activity == ZPOOL_WAIT_TRIM);
10535eda14cbcSMatt Macy 
10536eda14cbcSMatt Macy 	kmutex_t *lock = activity == ZPOOL_WAIT_INITIALIZE ?
10537eda14cbcSMatt Macy 	    &vd->vdev_initialize_lock : &vd->vdev_trim_lock;
10538eda14cbcSMatt Macy 
10539eda14cbcSMatt Macy 	mutex_exit(&spa->spa_activities_lock);
10540eda14cbcSMatt Macy 	mutex_enter(lock);
10541eda14cbcSMatt Macy 	mutex_enter(&spa->spa_activities_lock);
10542eda14cbcSMatt Macy 
10543eda14cbcSMatt Macy 	boolean_t in_progress = (activity == ZPOOL_WAIT_INITIALIZE) ?
10544eda14cbcSMatt Macy 	    (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) :
10545eda14cbcSMatt Macy 	    (vd->vdev_trim_state == VDEV_TRIM_ACTIVE);
10546eda14cbcSMatt Macy 	mutex_exit(lock);
10547eda14cbcSMatt Macy 
10548eda14cbcSMatt Macy 	if (in_progress)
10549eda14cbcSMatt Macy 		return (B_TRUE);
10550e716630dSMartin Matuska 
10551e716630dSMartin Matuska 	for (int i = 0; i < vd->vdev_children; i++) {
10552eda14cbcSMatt Macy 		if (spa_vdev_activity_in_progress_impl(vd->vdev_child[i],
10553c03c5b1cSMartin Matuska 		    activity))
10554eda14cbcSMatt Macy 			return (B_TRUE);
10555eda14cbcSMatt Macy 	}
10556eda14cbcSMatt Macy 
10557eda14cbcSMatt Macy 	return (B_FALSE);
10558eda14cbcSMatt Macy }
10559eda14cbcSMatt Macy 
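/*
 * Illustrative sketch (kept out of the build with #if 0): the lock
 * reordering performed above. To take the activity-specific lock without
 * violating lock order, the function drops spa_activities_lock, takes the
 * activity lock, then reacquires spa_activities_lock. A pthreads rendering
 * of the same dance, with the activity state reduced to a bare flag:
 */
#if 0
#include <pthread.h>

static pthread_mutex_t activities_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t activity_lock = PTHREAD_MUTEX_INITIALIZER;
static int activity_state;

/* Called with activities_lock held; returns with it held again. */
static int
check_activity(void)
{
	int in_progress;

	pthread_mutex_unlock(&activities_lock);
	pthread_mutex_lock(&activity_lock);	/* activity lock first... */
	pthread_mutex_lock(&activities_lock);	/* ...then reacquire */

	in_progress = activity_state;
	pthread_mutex_unlock(&activity_lock);
	return (in_progress);
}

int
main(void)
{
	pthread_mutex_lock(&activities_lock);
	int busy = check_activity();
	pthread_mutex_unlock(&activities_lock);
	return (busy);
}
#endif
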
10560eda14cbcSMatt Macy /*
10561eda14cbcSMatt Macy  * If use_guid is true, this checks whether the vdev specified by guid is
10562eda14cbcSMatt Macy  * being initialized/trimmed. Otherwise, it checks whether any vdev in the pool
10563eda14cbcSMatt Macy  * is being initialized/trimmed. The caller must hold the config lock and
10564eda14cbcSMatt Macy  * spa_activities_lock.
10565eda14cbcSMatt Macy  */
10566e716630dSMartin Matuska static int
10567e716630dSMartin Matuska spa_vdev_activity_in_progress(spa_t *spa, boolean_t use_guid, uint64_t guid,
10568e716630dSMartin Matuska     zpool_wait_activity_t activity, boolean_t *in_progress)
10569e716630dSMartin Matuska {
10570e716630dSMartin Matuska 	mutex_exit(&spa->spa_activities_lock);
10571e716630dSMartin Matuska 	spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
10572eda14cbcSMatt Macy 	mutex_enter(&spa->spa_activities_lock);
10573eda14cbcSMatt Macy 
10574eda14cbcSMatt Macy 	vdev_t *vd;
10575eda14cbcSMatt Macy 	if (use_guid) {
10576eda14cbcSMatt Macy 		vd = spa_lookup_by_guid(spa, guid, B_FALSE);
10577eda14cbcSMatt Macy 		if (vd == NULL || !vd->vdev_ops->vdev_op_leaf) {
10578eda14cbcSMatt Macy 			spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
10579eda14cbcSMatt Macy 			return (EINVAL);
10580eda14cbcSMatt Macy 		}
10581eda14cbcSMatt Macy 	} else {
10582eda14cbcSMatt Macy 		vd = spa->spa_root_vdev;
10583eda14cbcSMatt Macy 	}
10584eda14cbcSMatt Macy 
10585eda14cbcSMatt Macy 	*in_progress = spa_vdev_activity_in_progress_impl(vd, activity);
10586eda14cbcSMatt Macy 
10587eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
10588eda14cbcSMatt Macy 	return (0);
10589eda14cbcSMatt Macy }
10590eda14cbcSMatt Macy 
10591eda14cbcSMatt Macy /*
10592eda14cbcSMatt Macy  * Locking for waiting threads
10593eda14cbcSMatt Macy  * ---------------------------
10594eda14cbcSMatt Macy  *
10595eda14cbcSMatt Macy  * Waiting threads need a way to check whether a given activity is in progress,
10596eda14cbcSMatt Macy  * and then, if it is, wait for it to complete. Each activity will have some
10597eda14cbcSMatt Macy  * in-memory representation of the relevant on-disk state which can be used to
10598eda14cbcSMatt Macy  * determine whether or not the activity is in progress. The in-memory state and
10599eda14cbcSMatt Macy  * the locking used to protect it will be different for each activity, and may
10600eda14cbcSMatt Macy  * not be suitable for use with a cvar (e.g., some state is protected by the
10601eda14cbcSMatt Macy  * config lock). To allow waiting threads to wait without any races, another
10602eda14cbcSMatt Macy  * lock, spa_activities_lock, is used.
10603eda14cbcSMatt Macy  *
10604eda14cbcSMatt Macy  * When the state is checked, both the activity-specific lock (if there is one)
10605eda14cbcSMatt Macy  * and spa_activities_lock are held. In some cases, the activity-specific lock
10606eda14cbcSMatt Macy  * is acquired explicitly (e.g. the config lock). In others, the locking is
10607eda14cbcSMatt Macy  * internal to some check (e.g. bpobj_is_empty). After checking, the waiting
10608eda14cbcSMatt Macy  * thread releases the activity-specific lock and, if the activity is in
10609eda14cbcSMatt Macy  * progress, then cv_waits using spa_activities_lock.
10610eda14cbcSMatt Macy  *
10611eda14cbcSMatt Macy  * The waiting thread is woken when another thread, one completing some
10612eda14cbcSMatt Macy  * activity, updates the state of the activity and then calls
10613eda14cbcSMatt Macy  * spa_notify_waiters, which will cv_broadcast. This 'completing' thread only
10614eda14cbcSMatt Macy  * needs to hold its activity-specific lock when updating the state, and this
10615eda14cbcSMatt Macy  * lock can (but doesn't have to) be dropped before calling spa_notify_waiters.
10616eda14cbcSMatt Macy  *
10617eda14cbcSMatt Macy  * Because spa_notify_waiters acquires spa_activities_lock before broadcasting,
10618eda14cbcSMatt Macy  * and because it is held when the waiting thread checks the state of the
10619eda14cbcSMatt Macy  * activity, it can never be the case that the completing thread both updates
10620eda14cbcSMatt Macy  * the activity state and cv_broadcasts in between the waiting thread's check
10621eda14cbcSMatt Macy  * and cv_wait. Thus, a waiting thread can never miss a wakeup.
10622eda14cbcSMatt Macy  *
10623eda14cbcSMatt Macy  * In order to prevent deadlock, when the waiting thread does its check, in some
10624eda14cbcSMatt Macy  * cases it will temporarily drop spa_activities_lock in order to acquire the
10625eda14cbcSMatt Macy  * activity-specific lock. The order in which spa_activities_lock and the
10626eda14cbcSMatt Macy  * activity specific lock are acquired in the waiting thread is determined by
10627eda14cbcSMatt Macy  * the order in which they are acquired in the completing thread; if the
10628eda14cbcSMatt Macy  * completing thread calls spa_notify_waiters with the activity-specific lock
10629eda14cbcSMatt Macy  * held, then the waiting thread must also acquire the activity-specific lock
10630eda14cbcSMatt Macy  * first.
10631eda14cbcSMatt Macy  */
10632eda14cbcSMatt Macy 
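/*
 * Illustrative sketch (kept out of the build with #if 0): the
 * check-then-wait protocol described above, reduced to pthreads. Because
 * the waiter holds the same mutex both when it checks the state and when
 * it calls pthread_cond_wait(), the completer cannot update the state and
 * broadcast in between, so no wakeup is lost. The activity state is a
 * bare flag here for brevity.
 */
#if 0
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int in_progress = 1;

static void *
completer(void *arg)
{
	(void) arg;
	pthread_mutex_lock(&lock);
	in_progress = 0;		/* update the state... */
	pthread_cond_broadcast(&cv);	/* ...then notify, under the lock */
	pthread_mutex_unlock(&lock);
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, completer, NULL);
	pthread_mutex_lock(&lock);
	while (in_progress)		/* check and wait without a gap */
		pthread_cond_wait(&cv, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	printf("activity complete\n");
	return (0);
}
#endif
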
10633eda14cbcSMatt Macy static int
10634eda14cbcSMatt Macy spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity,
10635eda14cbcSMatt Macy     boolean_t use_tag, uint64_t tag, boolean_t *in_progress)
10636eda14cbcSMatt Macy {
10637eda14cbcSMatt Macy 	int error = 0;
10638eda14cbcSMatt Macy 
10639eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
10640eda14cbcSMatt Macy 
10641eda14cbcSMatt Macy 	switch (activity) {
10642eda14cbcSMatt Macy 	case ZPOOL_WAIT_CKPT_DISCARD:
10643eda14cbcSMatt Macy 		*in_progress =
10644eda14cbcSMatt Macy 		    (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT) &&
10645eda14cbcSMatt Macy 		    zap_contains(spa_meta_objset(spa),
10646eda14cbcSMatt Macy 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT) ==
10647eda14cbcSMatt Macy 		    ENOENT);
10648eda14cbcSMatt Macy 		break;
10649eda14cbcSMatt Macy 	case ZPOOL_WAIT_FREE:
10650eda14cbcSMatt Macy 		*in_progress = ((spa_version(spa) >= SPA_VERSION_DEADLISTS &&
10651eda14cbcSMatt Macy 		    !bpobj_is_empty(&spa->spa_dsl_pool->dp_free_bpobj)) ||
10652eda14cbcSMatt Macy 		    spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY) ||
10653eda14cbcSMatt Macy 		    spa_livelist_delete_check(spa));
10654eda14cbcSMatt Macy 		break;
10655eda14cbcSMatt Macy 	case ZPOOL_WAIT_INITIALIZE:
10656eda14cbcSMatt Macy 	case ZPOOL_WAIT_TRIM:
10657eda14cbcSMatt Macy 		error = spa_vdev_activity_in_progress(spa, use_tag, tag,
10658eda14cbcSMatt Macy 		    activity, in_progress);
10659eda14cbcSMatt Macy 		break;
10660eda14cbcSMatt Macy 	case ZPOOL_WAIT_REPLACE:
10661eda14cbcSMatt Macy 		mutex_exit(&spa->spa_activities_lock);
10662eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
10663eda14cbcSMatt Macy 		mutex_enter(&spa->spa_activities_lock);
10664eda14cbcSMatt Macy 
10665eda14cbcSMatt Macy 		*in_progress = vdev_replace_in_progress(spa->spa_root_vdev);
10666eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
10667eda14cbcSMatt Macy 		break;
10668eda14cbcSMatt Macy 	case ZPOOL_WAIT_REMOVE:
10669eda14cbcSMatt Macy 		*in_progress = (spa->spa_removing_phys.sr_state ==
10670eda14cbcSMatt Macy 		    DSS_SCANNING);
10671eda14cbcSMatt Macy 		break;
10672eda14cbcSMatt Macy 	case ZPOOL_WAIT_RESILVER:
10673eda14cbcSMatt Macy 		*in_progress = vdev_rebuild_active(spa->spa_root_vdev);
10674eda14cbcSMatt Macy 		if (*in_progress)
10675e92ffd9bSMartin Matuska 			break;
10676e92ffd9bSMartin Matuska 		zfs_fallthrough;
10677eda14cbcSMatt Macy 	case ZPOOL_WAIT_SCRUB:
10678eda14cbcSMatt Macy 	{
10679eda14cbcSMatt Macy 		boolean_t scanning, paused, is_scrub;
10680eda14cbcSMatt Macy 		dsl_scan_t *scn =  spa->spa_dsl_pool->dp_scan;
10681eda14cbcSMatt Macy 
10682eda14cbcSMatt Macy 		is_scrub = (scn->scn_phys.scn_func == POOL_SCAN_SCRUB);
10683eda14cbcSMatt Macy 		scanning = (scn->scn_phys.scn_state == DSS_SCANNING);
10684eda14cbcSMatt Macy 		paused = dsl_scan_is_paused_scrub(scn);
10685eda14cbcSMatt Macy 		*in_progress = (scanning && !paused &&
10686eda14cbcSMatt Macy 		    is_scrub == (activity == ZPOOL_WAIT_SCRUB));
10687eda14cbcSMatt Macy 		break;
10688eda14cbcSMatt Macy 	}
10689e92ffd9bSMartin Matuska 	case ZPOOL_WAIT_RAIDZ_EXPAND:
10690e92ffd9bSMartin Matuska 	{
10691eda14cbcSMatt Macy 		vdev_raidz_expand_t *vre = spa->spa_raidz_expand;
10692eda14cbcSMatt Macy 		*in_progress = (vre != NULL && vre->vre_state == DSS_SCANNING);
10693eda14cbcSMatt Macy 		break;
10694eda14cbcSMatt Macy 	}
10695eda14cbcSMatt Macy 	default:
10696eda14cbcSMatt Macy 		panic("unrecognized value for activity %d", activity);
10697eda14cbcSMatt Macy 	}
10698eda14cbcSMatt Macy 
10699eda14cbcSMatt Macy 	return (error);
10700eda14cbcSMatt Macy }
10701eda14cbcSMatt Macy 
10702eda14cbcSMatt Macy static int
10703eda14cbcSMatt Macy spa_wait_common(const char *pool, zpool_wait_activity_t activity,
10704eda14cbcSMatt Macy     boolean_t use_tag, uint64_t tag, boolean_t *waited)
10705eda14cbcSMatt Macy {
10706eda14cbcSMatt Macy 	/*
10707eda14cbcSMatt Macy 	 * The tag is used to distinguish between instances of an activity.
10708eda14cbcSMatt Macy 	 * 'initialize' and 'trim' are the only activities that we use this for.
10709eda14cbcSMatt Macy 	 * The other activities can only have a single instance in progress in a
10710eda14cbcSMatt Macy 	 * pool at one time, making the tag unnecessary.
10711eda14cbcSMatt Macy 	 *
10712eda14cbcSMatt Macy 	 * There can be multiple devices being replaced at once, but since they
10713eda14cbcSMatt Macy 	 * all finish once resilvering finishes, we don't bother keeping track
10714eda14cbcSMatt Macy 	 * of them individually, we just wait for them all to finish.
10715eda14cbcSMatt Macy 	 */
10716eda14cbcSMatt Macy 	if (use_tag && activity != ZPOOL_WAIT_INITIALIZE &&
10717eda14cbcSMatt Macy 	    activity != ZPOOL_WAIT_TRIM)
10718eda14cbcSMatt Macy 		return (EINVAL);
10719eda14cbcSMatt Macy 
10720eda14cbcSMatt Macy 	if (activity < 0 || activity >= ZPOOL_WAIT_NUM_ACTIVITIES)
10721eda14cbcSMatt Macy 		return (EINVAL);
10722eda14cbcSMatt Macy 
10723eda14cbcSMatt Macy 	spa_t *spa;
10724eda14cbcSMatt Macy 	int error = spa_open(pool, &spa, FTAG);
10725eda14cbcSMatt Macy 	if (error != 0)
10726eda14cbcSMatt Macy 		return (error);
10727eda14cbcSMatt Macy 
10728eda14cbcSMatt Macy 	/*
10729eda14cbcSMatt Macy 	 * Increment the spa's waiter count so that we can call spa_close and
10730eda14cbcSMatt Macy 	 * still ensure that the spa_t doesn't get freed before this thread is
10731eda14cbcSMatt Macy 	 * finished with it when the pool is exported. We want to call spa_close
10732eda14cbcSMatt Macy 	 * before we start waiting because otherwise the additional ref would
10733eda14cbcSMatt Macy 	 * prevent the pool from being exported or destroyed throughout the
10734eda14cbcSMatt Macy 	 * potentially long wait.
10735eda14cbcSMatt Macy 	 */
10736eda14cbcSMatt Macy 	mutex_enter(&spa->spa_activities_lock);
10737eda14cbcSMatt Macy 	spa->spa_waiters++;
10738eda14cbcSMatt Macy 	spa_close(spa, FTAG);
10739eda14cbcSMatt Macy 
10740eda14cbcSMatt Macy 	*waited = B_FALSE;
10741eda14cbcSMatt Macy 	for (;;) {
10742eda14cbcSMatt Macy 		boolean_t in_progress;
10743eda14cbcSMatt Macy 		error = spa_activity_in_progress(spa, activity, use_tag, tag,
10744eda14cbcSMatt Macy 		    &in_progress);
10745eda14cbcSMatt Macy 
10746eda14cbcSMatt Macy 		if (error || !in_progress || spa->spa_waiters_cancel)
10747eda14cbcSMatt Macy 			break;
10748eda14cbcSMatt Macy 
10749eda14cbcSMatt Macy 		*waited = B_TRUE;
10750eda14cbcSMatt Macy 
10751eda14cbcSMatt Macy 		if (cv_wait_sig(&spa->spa_activities_cv,
10752eda14cbcSMatt Macy 		    &spa->spa_activities_lock) == 0) {
10753eda14cbcSMatt Macy 			error = EINTR;
10754eda14cbcSMatt Macy 			break;
10755eda14cbcSMatt Macy 		}
10756eda14cbcSMatt Macy 	}
10757eda14cbcSMatt Macy 
10758eda14cbcSMatt Macy 	spa->spa_waiters--;
10759eda14cbcSMatt Macy 	cv_signal(&spa->spa_waiters_cv);
10760eda14cbcSMatt Macy 	mutex_exit(&spa->spa_activities_lock);
10761eda14cbcSMatt Macy 
10762b2526e8bSMartin Matuska 	return (error);
10763b2526e8bSMartin Matuska }
10764b2526e8bSMartin Matuska 
10765eda14cbcSMatt Macy /*
10766be181ee2SMartin Matuska  * Wait for a particular instance of the specified activity to complete, where
1076716038816SMartin Matuska  * the instance is identified by 'tag'.
10768eda14cbcSMatt Macy  */
10769c03c5b1cSMartin Matuska int
10770eda14cbcSMatt Macy spa_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag,
10771eda14cbcSMatt Macy     boolean_t *waited)
10772eda14cbcSMatt Macy {
10773eda14cbcSMatt Macy 	return (spa_wait_common(pool, activity, B_TRUE, tag, waited));
10774eda14cbcSMatt Macy }
10775eda14cbcSMatt Macy 
10776eda14cbcSMatt Macy /*
10777eda14cbcSMatt Macy  * Wait for all instances of the specified activity to complete.
10778eda14cbcSMatt Macy  */
10779eda14cbcSMatt Macy int
10780eda14cbcSMatt Macy spa_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited)
10781eda14cbcSMatt Macy {
1078316038816SMartin Matuska 	return (spa_wait_common(pool, activity, B_FALSE, 0, waited));
1078416038816SMartin Matuska }
1078516038816SMartin Matuska 
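/*
 * Illustrative sketch (kept out of the build with #if 0): how a caller
 * might use spa_wait() to block until no scrub is running. The pool name
 * is a placeholder; in the real tree this entry point is reached via the
 * 'zpool wait' ioctl path.
 */
#if 0
	boolean_t waited = B_FALSE;
	int error = spa_wait("tank", ZPOOL_WAIT_SCRUB, &waited);

	/* error == 0 && waited: a scrub was running and has now finished. */
#endif
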
10786c03c5b1cSMartin Matuska sysevent_t *
10787dbd5678dSMartin Matuska spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
10788eda14cbcSMatt Macy {
10789eda14cbcSMatt Macy 	sysevent_t *ev = NULL;
10790c03c5b1cSMartin Matuska #ifdef _KERNEL
10791eda14cbcSMatt Macy 	nvlist_t *resource;
10792c03c5b1cSMartin Matuska 
10793c03c5b1cSMartin Matuska 	resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl);
10794eda14cbcSMatt Macy 	if (resource) {
10795c03c5b1cSMartin Matuska 		ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP);
10796c03c5b1cSMartin Matuska 		ev->resource = resource;
10797eda14cbcSMatt Macy 	}
10798c03c5b1cSMartin Matuska #else
10799c03c5b1cSMartin Matuska 	(void) spa, (void) vd, (void) hist_nvl, (void) name;
10800c03c5b1cSMartin Matuska #endif
10801eda14cbcSMatt Macy 	return (ev);
10802eda14cbcSMatt Macy }
10803c03c5b1cSMartin Matuska 
10804c03c5b1cSMartin Matuska void
10805eda14cbcSMatt Macy spa_event_post(sysevent_t *ev)
10806eda14cbcSMatt Macy {
10807c03c5b1cSMartin Matuska #ifdef _KERNEL
10808c03c5b1cSMartin Matuska 	if (ev) {
10809eda14cbcSMatt Macy 		zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb);
10810eda14cbcSMatt Macy 		kmem_free(ev, sizeof (*ev));
10811b356da80SMartin Matuska 	}
10812b356da80SMartin Matuska #else
10813b356da80SMartin Matuska 	(void) ev;
10814b356da80SMartin Matuska #endif
10815b356da80SMartin Matuska }
10816b356da80SMartin Matuska 
10817b356da80SMartin Matuska /*
10818b356da80SMartin Matuska  * Post a zevent corresponding to the given sysevent.   The 'name' must be one
10819b356da80SMartin Matuska  * of the event definitions in sys/sysevent/eventdefs.h.  The payload will be
10820eda14cbcSMatt Macy  * filled in from the spa and (optionally) the vdev.  This doesn't do anything
1082114c2e0a0SMartin Matuska  * in the userland libzpool, as we don't want consumers to misinterpret ztest
1082214c2e0a0SMartin Matuska  * or zdb as real changes.
1082314c2e0a0SMartin Matuska  */
10824 void
10825 spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
10826 {
10827 	spa_event_post(spa_event_create(spa, vd, hist_nvl, name));
10828 }
10829 
10830 /* state manipulation functions */
10831 EXPORT_SYMBOL(spa_open);
10832 EXPORT_SYMBOL(spa_open_rewind);
10833 EXPORT_SYMBOL(spa_get_stats);
10834 EXPORT_SYMBOL(spa_create);
10835 EXPORT_SYMBOL(spa_import);
10836 EXPORT_SYMBOL(spa_tryimport);
10837 EXPORT_SYMBOL(spa_destroy);
10838 EXPORT_SYMBOL(spa_export);
10839 EXPORT_SYMBOL(spa_reset);
10840 EXPORT_SYMBOL(spa_async_request);
10841 EXPORT_SYMBOL(spa_async_suspend);
10842 EXPORT_SYMBOL(spa_async_resume);
10843 EXPORT_SYMBOL(spa_inject_addref);
10844 EXPORT_SYMBOL(spa_inject_delref);
10845 EXPORT_SYMBOL(spa_scan_stat_init);
10846 EXPORT_SYMBOL(spa_scan_get_stats);
10847 
10848 /* device manipulation */
10849 EXPORT_SYMBOL(spa_vdev_add);
10850 EXPORT_SYMBOL(spa_vdev_attach);
10851 EXPORT_SYMBOL(spa_vdev_detach);
10852 EXPORT_SYMBOL(spa_vdev_setpath);
10853 EXPORT_SYMBOL(spa_vdev_setfru);
10854 EXPORT_SYMBOL(spa_vdev_split_mirror);
10855 
10856 /* spare state (which is global across all pools) */
10857 EXPORT_SYMBOL(spa_spare_add);
10858 EXPORT_SYMBOL(spa_spare_remove);
10859 EXPORT_SYMBOL(spa_spare_exists);
10860 EXPORT_SYMBOL(spa_spare_activate);
10861 
10862 /* L2ARC state (which is global across all pools) */
10863 EXPORT_SYMBOL(spa_l2cache_add);
10864 EXPORT_SYMBOL(spa_l2cache_remove);
10865 EXPORT_SYMBOL(spa_l2cache_exists);
10866 EXPORT_SYMBOL(spa_l2cache_activate);
10867 EXPORT_SYMBOL(spa_l2cache_drop);
10868 
10869 /* scanning */
10870 EXPORT_SYMBOL(spa_scan);
10871 EXPORT_SYMBOL(spa_scan_stop);
10872 
10873 /* spa syncing */
10874 EXPORT_SYMBOL(spa_sync); /* only for DMU use */
10875 EXPORT_SYMBOL(spa_sync_allpools);
10876 
10877 /* properties */
10878 EXPORT_SYMBOL(spa_prop_set);
10879 EXPORT_SYMBOL(spa_prop_get);
10880 EXPORT_SYMBOL(spa_prop_clear_bootfs);
10881 
10882 /* asynchronous event notification */
10883 EXPORT_SYMBOL(spa_event_notify);
10884 
10885 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_pct, UINT, ZMOD_RW,
10886 	"Percentage of CPUs to run a metaslab preload taskq");
10887 
10888 /* BEGIN CSTYLED */
10889 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, UINT, ZMOD_RW,
10890 	"log2 fraction of arc that can be used by inflight I/Os when "
10891 	"verifying pool during import");
10892 /* END CSTYLED */
10893 
10894 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_metadata, INT, ZMOD_RW,
10895 	"Set to traverse metadata on pool import");
10896 
10897 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_data, INT, ZMOD_RW,
10898 	"Set to traverse data on pool import");
10899 
10900 ZFS_MODULE_PARAM(zfs_spa, spa_, load_print_vdev_tree, INT, ZMOD_RW,
10901 	"Print vdev tree to zfs_dbgmsg during pool import");
10902 
10903 ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_pct, UINT, ZMOD_RW,
10904 	"Percentage of CPUs to run an IO worker thread");
10905 
10906 ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_tpq, UINT, ZMOD_RW,
10907 	"Number of threads per IO worker taskqueue");
10908 
10909 /* BEGIN CSTYLED */
10910 ZFS_MODULE_PARAM(zfs, zfs_, max_missing_tvds, U64, ZMOD_RW,
10911 	"Allow importing pool with up to this number of missing top-level "
10912 	"vdevs (in read-only mode)");
10913 /* END CSTYLED */
10914 
10915 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT,
10916 	ZMOD_RW, "Set the livelist condense zthr to pause");
10917 
10918 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_pause, INT,
10919 	ZMOD_RW, "Set the livelist condense synctask to pause");
10920 
10921 /* BEGIN CSTYLED */
10922 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_cancel,
10923 	INT, ZMOD_RW,
10924 	"Whether livelist condensing was canceled in the synctask");
10925 
10926 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_cancel,
10927 	INT, ZMOD_RW,
10928 	"Whether livelist condensing was canceled in the zthr function");
10929 
10930 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, new_alloc, INT,
10931 	ZMOD_RW,
10932 	"Whether extra ALLOC blkptrs were added to a livelist entry while it "
10933 	"was being condensed");
10934 
10935 #ifdef _KERNEL
10936 ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs_zio, zio_, taskq_read,
10937 	spa_taskq_read_param_set, spa_taskq_read_param_get, ZMOD_RW,
10938 	"Configure IO queues for read IO");
10939 ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs_zio, zio_, taskq_write,
10940 	spa_taskq_write_param_set, spa_taskq_write_param_get, ZMOD_RW,
10941 	"Configure IO queues for write IO");
10942 #endif
10943 /* END CSTYLED */
10944 
10945 ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_write_tpq, UINT, ZMOD_RW,
10946 	"Number of CPUs per write issue taskq");
10947