/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019 Datto Inc.
 */

#ifndef _SYS_SPA_IMPL_H
#define	_SYS_SPA_IMPL_H

#include <sys/spa.h>
#include <sys/spa_checkpoint.h>
#include <sys/spa_log_spacemap.h>
#include <sys/vdev.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_removal.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/dmu.h>
#include <sys/dsl_pool.h>
#include <sys/uberblock_impl.h>
#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <sys/zfs_refcount.h>
#include <sys/bplist.h>
#include <sys/bpobj.h>
#include <sys/dsl_crypt.h>
#include <sys/zfeature.h>
#include <sys/zthr.h>
#include <sys/dsl_deadlist.h>
#include <zfeature_common.h>

#ifdef	__cplusplus
extern "C" {
#endif

typedef struct spa_alloc {
	kmutex_t	spaa_lock;
	avl_tree_t	spaa_tree;
} ____cacheline_aligned spa_alloc_t;

typedef struct spa_allocs_use {
	kmutex_t	sau_lock;
	uint_t		sau_rotor;
	boolean_t	sau_inuse[];
} spa_allocs_use_t;

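/*
 * Illustrative sketch only (this helper is hypothetical and not part of the
 * OpenZFS API): spa_allocs_use_t tracks which allocator indices are
 * currently claimed by sync threads.  A plausible way the fields fit
 * together is shown below: scan sau_inuse[] starting at sau_rotor, claim
 * the first free slot, and advance the rotor.  The real claim/release
 * logic (including what happens when every slot is busy) lives in spa.c
 * and may differ.
 */
static inline uint_t
spa_allocs_use_pick_sketch(spa_allocs_use_t *sau, uint_t count)
{
	uint_t a;

	mutex_enter(&sau->sau_lock);
	a = sau->sau_rotor;
	for (uint_t i = 0; i < count; i++) {
		uint_t cand = (a + i) % count;
		if (!sau->sau_inuse[cand]) {
			a = cand;
			break;
		}
	}
	sau->sau_inuse[a] = B_TRUE;		/* claim (or re-claim) slot */
	sau->sau_rotor = (a + 1) % count;	/* start the next search here */
	mutex_exit(&sau->sau_lock);
	return (a);
}
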
typedef struct spa_error_entry {
	zbookmark_phys_t	se_bookmark;
	char			*se_name;
	avl_node_t		se_avl;
	zbookmark_err_phys_t	se_zep;		/* not accounted in avl_find */
} spa_error_entry_t;

typedef struct spa_history_phys {
	uint64_t sh_pool_create_len;	/* ending offset of zpool create */
	uint64_t sh_phys_max_off;	/* physical EOF */
	uint64_t sh_bof;		/* logical BOF */
	uint64_t sh_eof;		/* logical EOF */
	uint64_t sh_records_lost;	/* num of records overwritten */
} spa_history_phys_t;

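/*
 * Illustrative sketch only: the on-disk history log is a ring buffer.  The
 * bytes up to sh_pool_create_len (the "zpool create" record) are never
 * overwritten; logical offsets past that point wrap around within the
 * remaining physical space, so sh_bof/sh_eof can keep growing while the
 * physical buffer stays bounded by sh_phys_max_off.  Assuming that layout,
 * a logical offset would map to a physical one roughly as shown below (the
 * real conversion lives in spa_history.c and is authoritative).
 */
static inline uint64_t
spa_history_log_to_phys_sketch(uint64_t log_off,
    const spa_history_phys_t *shpp)
{
	/* size of the wrapping portion of the buffer */
	uint64_t phys_bsize = shpp->sh_phys_max_off - shpp->sh_pool_create_len;

	return ((log_off - shpp->sh_pool_create_len) % phys_bsize +
	    shpp->sh_pool_create_len);
}
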
/*
 * All members must be uint64_t, for byteswap purposes.
 */
typedef struct spa_removing_phys {
	uint64_t sr_state; /* dsl_scan_state_t */

	/*
	 * The vdev ID that we most recently attempted to remove,
	 * or -1 if no removal has been attempted.
	 */
	uint64_t sr_removing_vdev;

	/*
	 * The vdev ID that we most recently successfully removed,
	 * or -1 if no devices have been removed.
	 */
	uint64_t sr_prev_indirect_vdev;

	uint64_t sr_start_time;
	uint64_t sr_end_time;

	/*
	 * Note that we cannot use the space map's or indirect mapping's
	 * accounting as a substitute for these values, because we need to
	 * count frees of not-yet-copied data as though that data had been
	 * copied.  Otherwise, we could get into a situation where
	 * copied > to_copy, or we complete before copied == to_copy.
	 */
	uint64_t sr_to_copy; /* bytes that need to be copied */
	uint64_t sr_copied; /* bytes that have been copied or freed */
} spa_removing_phys_t;

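/*
 * Illustrative sketch only (the helper below is hypothetical and not part
 * of this header): device-removal progress is conventionally the ratio
 * sr_copied / sr_to_copy.  Because frees of not-yet-copied data are added
 * to sr_copied, the ratio still reaches 100% when data is freed rather
 * than copied, which is exactly why the accounting above cannot be derived
 * from the space map or the indirect mapping.
 */
static inline uint64_t
spa_removal_progress_pct_sketch(const spa_removing_phys_t *srp)
{
	if (srp->sr_to_copy == 0)
		return (100);
	return (srp->sr_copied * 100 / srp->sr_to_copy);
}
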
/*
 * This struct is stored as an entry in the DMU_POOL_DIRECTORY_OBJECT
 * (with key DMU_POOL_CONDENSING_INDIRECT).  It is present if a condense
 * of an indirect vdev's mapping object is in progress.
 */
typedef struct spa_condensing_indirect_phys {
	/*
	 * The vdev ID of the indirect vdev whose indirect mapping is
	 * being condensed.
	 */
	uint64_t	scip_vdev;

	/*
	 * The vdev's old obsolete spacemap.  This spacemap's contents are
	 * being integrated into the new mapping.
	 */
	uint64_t	scip_prev_obsolete_sm_object;

	/*
	 * The new mapping object that is being created.
	 */
	uint64_t	scip_next_mapping_object;
} spa_condensing_indirect_phys_t;

struct spa_aux_vdev {
	uint64_t	sav_object;		/* MOS object for device list */
	nvlist_t	*sav_config;		/* cached device config */
	vdev_t		**sav_vdevs;		/* devices */
	int		sav_count;		/* number of devices */
	boolean_t	sav_sync;		/* sync the device list */
	nvlist_t	**sav_pending;		/* pending device additions */
	uint_t		sav_npending;		/* # pending devices */
};

typedef struct spa_config_lock {
	kmutex_t	scl_lock;
	kthread_t	*scl_writer;
	int		scl_write_wanted;
	int		scl_count;
	kcondvar_t	scl_cv;
} ____cacheline_aligned spa_config_lock_t;

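/*
 * A minimal sketch, not the spa_misc.c implementation: spa_config_lock_t
 * behaves like a reader/writer lock.  Readers bump scl_count while no
 * writer holds the lock; a writer records itself in scl_writer and waits
 * for scl_count to drain.  The hypothetical helper below shows only the
 * reader-acquire side and ignores details such as recursive holds, writer
 * fairness, and lock ordering across SCL_LOCKS.
 */
static inline void
spa_config_lock_read_sketch(spa_config_lock_t *scl)
{
	mutex_enter(&scl->scl_lock);
	while (scl->scl_writer != NULL || scl->scl_write_wanted != 0)
		cv_wait(&scl->scl_cv, &scl->scl_lock);
	scl->scl_count++;
	mutex_exit(&scl->scl_lock);
}
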
typedef struct spa_config_dirent {
	list_node_t	scd_link;
	char		*scd_path;
} spa_config_dirent_t;

typedef enum zio_taskq_type {
	ZIO_TASKQ_ISSUE = 0,
	ZIO_TASKQ_ISSUE_HIGH,
	ZIO_TASKQ_INTERRUPT,
	ZIO_TASKQ_INTERRUPT_HIGH,
	ZIO_TASKQ_TYPES
} zio_taskq_type_t;

/*
 * State machine for the zpool-poolname process.  The state transitions
 * are as follows:
 *
 *	From		   To			Routine
 *	PROC_NONE	-> PROC_CREATED		spa_activate()
 *	PROC_CREATED	-> PROC_ACTIVE		spa_thread()
 *	PROC_ACTIVE	-> PROC_DEACTIVATE	spa_deactivate()
 *	PROC_DEACTIVATE	-> PROC_GONE		spa_thread()
 *	PROC_GONE	-> PROC_NONE		spa_deactivate()
 */
typedef enum spa_proc_state {
	SPA_PROC_NONE,		/* spa_proc = &p0, no process created */
	SPA_PROC_CREATED,	/* spa_activate() has proc, is waiting */
	SPA_PROC_ACTIVE,	/* taskqs created, spa_proc set */
	SPA_PROC_DEACTIVATE,	/* spa_deactivate() requests process exit */
	SPA_PROC_GONE		/* spa_thread() is exiting, spa_proc = &p0 */
} spa_proc_state_t;

typedef struct spa_taskqs {
	uint_t stqs_count;
	taskq_t **stqs_taskq;
} spa_taskqs_t;

/* one for each thread in the spa sync taskq */
typedef struct spa_syncthread_info {
	kthread_t	*sti_thread;
	uint_t		sti_allocator;
} spa_syncthread_info_t;

typedef enum spa_all_vdev_zap_action {
	AVZ_ACTION_NONE = 0,
	AVZ_ACTION_DESTROY,	/* Destroy all per-vdev ZAPs and the AVZ. */
	AVZ_ACTION_REBUILD,	/* Populate the new AVZ, see spa_avz_rebuild */
	AVZ_ACTION_INITIALIZE
} spa_avz_action_t;

typedef enum spa_config_source {
	SPA_CONFIG_SRC_NONE = 0,
	SPA_CONFIG_SRC_SCAN,		/* scan of path (default: /dev/dsk) */
	SPA_CONFIG_SRC_CACHEFILE,	/* any cachefile */
	SPA_CONFIG_SRC_TRYIMPORT,	/* returned from call to tryimport */
	SPA_CONFIG_SRC_SPLIT,		/* new pool in a pool split */
	SPA_CONFIG_SRC_MOS		/* MOS, but not always from right txg */
} spa_config_source_t;

struct spa {
	/*
	 * Fields protected by spa_namespace_lock.
	 */
	char		spa_name[ZFS_MAX_DATASET_NAME_LEN];	/* pool name */
	char		*spa_comment;		/* comment */
	avl_node_t	spa_avl;		/* node in spa_namespace_avl */
	nvlist_t	*spa_config;		/* last synced config */
	nvlist_t	*spa_config_syncing;	/* currently syncing config */
	nvlist_t	*spa_config_splitting;	/* config for splitting */
	nvlist_t	*spa_load_info;		/* info and errors from load */
	uint64_t	spa_config_txg;		/* txg of last config change */
	uint32_t	spa_sync_pass;		/* iterate-to-convergence */
	pool_state_t	spa_state;		/* pool state */
	int		spa_inject_ref;		/* injection references */
	uint8_t		spa_sync_on;		/* sync threads are running */
	spa_load_state_t spa_load_state;	/* current load operation */
	boolean_t	spa_indirect_vdevs_loaded; /* mappings loaded? */
	boolean_t	spa_trust_config;	/* do we trust vdev tree? */
	boolean_t	spa_is_splitting;	/* in the middle of a split? */
	spa_config_source_t spa_config_source;	/* where the config came from */
	uint64_t	spa_import_flags;	/* import specific flags */
	spa_taskqs_t	spa_zio_taskq[ZIO_TYPES][ZIO_TASKQ_TYPES];
	dsl_pool_t	*spa_dsl_pool;
	boolean_t	spa_is_initializing;	/* true while opening pool */
	boolean_t	spa_is_exporting;	/* true while exporting pool */
	kthread_t	*spa_export_thread;	/* valid during pool export */
	kthread_t	*spa_load_thread;	/* loading, no namespace lock */
	metaslab_class_t *spa_normal_class;	/* normal data class */
	metaslab_class_t *spa_log_class;	/* intent log data class */
	metaslab_class_t *spa_embedded_log_class; /* log on normal vdevs */
	metaslab_class_t *spa_special_class;	/* special allocation class */
	metaslab_class_t *spa_dedup_class;	/* dedup allocation class */
	uint64_t	spa_first_txg;		/* first txg after spa_open() */
	uint64_t	spa_final_txg;		/* txg of export/destroy */
	uint64_t	spa_freeze_txg;		/* freeze pool at this txg */
	uint64_t	spa_load_max_txg;	/* best initial ub_txg */
	uint64_t	spa_claim_max_txg;	/* highest claimed birth txg */
	inode_timespec_t spa_loaded_ts;		/* 1st successful open time */
	objset_t	*spa_meta_objset;	/* copy of dp->dp_meta_objset */
	kmutex_t	spa_evicting_os_lock;	/* Evicting objset list lock */
	list_t		spa_evicting_os_list;	/* Objsets being evicted. */
	kcondvar_t	spa_evicting_os_cv;	/* Objset Eviction Completion */
	txg_list_t	spa_vdev_txg_list;	/* per-txg dirty vdev list */
	vdev_t		*spa_root_vdev;		/* top-level vdev container */
	uint64_t	spa_min_ashift;		/* of vdevs in normal class */
	uint64_t	spa_max_ashift;		/* of vdevs in normal class */
	uint64_t	spa_min_alloc;		/* of vdevs in normal class */
	uint64_t	spa_gcd_alloc;		/* of vdevs in normal class */
	uint64_t	spa_config_guid;	/* config pool guid */
	uint64_t	spa_load_guid;		/* spa_load initialized guid */
	uint64_t	spa_last_synced_guid;	/* last synced guid */
	list_t		spa_config_dirty_list;	/* vdevs with dirty config */
	list_t		spa_state_dirty_list;	/* vdevs with dirty state */
	/*
	 * spa_allocs is an array whose length is stored in spa_alloc_count.
	 * There is one tree and one lock for each allocator, to help improve
	 * allocation performance in write-heavy workloads (see the
	 * illustrative sketch after this struct).
	 */
	spa_alloc_t	*spa_allocs;
	spa_allocs_use_t *spa_allocs_use;
	int		spa_alloc_count;
	int		spa_active_allocator;	/* selectable allocator */

	/* per-allocator sync thread taskqs */
	taskq_t		*spa_sync_tq;
	spa_syncthread_info_t *spa_syncthreads;

	spa_aux_vdev_t	spa_spares;		/* hot spares */
	spa_aux_vdev_t	spa_l2cache;		/* L2ARC cache devices */
	boolean_t	spa_aux_sync_uber;	/* need to sync aux uber */
	nvlist_t	*spa_label_features;	/* Features for reading MOS */
	uint64_t	spa_config_object;	/* MOS object for pool config */
	uint64_t	spa_config_generation;	/* config generation number */
	uint64_t	spa_syncing_txg;	/* txg currently syncing */
	bpobj_t		spa_deferred_bpobj;	/* deferred-free bplist */
	bplist_t	spa_free_bplist[TXG_SIZE]; /* bplist of stuff to free */
	zio_cksum_salt_t spa_cksum_salt;	/* secret salt for cksum */
	/* checksum context templates */
	kmutex_t	spa_cksum_tmpls_lock;
	void		*spa_cksum_tmpls[ZIO_CHECKSUM_FUNCTIONS];
	uberblock_t	spa_ubsync;		/* last synced uberblock */
	uberblock_t	spa_uberblock;		/* current uberblock */
	boolean_t	spa_extreme_rewind;	/* rewind past deferred frees */
	kmutex_t	spa_scrub_lock;		/* resilver/scrub lock */
	uint64_t	spa_scrub_inflight;	/* in-flight scrub bytes */

	/* in-flight verification bytes */
	uint64_t	spa_load_verify_bytes;
	kcondvar_t	spa_scrub_io_cv;	/* scrub I/O completion */
	uint8_t		spa_scrub_active;	/* active or suspended? */
	uint8_t		spa_scrub_type;		/* type of scrub we're doing */
	uint8_t		spa_scrub_finished;	/* indicator to rotate logs */
	uint8_t		spa_scrub_started;	/* started since last boot */
	uint8_t		spa_scrub_reopen;	/* scrub doing vdev_reopen */
	uint64_t	spa_scan_pass_start;	/* start time per pass/reboot */
	uint64_t	spa_scan_pass_scrub_pause; /* scrub pause time */
	uint64_t	spa_scan_pass_scrub_spent_paused; /* total paused */
	uint64_t	spa_scan_pass_exam;	/* examined bytes per pass */
	uint64_t	spa_scan_pass_issued;	/* issued bytes per pass */

	/* error scrub pause time in milliseconds */
	uint64_t	spa_scan_pass_errorscrub_pause;
	/* total error scrub paused time in milliseconds */
	uint64_t	spa_scan_pass_errorscrub_spent_paused;
	/*
	 * We are in the middle of a resilver, and another resilver
	 * is needed once this one completes. This is set iff any
	 * vdev_resilver_deferred is set.
	 */
	boolean_t	spa_resilver_deferred;
	kmutex_t	spa_async_lock;		/* protect async state */
	kthread_t	*spa_async_thread;	/* thread doing async task */
	int		spa_async_suspended;	/* async tasks suspended */
	kcondvar_t	spa_async_cv;		/* wait for thread_exit() */
	uint16_t	spa_async_tasks;	/* async task mask */
	uint64_t	spa_missing_tvds;	/* unopenable tvds on load */
	uint64_t	spa_missing_tvds_allowed; /* allow loading spa? */

	uint64_t	spa_nonallocating_dspace;
	spa_removing_phys_t spa_removing_phys;
	spa_vdev_removal_t *spa_vdev_removal;

	spa_condensing_indirect_phys_t	spa_condensing_indirect_phys;
	spa_condensing_indirect_t	*spa_condensing_indirect;
	zthr_t		*spa_condense_zthr;	/* zthr doing condense. */

	vdev_raidz_expand_t	*spa_raidz_expand;
	zthr_t		*spa_raidz_expand_zthr;

	uint64_t	spa_checkpoint_txg;	/* the txg of the checkpoint */
	spa_checkpoint_info_t spa_checkpoint_info; /* checkpoint accounting */
	zthr_t		*spa_checkpoint_discard_zthr;

	space_map_t	*spa_syncing_log_sm;	/* current log space map */
	avl_tree_t	spa_sm_logs_by_txg;
	kmutex_t	spa_flushed_ms_lock;	/* for metaslabs_by_flushed */
	avl_tree_t	spa_metaslabs_by_flushed;
	spa_unflushed_stats_t	spa_unflushed_stats;
	list_t		spa_log_summary;
	uint64_t	spa_log_flushall_txg;

	zthr_t		*spa_livelist_delete_zthr; /* deleting livelists */
	zthr_t		*spa_livelist_condense_zthr; /* condensing livelists */
	uint64_t	spa_livelists_to_delete; /* set of livelists to free */
	livelist_condense_entry_t	spa_to_condense; /* next to condense */

	char		*spa_root;		/* alternate root directory */
	uint64_t	spa_ena;		/* spa-wide ereport ENA */
	int		spa_last_open_failed;	/* error if last open failed */
	uint64_t	spa_last_ubsync_txg;	/* "best" uberblock txg */
	uint64_t	spa_last_ubsync_txg_ts;	/* timestamp from that ub */
	uint64_t	spa_load_txg;		/* ub txg that loaded */
	uint64_t	spa_load_txg_ts;	/* timestamp from that ub */
	uint64_t	spa_load_meta_errors;	/* verify metadata err count */
	uint64_t	spa_load_data_errors;	/* verify data err count */
	uint64_t	spa_verify_min_txg;	/* start txg of verify scrub */
	kmutex_t	spa_errlog_lock;	/* error log lock */
	uint64_t	spa_errlog_last;	/* last error log object */
	uint64_t	spa_errlog_scrub;	/* scrub error log object */
	kmutex_t	spa_errlist_lock;	/* error list/ereport lock */
	avl_tree_t	spa_errlist_last;	/* last error list */
	avl_tree_t	spa_errlist_scrub;	/* scrub error list */
	avl_tree_t	spa_errlist_healed;	/* list of healed blocks */
	uint64_t	spa_deflate;		/* should we deflate? */
	uint64_t	spa_history;		/* history object */
	kmutex_t	spa_history_lock;	/* history lock */
	vdev_t		*spa_pending_vdev;	/* pending vdev additions */
	kmutex_t	spa_props_lock;		/* property lock */
	uint64_t	spa_pool_props_object;	/* object for properties */
	uint64_t	spa_bootfs;		/* default boot filesystem */
	uint64_t	spa_failmode;		/* failure mode for the pool */
	uint64_t	spa_deadman_failmode;	/* failure mode for deadman */
	uint64_t	spa_delegation;		/* delegation on/off */
	list_t		spa_config_list;	/* previous cache file(s) */
	/* per-CPU array of root of async I/O: */
	zio_t		**spa_async_zio_root;
	zio_t		*spa_suspend_zio_root;	/* root of all suspended I/O */
	zio_t		*spa_txg_zio[TXG_SIZE]; /* spa_sync() waits for this */
	kmutex_t	spa_suspend_lock;	/* protects suspend_zio_root */
	kcondvar_t	spa_suspend_cv;		/* notification of resume */
	zio_suspend_reason_t	spa_suspended;	/* pool is suspended */
	uint8_t		spa_claiming;		/* pool is doing zil_claim() */
	boolean_t	spa_is_root;		/* pool is root */
	int		spa_minref;		/* num refs when first opened */
	spa_mode_t	spa_mode;		/* SPA_MODE_{READ|WRITE} */
	boolean_t	spa_read_spacemaps;	/* spacemaps available if ro */
	spa_log_state_t spa_log_state;		/* log state */
	uint64_t	spa_autoexpand;		/* lun expansion on/off */
	ddt_t		*spa_ddt[ZIO_CHECKSUM_FUNCTIONS]; /* in-core DDTs */
	uint64_t	spa_ddt_stat_object;	/* DDT statistics */
	uint64_t	spa_dedup_dspace;	/* Cache get_dedup_dspace() */
	uint64_t	spa_dedup_checksum;	/* default dedup checksum */
	uint64_t	spa_dspace;		/* dspace in normal class */
	struct brt	*spa_brt;		/* in-core BRT */
	kmutex_t	spa_vdev_top_lock;	/* dueling offline/remove */
	kmutex_t	spa_proc_lock;		/* protects spa_proc* */
	kcondvar_t	spa_proc_cv;		/* spa_proc_state transitions */
	spa_proc_state_t spa_proc_state;	/* see definition */
	proc_t		*spa_proc;		/* "zpool-poolname" process */
	uintptr_t	spa_did;		/* if procp != p0, did of t1 */
	boolean_t	spa_autoreplace;	/* autoreplace set in open */
	int		spa_vdev_locks;		/* locks grabbed */
	uint64_t	spa_creation_version;	/* version at pool creation */
	uint64_t	spa_prev_software_version; /* See ub_software_version */
	uint64_t	spa_feat_for_write_obj;	/* required to write to pool */
	uint64_t	spa_feat_for_read_obj;	/* required to read from pool */
	uint64_t	spa_feat_desc_obj;	/* Feature descriptions */
	uint64_t	spa_feat_enabled_txg_obj; /* Feature enabled txg */
	kmutex_t	spa_feat_stats_lock;	/* protects spa_feat_stats */
	nvlist_t	*spa_feat_stats;	/* Cache of enabled features */
	/* cache feature refcounts */
	uint64_t	spa_feat_refcount_cache[SPA_FEATURES];
	taskqid_t	spa_deadman_tqid;	/* Task id */
	uint64_t	spa_deadman_calls;	/* number of deadman calls */
	hrtime_t	spa_sync_starttime;	/* starting time of spa_sync */
	uint64_t	spa_deadman_synctime;	/* deadman sync expiration */
	uint64_t	spa_deadman_ziotime;	/* deadman zio expiration */
	uint64_t	spa_all_vdev_zaps;	/* ZAP of per-vd ZAP obj #s */
	spa_avz_action_t	spa_avz_action;	/* destroy/rebuild AVZ? */
	uint64_t	spa_autotrim;		/* automatic background trim? */
	uint64_t	spa_errata;		/* errata issues detected */
	spa_stats_t	spa_stats;		/* assorted spa statistics */
	spa_keystore_t	spa_keystore;		/* loaded crypto keys */

	/* arc_memory_throttle() parameters during low memory condition */
	uint64_t	spa_lowmem_page_load;	/* memory load during txg */
	uint64_t	spa_lowmem_last_txg;	/* txg window start */

	hrtime_t	spa_ccw_fail_time;	/* Conf cache write fail time */
	taskq_t		*spa_zvol_taskq;	/* Taskq for minor management */
	taskq_t		*spa_metaslab_taskq;	/* Taskq for metaslab preload */
	taskq_t		*spa_prefetch_taskq;	/* Taskq for prefetch threads */
	taskq_t		*spa_upgrade_taskq;	/* Taskq for upgrade jobs */
	uint64_t	spa_multihost;		/* multihost aware (mmp) */
	mmp_thread_t	spa_mmp;		/* multihost mmp thread */
	list_t		spa_leaf_list;		/* list of leaf vdevs */
	uint64_t	spa_leaf_list_gen;	/* track leaf_list changes */
	uint32_t	spa_hostid;		/* cached system hostid */

	/* synchronization for threads in spa_wait */
	kmutex_t	spa_activities_lock;
	kcondvar_t	spa_activities_cv;
	kcondvar_t	spa_waiters_cv;
	int		spa_waiters;		/* number of waiting threads */
	boolean_t	spa_waiters_cancel;	/* waiters should return */

	char		*spa_compatibility;	/* compatibility file(s) */

	/*
	 * spa_refcount & spa_config_lock must be the last elements
	 * because zfs_refcount_t changes size based on compilation options.
	 * In order for the MDB module to function correctly, the other
	 * fields must remain in the same location.
	 */
	spa_config_lock_t spa_config_lock[SCL_LOCKS]; /* config changes */
	zfs_refcount_t	spa_refcount;		/* number of opens */
};

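/*
 * Illustrative sketch only (this helper is hypothetical and not part of the
 * OpenZFS API): each allocator index selects one (spaa_lock, spaa_tree)
 * pair out of spa_allocs[], so writes bound to different allocators never
 * contend on the same lock.  How a given ZIO is hashed to an allocator
 * index is decided elsewhere (zio.c) and is not shown here.
 */
static inline spa_alloc_t *
spa_allocator_sketch(spa_t *spa, uint_t allocator)
{
	return (&spa->spa_allocs[allocator % spa->spa_alloc_count]);
}
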
extern char *spa_config_path;
extern const char *zfs_deadman_failmode;
extern uint_t spa_slop_shift;
extern void spa_taskq_dispatch(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, zio_t *zio, boolean_t cutinline);
extern void spa_load_spares(spa_t *spa);
extern void spa_load_l2cache(spa_t *spa);
extern sysevent_t *spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl,
    const char *name);
extern void spa_event_post(sysevent_t *ev);
extern int param_set_deadman_failmode_common(const char *val);
extern void spa_set_deadman_synctime(hrtime_t ns);
extern void spa_set_deadman_ziotime(hrtime_t ns);
extern const char *spa_history_zone(void);
extern const char *zfs_active_allocator;
extern int param_set_active_allocator_common(const char *val);

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_SPA_IMPL_H */