/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017, Intel Corporation.
 */

#ifndef _SYS_SPA_IMPL_H
#define	_SYS_SPA_IMPL_H

#include <sys/spa.h>
#include <sys/spa_checkpoint.h>
#include <sys/spa_log_spacemap.h>
#include <sys/vdev.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_removal.h>
#include <sys/metaslab.h>
#include <sys/dmu.h>
#include <sys/dsl_pool.h>
#include <sys/uberblock_impl.h>
#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <sys/zfs_refcount.h>
#include <sys/bplist.h>
#include <sys/bpobj.h>
#include <sys/dsl_crypt.h>
#include <sys/zfeature.h>
#include <sys/zthr.h>
#include <sys/dsl_deadlist.h>
#include <zfeature_common.h>

#ifdef __cplusplus
extern "C" {
#endif

typedef struct spa_error_entry {
	zbookmark_phys_t	se_bookmark;
	char			*se_name;
	avl_node_t		se_avl;
} spa_error_entry_t;

typedef struct spa_history_phys {
	uint64_t sh_pool_create_len;	/* ending offset of zpool create */
	uint64_t sh_phys_max_off;	/* physical EOF */
	uint64_t sh_bof;		/* logical BOF */
	uint64_t sh_eof;		/* logical EOF */
	uint64_t sh_records_lost;	/* num of records overwritten */
} spa_history_phys_t;

/*
 * All members must be uint64_t, for byteswap purposes.
 */
typedef struct spa_removing_phys {
	uint64_t sr_state; /* dsl_scan_state_t */

	/*
	 * The vdev ID that we most recently attempted to remove,
	 * or -1 if no removal has been attempted.
	 */
	uint64_t sr_removing_vdev;

	/*
	 * The vdev ID that we most recently successfully removed,
	 * or -1 if no devices have been removed.
	 */
	uint64_t sr_prev_indirect_vdev;

	uint64_t sr_start_time;
	uint64_t sr_end_time;

	/*
	 * Note that we cannot use the space map's or indirect mapping's
	 * accounting as a substitute for these values, because we need to
	 * count frees of not-yet-copied data as though it had been copied.
	 * Otherwise, we could get into a situation where copied > to_copy,
	 * or we complete before copied == to_copy.
	 */
	uint64_t sr_to_copy;	/* bytes that need to be copied */
	uint64_t sr_copied;	/* bytes that have been copied or freed */
} spa_removing_phys_t;
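
/*
 * Worked example (illustrative numbers only): if ~100GB must be evacuated
 * from a removing vdev, sr_to_copy starts at ~100GB.  If 10GB of
 * not-yet-copied data is then freed, that 10GB is credited to sr_copied
 * as though it had been copied.  Progress is therefore always
 * sr_copied / sr_to_copy, and the removal completes exactly when
 * sr_copied == sr_to_copy.
 */
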
/*
 * This struct is stored as an entry in the DMU_POOL_DIRECTORY_OBJECT
 * (with key DMU_POOL_CONDENSING_INDIRECT).  It is present if a condense
 * of an indirect vdev's mapping object is in progress.
 */
typedef struct spa_condensing_indirect_phys {
	/*
	 * The vdev ID of the indirect vdev whose indirect mapping is
	 * being condensed.
	 */
	uint64_t	scip_vdev;

	/*
	 * The vdev's old obsolete spacemap.  This spacemap's contents are
	 * being integrated into the new mapping.
	 */
	uint64_t	scip_prev_obsolete_sm_object;

	/*
	 * The new mapping object that is being created.
	 */
	uint64_t	scip_next_mapping_object;
} spa_condensing_indirect_phys_t;

struct spa_aux_vdev {
	uint64_t	sav_object;	/* MOS object for device list */
	nvlist_t	*sav_config;	/* cached device config */
	vdev_t		**sav_vdevs;	/* devices */
	int		sav_count;	/* number of devices */
	boolean_t	sav_sync;	/* sync the device list */
	nvlist_t	**sav_pending;	/* pending device additions */
	uint_t		sav_npending;	/* # pending devices */
};

typedef struct spa_config_lock {
	kmutex_t	scl_lock;
	kthread_t	*scl_writer;
	int		scl_write_wanted;
	kcondvar_t	scl_cv;
	zfs_refcount_t	scl_count;
} spa_config_lock_t;

typedef struct spa_config_dirent {
	list_node_t	scd_link;
	char		*scd_path;
} spa_config_dirent_t;

typedef enum zio_taskq_type {
	ZIO_TASKQ_ISSUE = 0,
	ZIO_TASKQ_ISSUE_HIGH,
	ZIO_TASKQ_INTERRUPT,
	ZIO_TASKQ_INTERRUPT_HIGH,
	ZIO_TASKQ_TYPES
} zio_taskq_type_t;

/*
 * State machine for the zpool-poolname process.  The state transitions
 * are done as follows:
 *
 *	From		   To			Routine
 *	PROC_NONE	-> PROC_CREATED		spa_activate()
 *	PROC_CREATED	-> PROC_ACTIVE		spa_thread()
 *	PROC_ACTIVE	-> PROC_DEACTIVATE	spa_deactivate()
 *	PROC_DEACTIVATE	-> PROC_GONE		spa_thread()
 *	PROC_GONE	-> PROC_NONE		spa_deactivate()
 */
typedef enum spa_proc_state {
	SPA_PROC_NONE,		/* spa_proc = &p0, no process created */
	SPA_PROC_CREATED,	/* spa_activate() has proc, is waiting */
	SPA_PROC_ACTIVE,	/* taskqs created, spa_proc set */
	SPA_PROC_DEACTIVATE,	/* spa_deactivate() requests process exit */
	SPA_PROC_GONE		/* spa_thread() is exiting, spa_proc = &p0 */
} spa_proc_state_t;

typedef struct spa_taskqs {
	uint_t	stqs_count;
	taskq_t	**stqs_taskq;
} spa_taskqs_t;

typedef enum spa_all_vdev_zap_action {
	AVZ_ACTION_NONE = 0,
	AVZ_ACTION_DESTROY,	/* Destroy all per-vdev ZAPs and the AVZ. */
	AVZ_ACTION_REBUILD,	/* Populate the new AVZ, see spa_avz_rebuild */
	AVZ_ACTION_INITIALIZE
} spa_avz_action_t;

typedef enum spa_config_source {
	SPA_CONFIG_SRC_NONE = 0,
	SPA_CONFIG_SRC_SCAN,		/* scan of path (default: /dev/dsk) */
	SPA_CONFIG_SRC_CACHEFILE,	/* any cachefile */
	SPA_CONFIG_SRC_TRYIMPORT,	/* returned from call to tryimport */
	SPA_CONFIG_SRC_SPLIT,		/* new pool in a pool split */
	SPA_CONFIG_SRC_MOS		/* MOS, but not always from right txg */
} spa_config_source_t;
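
/*
 * Illustrative sketch (assumed pattern, cf. spa_misc.c): the config locks
 * held in spa_config_lock[] below in struct spa are taken and dropped
 * through spa_config_enter() and spa_config_exit() (see sys/spa.h), e.g.
 * to read the vdev tree:
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	... walk spa->spa_root_vdev ...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 */
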
struct spa {
	/*
	 * Fields protected by spa_namespace_lock.
	 */
	char		spa_name[ZFS_MAX_DATASET_NAME_LEN]; /* pool name */
	char		*spa_comment;		/* comment */
	avl_node_t	spa_avl;		/* node in spa_namespace_avl */
	nvlist_t	*spa_config;		/* last synced config */
	nvlist_t	*spa_config_syncing;	/* currently syncing config */
	nvlist_t	*spa_config_splitting;	/* config for splitting */
	nvlist_t	*spa_load_info;		/* info and errors from load */
	uint64_t	spa_config_txg;		/* txg of last config change */
	int		spa_sync_pass;		/* iterate-to-convergence */
	pool_state_t	spa_state;		/* pool state */
	int		spa_inject_ref;		/* injection references */
	uint8_t		spa_sync_on;		/* sync threads are running */
	spa_load_state_t spa_load_state;	/* current load operation */
	boolean_t	spa_indirect_vdevs_loaded; /* mappings loaded? */
	boolean_t	spa_trust_config;	/* do we trust vdev tree? */
	boolean_t	spa_is_splitting;	/* in the middle of a split? */
	spa_config_source_t spa_config_source;	/* where config comes from? */
	uint64_t	spa_import_flags;	/* import specific flags */
	spa_taskqs_t	spa_zio_taskq[ZIO_TYPES][ZIO_TASKQ_TYPES];
	dsl_pool_t	*spa_dsl_pool;
	boolean_t	spa_is_initializing;	/* true while opening pool */
	boolean_t	spa_is_exporting;	/* true while exporting pool */
	metaslab_class_t *spa_normal_class;	/* normal data class */
	metaslab_class_t *spa_log_class;	/* intent log data class */
	metaslab_class_t *spa_special_class;	/* special allocation class */
	metaslab_class_t *spa_dedup_class;	/* dedup allocation class */
	uint64_t	spa_first_txg;		/* first txg after spa_open() */
	uint64_t	spa_final_txg;		/* txg of export/destroy */
	uint64_t	spa_freeze_txg;		/* freeze pool at this txg */
	uint64_t	spa_load_max_txg;	/* best initial ub_txg */
	uint64_t	spa_claim_max_txg;	/* highest claimed birth txg */
	inode_timespec_t spa_loaded_ts;		/* 1st successful open time */
	objset_t	*spa_meta_objset;	/* copy of dp->dp_meta_objset */
	kmutex_t	spa_evicting_os_lock;	/* Evicting objset list lock */
	list_t		spa_evicting_os_list;	/* Objsets being evicted. */
	kcondvar_t	spa_evicting_os_cv;	/* Objset Eviction Completion */
	txg_list_t	spa_vdev_txg_list;	/* per-txg dirty vdev list */
	vdev_t		*spa_root_vdev;		/* top-level vdev container */
	uint64_t	spa_min_ashift;		/* of vdevs in normal class */
	uint64_t	spa_max_ashift;		/* of vdevs in normal class */
	uint64_t	spa_min_alloc;		/* of vdevs in normal class */
	uint64_t	spa_config_guid;	/* config pool guid */
	uint64_t	spa_load_guid;		/* spa_load initialized guid */
	uint64_t	spa_last_synced_guid;	/* last synced guid */
	list_t		spa_config_dirty_list;	/* vdevs with dirty config */
	list_t		spa_state_dirty_list;	/* vdevs with dirty state */
	/*
	 * spa_alloc_locks and spa_alloc_trees are arrays, whose lengths are
	 * stored in spa_alloc_count.  There is one tree and one lock for
	 * each allocator, to help improve allocation performance in
	 * write-heavy workloads.
	 */
	kmutex_t	*spa_alloc_locks;
	avl_tree_t	*spa_alloc_trees;
	int		spa_alloc_count;
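	/*
	 * Illustrative sketch (assumed pattern, cf. zio_dva_throttle() in
	 * zio.c): a zio's allocator index selects one lock/tree pair, e.g.:
	 *
	 *	mutex_enter(&spa->spa_alloc_locks[zio->io_allocator]);
	 *	avl_add(&spa->spa_alloc_trees[zio->io_allocator], zio);
	 *	mutex_exit(&spa->spa_alloc_locks[zio->io_allocator]);
	 */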

	spa_aux_vdev_t	spa_spares;		/* hot spares */
	spa_aux_vdev_t	spa_l2cache;		/* L2ARC cache devices */
	nvlist_t	*spa_label_features;	/* Features for reading MOS */
	uint64_t	spa_config_object;	/* MOS object for pool config */
	uint64_t	spa_config_generation;	/* config generation number */
	uint64_t	spa_syncing_txg;	/* txg currently syncing */
	bpobj_t		spa_deferred_bpobj;	/* deferred-free bplist */
	bplist_t	spa_free_bplist[TXG_SIZE]; /* bplist of stuff to free */
	zio_cksum_salt_t spa_cksum_salt;	/* secret salt for cksum */
	/* checksum context templates */
	kmutex_t	spa_cksum_tmpls_lock;
	void		*spa_cksum_tmpls[ZIO_CHECKSUM_FUNCTIONS];
	uberblock_t	spa_ubsync;		/* last synced uberblock */
	uberblock_t	spa_uberblock;		/* current uberblock */
	boolean_t	spa_extreme_rewind;	/* rewind past deferred frees */
	kmutex_t	spa_scrub_lock;		/* resilver/scrub lock */
	uint64_t	spa_scrub_inflight;	/* in-flight scrub bytes */

	/* in-flight verification bytes */
	uint64_t	spa_load_verify_bytes;
	kcondvar_t	spa_scrub_io_cv;	/* scrub I/O completion */
	uint8_t		spa_scrub_active;	/* active or suspended? */
	uint8_t		spa_scrub_type;		/* type of scrub we're doing */
	uint8_t		spa_scrub_finished;	/* indicator to rotate logs */
	uint8_t		spa_scrub_started;	/* started since last boot */
	uint8_t		spa_scrub_reopen;	/* scrub doing vdev_reopen */
	uint64_t	spa_scan_pass_start;	/* start time per pass/reboot */
	uint64_t	spa_scan_pass_scrub_pause; /* scrub pause time */
	uint64_t	spa_scan_pass_scrub_spent_paused; /* total paused */
	uint64_t	spa_scan_pass_exam;	/* examined bytes per pass */
	uint64_t	spa_scan_pass_issued;	/* issued bytes per pass */

	/*
	 * We are in the middle of a resilver, and another resilver
	 * is needed once this one completes.  This is set iff any
	 * vdev_resilver_deferred is set.
	 */
	boolean_t	spa_resilver_deferred;
	kmutex_t	spa_async_lock;		/* protect async state */
	kthread_t	*spa_async_thread;	/* thread doing async task */
	int		spa_async_suspended;	/* async tasks suspended */
	kcondvar_t	spa_async_cv;		/* wait for thread_exit() */
	uint16_t	spa_async_tasks;	/* async task mask */
	uint64_t	spa_missing_tvds;	/* unopenable tvds on load */
	uint64_t	spa_missing_tvds_allowed; /* allow loading spa? */

	spa_removing_phys_t spa_removing_phys;
	spa_vdev_removal_t *spa_vdev_removal;

	spa_condensing_indirect_phys_t	spa_condensing_indirect_phys;
	spa_condensing_indirect_t	*spa_condensing_indirect;
	zthr_t		*spa_condense_zthr;	/* zthr doing condense */
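
	/*
	 * The zthrs here and below are assumed to follow the standard zthr
	 * lifecycle: created with zthr_create(), nudged with zthr_wakeup(),
	 * and torn down with zthr_destroy() (see sys/zthr.h).
	 */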

	uint64_t	spa_checkpoint_txg;	/* the txg of the checkpoint */
	spa_checkpoint_info_t spa_checkpoint_info; /* checkpoint accounting */
	zthr_t		*spa_checkpoint_discard_zthr;

	space_map_t	*spa_syncing_log_sm;	/* current log space map */
	avl_tree_t	spa_sm_logs_by_txg;
	kmutex_t	spa_flushed_ms_lock;	/* for metaslabs_by_flushed */
	avl_tree_t	spa_metaslabs_by_flushed;
	spa_unflushed_stats_t spa_unflushed_stats;
	list_t		spa_log_summary;
	uint64_t	spa_log_flushall_txg;

	zthr_t		*spa_livelist_delete_zthr; /* deleting livelists */
	zthr_t		*spa_livelist_condense_zthr; /* condensing livelists */
	uint64_t	spa_livelists_to_delete; /* set of livelists to free */
	livelist_condense_entry_t spa_to_condense; /* next to condense */

	char		*spa_root;		/* alternate root directory */
	uint64_t	spa_ena;		/* spa-wide ereport ENA */
	int		spa_last_open_failed;	/* error if last open failed */
	uint64_t	spa_last_ubsync_txg;	/* "best" uberblock txg */
	uint64_t	spa_last_ubsync_txg_ts;	/* timestamp from that ub */
	uint64_t	spa_load_txg;		/* ub txg that loaded */
	uint64_t	spa_load_txg_ts;	/* timestamp from that ub */
	uint64_t	spa_load_meta_errors;	/* verify metadata err count */
	uint64_t	spa_load_data_errors;	/* verify data err count */
	uint64_t	spa_verify_min_txg;	/* start txg of verify scrub */
	kmutex_t	spa_errlog_lock;	/* error log lock */
	uint64_t	spa_errlog_last;	/* last error log object */
	uint64_t	spa_errlog_scrub;	/* scrub error log object */
	kmutex_t	spa_errlist_lock;	/* error list/ereport lock */
	avl_tree_t	spa_errlist_last;	/* last error list */
	avl_tree_t	spa_errlist_scrub;	/* scrub error list */
	uint64_t	spa_deflate;		/* should we deflate? */
	uint64_t	spa_history;		/* history object */
	kmutex_t	spa_history_lock;	/* history lock */
	vdev_t		*spa_pending_vdev;	/* pending vdev additions */
	kmutex_t	spa_props_lock;		/* property lock */
	uint64_t	spa_pool_props_object;	/* object for properties */
	uint64_t	spa_bootfs;		/* default boot filesystem */
	uint64_t	spa_failmode;		/* failure mode for the pool */
	uint64_t	spa_deadman_failmode;	/* failure mode for deadman */
	uint64_t	spa_delegation;		/* delegation on/off */
	list_t		spa_config_list;	/* previous cache file(s) */
	/* per-CPU array of root of async I/O: */
	zio_t		**spa_async_zio_root;
	zio_t		*spa_suspend_zio_root;	/* root of all suspended I/O */
	zio_t		*spa_txg_zio[TXG_SIZE];	/* spa_sync() waits for this */
	kmutex_t	spa_suspend_lock;	/* protects suspend_zio_root */
	kcondvar_t	spa_suspend_cv;		/* notification of resume */
	zio_suspend_reason_t spa_suspended;	/* pool is suspended */
	uint8_t		spa_claiming;		/* pool is doing zil_claim() */
	boolean_t	spa_is_root;		/* pool is root */
	int		spa_minref;		/* num refs when first opened */
	spa_mode_t	spa_mode;		/* SPA_MODE_{READ|WRITE} */
	spa_log_state_t	spa_log_state;		/* log state */
	uint64_t	spa_autoexpand;		/* lun expansion on/off */
	ddt_t		*spa_ddt[ZIO_CHECKSUM_FUNCTIONS]; /* in-core DDTs */
	uint64_t	spa_ddt_stat_object;	/* DDT statistics */
	uint64_t	spa_dedup_dspace;	/* Cache get_dedup_dspace() */
	uint64_t	spa_dedup_checksum;	/* default dedup checksum */
	uint64_t	spa_dspace;		/* dspace in normal class */
	kmutex_t	spa_vdev_top_lock;	/* dueling offline/remove */
	kmutex_t	spa_proc_lock;		/* protects spa_proc* */
	kcondvar_t	spa_proc_cv;		/* spa_proc_state transitions */
	spa_proc_state_t spa_proc_state;	/* see definition */
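	/*
	 * Illustrative sketch (assumed, cf. spa_deactivate() in spa_misc.c)
	 * of how the three fields above drive the process state machine:
	 *
	 *	mutex_enter(&spa->spa_proc_lock);
	 *	spa->spa_proc_state = SPA_PROC_DEACTIVATE;
	 *	cv_broadcast(&spa->spa_proc_cv);
	 *	while (spa->spa_proc_state == SPA_PROC_DEACTIVATE)
	 *		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
	 *	mutex_exit(&spa->spa_proc_lock);
	 */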
	proc_t		*spa_proc;		/* "zpool-poolname" process */
	uintptr_t	spa_did;		/* if procp != p0, did of t1 */
	boolean_t	spa_autoreplace;	/* autoreplace set in open */
	int		spa_vdev_locks;		/* locks grabbed */
	uint64_t	spa_creation_version;	/* version at pool creation */
	uint64_t	spa_prev_software_version; /* See ub_software_version */
	uint64_t	spa_feat_for_write_obj;	/* required to write to pool */
	uint64_t	spa_feat_for_read_obj;	/* required to read from pool */
	uint64_t	spa_feat_desc_obj;	/* Feature descriptions */
	uint64_t	spa_feat_enabled_txg_obj; /* Feature enabled txg */
	kmutex_t	spa_feat_stats_lock;	/* protects spa_feat_stats */
	nvlist_t	*spa_feat_stats;	/* Cache of enabled features */
	/* cache feature refcounts */
	uint64_t	spa_feat_refcount_cache[SPA_FEATURES];
	taskqid_t	spa_deadman_tqid;	/* Task id */
	uint64_t	spa_deadman_calls;	/* number of deadman calls */
	hrtime_t	spa_sync_starttime;	/* starting time of spa_sync */
	uint64_t	spa_deadman_synctime;	/* deadman sync expiration */
	uint64_t	spa_deadman_ziotime;	/* deadman zio expiration */
	uint64_t	spa_all_vdev_zaps;	/* ZAP of per-vd ZAP obj #s */
	spa_avz_action_t spa_avz_action;	/* destroy/rebuild AVZ? */
	uint64_t	spa_autotrim;		/* automatic background trim? */
	uint64_t	spa_errata;		/* errata issues detected */
	spa_stats_t	spa_stats;		/* assorted spa statistics */
	spa_keystore_t	spa_keystore;		/* loaded crypto keys */

	/* arc_memory_throttle() parameters during low memory condition */
	uint64_t	spa_lowmem_page_load;	/* memory load during txg */
	uint64_t	spa_lowmem_last_txg;	/* txg window start */

	hrtime_t	spa_ccw_fail_time;	/* Conf cache write fail time */
	taskq_t		*spa_zvol_taskq;	/* Taskq for minor management */
	taskq_t		*spa_prefetch_taskq;	/* Taskq for prefetch threads */
	uint64_t	spa_multihost;		/* multihost aware (mmp) */
	mmp_thread_t	spa_mmp;		/* multihost mmp thread */
	list_t		spa_leaf_list;		/* list of leaf vdevs */
	uint64_t	spa_leaf_list_gen;	/* track leaf_list changes */
	uint32_t	spa_hostid;		/* cached system hostid */

	/* synchronization for threads in spa_wait */
	kmutex_t	spa_activities_lock;
	kcondvar_t	spa_activities_cv;
	kcondvar_t	spa_waiters_cv;
	int		spa_waiters;		/* number of waiting threads */
	boolean_t	spa_waiters_cancel;	/* waiters should return */
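
	/*
	 * Illustrative sketch (assumed, cf. spa_wait_common() in spa.c) of
	 * the waiter pattern these fields support:
	 *
	 *	mutex_enter(&spa->spa_activities_lock);
	 *	spa->spa_waiters++;
	 *	while (activity_in_progress(spa) && !spa->spa_waiters_cancel)
	 *		cv_wait(&spa->spa_activities_cv,
	 *		    &spa->spa_activities_lock);
	 *	spa->spa_waiters--;
	 *	cv_signal(&spa->spa_waiters_cv);
	 *	mutex_exit(&spa->spa_activities_lock);
	 *
	 * activity_in_progress() is a hypothetical predicate standing in
	 * for the real per-activity checks.
	 */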

	/*
	 * spa_refcount & spa_config_lock must be the last elements
	 * because zfs_refcount_t changes size based on compilation options.
	 * In order for the MDB module to function correctly, the other
	 * fields must remain in the same location.
	 */
	spa_config_lock_t spa_config_lock[SCL_LOCKS]; /* config changes */
	zfs_refcount_t	spa_refcount;		/* number of opens */

	taskq_t		*spa_upgrade_taskq;	/* taskq for upgrade jobs */
};

extern char *spa_config_path;
extern char *zfs_deadman_failmode;
extern int spa_slop_shift;
extern void spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent);
extern void spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t,
    zio_taskq_type_t q, task_func_t *func, void *arg, uint_t flags);
extern void spa_load_spares(spa_t *spa);
extern void spa_load_l2cache(spa_t *spa);
extern sysevent_t *spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl,
    const char *name);
extern void spa_event_post(sysevent_t *ev);
extern int param_set_deadman_failmode_common(const char *val);
extern void spa_set_deadman_synctime(hrtime_t ns);
extern void spa_set_deadman_ziotime(hrtime_t ns);
extern const char *spa_history_zone(void);

#ifdef __cplusplus
}
#endif

#endif /* _SYS_SPA_IMPL_H */