/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/zap.h>
#include <sys/btree.h>

#define	WITH_DF_BLOCK_ALLOCATOR

#define	GANG_ALLOCATION(flags) \
	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))

/*
 * Metaslab granularity, in bytes. This is roughly similar to what would be
 * referred to as the "stripe size" in traditional RAID arrays. In normal
 * operation, we will try to write this amount of data to a top-level vdev
 * before moving on to the next one.
 */
unsigned long metaslab_aliquot = 512 << 10;

/*
 * For testing, make some blocks above a certain size be gang blocks.
 */
unsigned long metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;

/*
 * In pools where the log space map feature is not enabled we touch
 * multiple metaslabs (and their respective space maps) with each
 * transaction group. Thus, we benefit from having a small space map
 * block size since it allows us to issue more I/O operations scattered
 * around the disk. So a sane default for the space map block size
 * is 8~16K.
 */
int zfs_metaslab_sm_blksz_no_log = (1 << 14);

/*
 * When the log space map feature is enabled, we accumulate a lot of
 * changes per metaslab that are flushed once in a while so we benefit
 * from a bigger block size like 128K for the metaslab space maps.
 */
int zfs_metaslab_sm_blksz_with_log = (1 << 17);

/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;

/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * blocks.
 */
int zfs_metaslab_condense_block_threshold = 4;

/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;

/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or
 * equal to zfs_mg_fragmentation_threshold. If a metaslab group
 * exceeds this threshold then it will be skipped unless all metaslab
 * groups within the metaslab class have also crossed this threshold.
 *
 * This tunable was introduced to avoid edge cases where we continue
 * allocating from very fragmented disks in our pool while other, less
 * fragmented disks exist. On the other hand, if all disks in the
 * pool are uniformly approaching the threshold, the threshold can
 * be a speed bump in performance, where we keep switching the disks
 * that we allocate from (e.g. we allocate some segments from disk A,
 * pushing it past the threshold, while freeing segments from disk B
 * brings its fragmentation back below the threshold).
 *
 * Empirically, we've seen that our vdev selection for allocations is
 * good enough that fragmentation increases uniformly across all vdevs
 * the majority of the time. Thus we set the threshold percentage high
 * enough to avoid hitting the speed bump on pools that are being pushed
 * to the edge.
 */
int zfs_mg_fragmentation_threshold = 95;

/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
int zfs_metaslab_fragmentation_threshold = 70;

/*
 * When set will load all metaslabs when pool is first opened.
 */
int metaslab_debug_load = 0;

/*
 * When set will prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e., search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;

/*
 * Maximum distance to search forward from the last offset. Without this
 * limit, fragmented pools can see >100,000 iterations and
 * metaslab_block_picker() becomes the performance limiting factor on
 * high-performance storage.
 *
 * With the default setting of 16MB, we typically see less than 500
 * iterations, even with very fragmented, ashift=9 pools. The maximum number
 * of iterations possible is:
 *     metaslab_df_max_search / (2 * (1<<ashift))
 * With the default setting of 16MB this is 16*1024 (with ashift=9) or
 * 2048 (with ashift=12).
 */
int metaslab_df_max_search = 16 * 1024 * 1024;

/*
 * Forces the metaslab_block_picker function to search for at least this many
 * segments forward until giving up on finding a segment that the allocation
 * will fit into.
 */
uint32_t metaslab_min_search_count = 100;

/*
 * If we are not searching forward (due to metaslab_df_max_search,
 * metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
 * controls what segment is used. If it is set, we will use the largest free
 * segment. If it is not set, we will use a segment of exactly the requested
 * size (or larger).
 */
int metaslab_df_use_largest_segment = B_FALSE;

/*
 * Percentage of all cpus that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;

/*
 * These tunables control how long a metaslab will remain loaded after the
 * last allocation from it. A metaslab can't be unloaded until at least
 * metaslab_unload_delay TXGs and metaslab_unload_delay_ms milliseconds
 * have elapsed. However, zfs_metaslab_mem_limit may cause it to be
 * unloaded sooner. These settings are intended to be generous -- to keep
 * metaslabs loaded for a long time, reducing the rate of metaslab loading.
 */
int metaslab_unload_delay = 32;
int metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */

/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = 10;

/*
 * Enable/disable preloading of metaslabs.
 */
int metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
int metaslab_fragmentation_factor_enabled = B_TRUE;

/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
int metaslab_lba_weighting_enabled = B_TRUE;

/*
 * Enable/disable metaslab group biasing.
 */
int metaslab_bias_enabled = B_TRUE;

/*
 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
 */
boolean_t zfs_remap_blkptr_enable = B_TRUE;

/*
 * Enable/disable segment-based metaslab selection.
 */
int zfs_metaslab_segment_weight_enabled = B_TRUE;

/*
 * When using segment-based metaslab selection, we will continue
 * allocating from the active metaslab until we have exhausted
 * zfs_metaslab_switch_threshold of its buckets.
 */
int zfs_metaslab_switch_threshold = 2;

/*
 * Internal switch to enable/disable the metaslab allocation tracing
 * facility.
 */
boolean_t metaslab_trace_enabled = B_FALSE;

/*
 * Maximum entries that the metaslab allocation tracing facility will keep
 * in a given list when running in non-debug mode. We limit the number
 * of entries in non-debug mode to prevent us from using up too much memory.
 * The limit should be sufficiently large that we don't expect any allocation
 * to ever exceed this value. In debug mode, the system will panic if this
 * limit is ever reached, allowing for further investigation.
 */
uint64_t metaslab_trace_max_entries = 5000;

/*
 * Maximum number of metaslabs per group that can be disabled
 * simultaneously.
 */
int max_disabled_ms = 3;

/*
 * Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
 * To avoid 64-bit overflow, don't set above UINT32_MAX.
 */
unsigned long zfs_metaslab_max_size_cache_sec = 3600; /* 1 hour */

/*
 * Maximum percentage of memory to use on storing loaded metaslabs. If loading
 * a metaslab would take it over this percentage, the oldest selected metaslab
 * is automatically unloaded.
 */
int zfs_metaslab_mem_limit = 75;

/*
 * Force the per-metaslab range trees to use 64-bit integers to store
 * segments. Used for debugging purposes.
 */
boolean_t zfs_metaslab_force_large_segs = B_FALSE;

/*
 * By default we only store segments over a certain size in the size-sorted
 * metaslab trees (ms_allocatable_by_size and
 * ms_unflushed_frees_by_size). This dramatically reduces memory usage and
 * improves load and unload times at the cost of causing us to use slightly
 * larger segments than we would otherwise in some cases.
 */
uint32_t metaslab_by_size_min_shift = 14;

/*
 * If not set, we will first try normal allocation. If that fails then
 * we will do a gang allocation. If that fails then we will do a "try hard"
 * gang allocation. If that fails then we will have a multi-layer gang
 * block.
 *
 * If set, we will first try normal allocation. If that fails then
 * we will do a "try hard" allocation. If that fails we will do a gang
 * allocation. If that fails we will do a "try hard" gang allocation. If
 * that fails then we will have a multi-layer gang block.
 */
int zfs_metaslab_try_hard_before_gang = B_FALSE;

/*
 * When not trying hard, we only consider the best zfs_metaslab_find_max_tries
 * metaslabs. This improves performance, especially when there are many
 * metaslabs per vdev and the allocation can't actually be satisfied (so we
 * would otherwise iterate all the metaslabs). If there is a metaslab with a
 * worse weight but it can actually satisfy the allocation, we won't find it
 * until trying hard. This may happen if the worse metaslab is not loaded
 * (and the true weight is better than we have calculated), or due to weight
 * bucketization. E.g. we are looking for a 60K segment, and the best
 * metaslabs all have free segments in the 32-63K bucket, but the best
 * zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a
 * subsequent metaslab has ms_max_size >60KB (but fewer segments in this
 * bucket, and therefore a lower weight).
 */
int zfs_metaslab_find_max_tries = 100;

static uint64_t metaslab_weight(metaslab_t *, boolean_t);
static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);

static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
static unsigned int metaslab_idx_func(multilist_t *, void *);
static void metaslab_evict(metaslab_t *, uint64_t);
static void metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg);
kmem_cache_t *metaslab_alloc_trace_cache;

typedef struct metaslab_stats {
	kstat_named_t metaslabstat_trace_over_limit;
	kstat_named_t metaslabstat_reload_tree;
	kstat_named_t metaslabstat_too_many_tries;
	kstat_named_t metaslabstat_try_hard;
} metaslab_stats_t;

static metaslab_stats_t metaslab_stats = {
	{ "trace_over_limit",	KSTAT_DATA_UINT64 },
	{ "reload_tree",	KSTAT_DATA_UINT64 },
	{ "too_many_tries",	KSTAT_DATA_UINT64 },
	{ "try_hard",		KSTAT_DATA_UINT64 },
};

#define	METASLABSTAT_BUMP(stat) \
	atomic_inc_64(&metaslab_stats.stat.value.ui64);

kstat_t *metaslab_ksp;

void
metaslab_stat_init(void)
{
	ASSERT(metaslab_alloc_trace_cache == NULL);
	metaslab_alloc_trace_cache = kmem_cache_create(
	    "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
	    "misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (metaslab_ksp != NULL) {
		metaslab_ksp->ks_data = &metaslab_stats;
		kstat_install(metaslab_ksp);
	}
}

void
metaslab_stat_fini(void)
{
	if (metaslab_ksp != NULL) {
		kstat_delete(metaslab_ksp);
		metaslab_ksp = NULL;
	}

	kmem_cache_destroy(metaslab_alloc_trace_cache);
	metaslab_alloc_trace_cache = NULL;
}

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
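/*
 * Allocate and initialize a metaslab class, including one
 * metaslab_class_allocator_t (mc_allocator[]) per spa allocator.
 */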
metaslab_class_t *
metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(offsetof(metaslab_class_t,
	    mc_allocator[spa->spa_alloc_count]), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_ops = ops;
	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
	mc->mc_metaslab_txg_list = multilist_create(sizeof (metaslab_t),
	    offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
		mca->mca_rotor = NULL;
		zfs_refcount_create_tracked(&mca->mca_alloc_slots);
	}

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	spa_t *spa = mc->mc_spa;

	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
		ASSERT(mca->mca_rotor == NULL);
		zfs_refcount_destroy(&mca->mca_alloc_slots);
	}
	mutex_destroy(&mc->mc_lock);
	multilist_destroy(mc->mc_metaslab_txg_list);
	kmem_free(mc, offsetof(metaslab_class_t,
	    mc_allocator[spa->spa_alloc_count]));
}

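/*
 * Debug helper: walk the allocation rotor and assert that every metaslab
 * group on it belongs to this class and sits on a top-level, non-hole vdev.
 */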
int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_allocator[0].mca_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_allocator[0].mca_rotor);

	return (0);
}

static void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}

uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}

void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
	spa_t *spa = mc->mc_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t *mc_hist;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
			mc_hist[i] += mg->mg_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);

	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

/*
 * Calculate the metaslab class's fragmentation metric. The metric
 * is weighted based on the space contribution of each metaslab group.
 * The return value will be a number between 0 and 100 (inclusive), or
 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
 * zfs_frag_table for more information about the metric.
 */
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t fragmentation = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels,
		 * or vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * If a metaslab group does not contain a fragmentation
		 * metric then just bail out.
		 */
		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
			return (ZFS_FRAG_INVALID);
		}

		/*
		 * Determine how much this metaslab_group is contributing
		 * to the overall pool fragmentation metric.
		 */
		fragmentation += mg->mg_fragmentation *
		    metaslab_group_get_space(mg);
	}
	fragmentation /= metaslab_class_get_space(mc);

	ASSERT3U(fragmentation, <=, 100);
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (fragmentation);
}

/*
 * Calculate the amount of expandable space that is available in
 * this metaslab class. If a device is expanded then its expandable
 * space will be the amount of allocatable space that is currently not
 * part of this metaslab class.
 */
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t space = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * Calculate if we have enough space to add additional
		 * metaslabs. We report the expandable space in terms
		 * of the metaslab size since that's the unit of expansion.
		 */
		space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
		    1ULL << tvd->vdev_ms_shift);
	}
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (space);
}

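/*
 * Walk the class's per-txg metaslab list and evict (unload) metaslabs that
 * have not been selected for allocation recently enough, as controlled by
 * metaslab_unload_delay (in txgs) and metaslab_unload_delay_ms.
 */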
void
metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
{
	multilist_t *ml = mc->mc_metaslab_txg_list;
	for (int i = 0; i < multilist_get_num_sublists(ml); i++) {
		multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
		metaslab_t *msp = multilist_sublist_head(mls);
		multilist_sublist_unlock(mls);
		while (msp != NULL) {
			mutex_enter(&msp->ms_lock);

			/*
			 * If the metaslab has been removed from the list
			 * (which could happen if we were at the memory limit
			 * and it was evicted during this loop), then we can't
			 * proceed and we should restart the sublist.
			 */
			if (!multilist_link_active(&msp->ms_class_txg_node)) {
				mutex_exit(&msp->ms_lock);
				i--;
				break;
			}
			mls = multilist_sublist_lock(ml, i);
			metaslab_t *next_msp = multilist_sublist_next(mls, msp);
			multilist_sublist_unlock(mls);
			if (txg >
			    msp->ms_selected_txg + metaslab_unload_delay &&
			    gethrtime() > msp->ms_selected_time +
			    (uint64_t)MSEC2NSEC(metaslab_unload_delay_ms)) {
				metaslab_evict(msp, txg);
			} else {
				/*
				 * Once we've hit a metaslab selected too
				 * recently to evict, we're done evicting for
				 * now.
				 */
				mutex_exit(&msp->ms_lock);
				break;
			}
			mutex_exit(&msp->ms_lock);
			msp = next_msp;
		}
	}
}

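/*
 * Comparator used to keep each group's mg_metaslab_tree sorted; the
 * ordering rules are described in the comment below.
 */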
static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = (const metaslab_t *)x1;
	const metaslab_t *m2 = (const metaslab_t *)x2;

	int sort1 = 0;
	int sort2 = 0;
	if (m1->ms_allocator != -1 && m1->ms_primary)
		sort1 = 1;
	else if (m1->ms_allocator != -1 && !m1->ms_primary)
		sort1 = 2;
	if (m2->ms_allocator != -1 && m2->ms_primary)
		sort2 = 1;
	else if (m2->ms_allocator != -1 && !m2->ms_primary)
		sort2 = 2;

	/*
	 * Sort inactive metaslabs first, then primaries, then secondaries.
	 * When selecting a metaslab to allocate from, an allocator first
	 * tries its primary, then secondary active metaslab. If it doesn't
	 * have active metaslabs, or can't allocate from them, it searches
	 * for an inactive metaslab to activate. If it can't find a suitable
	 * one, it will steal a primary or secondary metaslab from another
	 * allocator.
	 */
	if (sort1 < sort2)
		return (-1);
	if (sort1 > sort2)
		return (1);

	int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
	if (likely(cmp))
		return (cmp);

	IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);

	return (TREE_CMP(m1->ms_start, m2->ms_start));
}

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
/*
 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the group's free capacity
 * is above the zfs_mg_noalloc_threshold and its fragmentation metric
 * (when valid) does not exceed zfs_mg_fragmentation_threshold. If a
 * metaslab group transitions from allocatable to non-allocatable or
 * vice versa then the metaslab group's class is updated to reflect
 * the transition.
 */
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_class_t *mc = mg->mg_class;
	vdev_stat_t *vs = &vd->vdev_stat;
	boolean_t was_allocatable;
	boolean_t was_initialized;

	ASSERT(vd == vd->vdev_top);
	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
	    SCL_ALLOC);

	mutex_enter(&mg->mg_lock);
	was_allocatable = mg->mg_allocatable;
	was_initialized = mg->mg_initialized;

	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
	    (vs->vs_space + 1);

	mutex_enter(&mc->mc_lock);

	/*
	 * If the metaslab group was just added then it won't
	 * have any space until we finish syncing out this txg.
	 * At that point we will consider it initialized and available
	 * for allocations. We also don't consider non-activated
	 * metaslab groups (e.g. vdevs that are in the middle of being removed)
	 * to be initialized, because they can't be used for allocation.
	 */
	mg->mg_initialized = metaslab_group_initialized(mg);
	if (!was_initialized && mg->mg_initialized) {
		mc->mc_groups++;
	} else if (was_initialized && !mg->mg_initialized) {
		ASSERT3U(mc->mc_groups, >, 0);
		mc->mc_groups--;
	}
	if (mg->mg_initialized)
		mg->mg_no_free_space = B_FALSE;

	/*
	 * A metaslab group is considered allocatable if it has plenty
	 * of free space or is not heavily fragmented. We only take
	 * fragmentation into account if the metaslab group has a valid
	 * fragmentation metric (i.e. a value between 0 and 100).
	 */
	mg->mg_allocatable = (mg->mg_activation_count > 0 &&
	    mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));

	/*
	 * The mc_alloc_groups maintains a count of the number of
	 * groups in this metaslab class that are still above the
	 * zfs_mg_noalloc_threshold. This is used by the allocating
	 * threads to determine if they should avoid allocations to
	 * a given group. The allocator will avoid allocations to a group
	 * if that group has reached or is below the zfs_mg_noalloc_threshold
	 * and there are still other groups that are above the threshold.
	 * When a group transitions from allocatable to non-allocatable or
	 * vice versa we update the metaslab class to reflect that change.
	 * When the mc_alloc_groups value drops to 0 that means that all
	 * groups have reached the zfs_mg_noalloc_threshold making all groups
	 * eligible for allocations. This effectively means that all devices
	 * are balanced again.
	 */
	if (was_allocatable && !mg->mg_allocatable)
		mc->mc_alloc_groups--;
	else if (!was_allocatable && mg->mg_allocatable)
		mc->mc_alloc_groups++;
	mutex_exit(&mc->mc_lock);

	mutex_exit(&mg->mg_lock);
}

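/*
 * Comparator that sorts metaslabs by ms_unflushed_txg; ties are broken by
 * vdev id and then metaslab id to yield a total order.
 */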
int
metaslab_sort_by_flushed(const void *va, const void *vb)
{
	const metaslab_t *a = va;
	const metaslab_t *b = vb;

	int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
	if (likely(cmp))
		return (cmp);

	uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
	uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
	cmp = TREE_CMP(a_vdev_id, b_vdev_id);
	if (cmp)
		return (cmp);

	return (TREE_CMP(a->ms_id, b->ms_id));
}

metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(offsetof(metaslab_group_t,
	    mg_allocator[allocators]), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;
	mg->mg_initialized = B_FALSE;
	mg->mg_no_free_space = B_TRUE;
	mg->mg_allocators = allocators;

	for (int i = 0; i < allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		zfs_refcount_create_tracked(&mga->mga_alloc_queue_depth);
	}

	mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
	    maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	taskq_destroy(mg->mg_taskq);
	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	mutex_destroy(&mg->mg_ms_disabled_lock);
	cv_destroy(&mg->mg_ms_disabled_cv);

	for (int i = 0; i < mg->mg_allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		zfs_refcount_destroy(&mga->mga_alloc_queue_depth);
	}
	kmem_free(mg, offsetof(metaslab_group_t,
	    mg_allocator[mg->mg_allocators]));
}

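/*
 * Link the group into the class's circular list of active groups and point
 * each allocator's rotor at a group in the ring, making the group eligible
 * for allocations.
 */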
void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	spa_t *spa = mc->mc_spa;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);

	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
	metaslab_group_alloc_update(mg);

	if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mc->mc_allocator[i].mca_rotor = mg;
		mg = mg->mg_next;
	}
}

/*
 * Passivate a metaslab group and remove it from the allocation rotor.
 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
 * a metaslab group. This function will momentarily drop spa_config_locks
 * that are lower than the SCL_ALLOC lock (see comment below).
 */
void
metaslab_group_passivate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	spa_t *spa = mc->mc_spa;
	metaslab_group_t *mgprev, *mgnext;
	int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);

	ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
	    (SCL_ALLOC | SCL_ZIO));

	if (--mg->mg_activation_count != 0) {
		for (int i = 0; i < spa->spa_alloc_count; i++)
			ASSERT(mc->mc_allocator[i].mca_rotor != mg);
		ASSERT(mg->mg_prev == NULL);
		ASSERT(mg->mg_next == NULL);
		ASSERT(mg->mg_activation_count < 0);
		return;
	}

	/*
	 * The spa_config_lock is an array of rwlocks, ordered as
	 * follows (from highest to lowest):
	 *	SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
	 *	SCL_ZIO > SCL_FREE > SCL_VDEV
	 * (For more information about the spa_config_lock see spa_misc.c)
	 * The higher the lock, the broader its coverage. When we passivate
	 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
	 * config locks. However, the metaslab group's taskq might be trying
	 * to preload metaslabs so we must drop the SCL_ZIO lock and any
	 * lower locks to allow the I/O to complete. At a minimum,
	 * we continue to hold the SCL_ALLOC lock, which prevents any future
	 * allocations from taking place and any changes to the vdev tree.
	 */
	spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
	taskq_wait_outstanding(mg->mg_taskq, 0);
	spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
	metaslab_group_alloc_update(mg);
	for (int i = 0; i < mg->mg_allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		metaslab_t *msp = mga->mga_primary;
		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
		msp = mga->mga_secondary;
		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
	}

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mgnext = NULL;
	} else {
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		if (mc->mc_allocator[i].mca_rotor == mg)
			mc->mc_allocator[i].mca_rotor = mgnext;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
}

boolean_t
metaslab_group_initialized(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	vdev_stat_t *vs = &vd->vdev_stat;

	return (vs->vs_space != 0 && mg->mg_activation_count > 0);
}

uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
	return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
}

void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
	uint64_t *mg_hist;
	vdev_t *vd = mg->mg_vd;
	uint64_t ashift = vd->vdev_ashift;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
	    SPACE_MAP_HISTOGRAM_SIZE + ashift);

	for (int m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		/* skip if not active or not a member */
		if (msp->ms_sm == NULL || msp->ms_group != mg)
			continue;

		for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
			mg_hist[i + ashift] +=
			    msp->ms_sm->sm_phys->smp_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);

	kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

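/*
 * Fold this metaslab's space map histogram into the group's and class's
 * aggregate histograms, offset by the vdev's ashift.
 */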
static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		mg->mg_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mg->mg_lock);
}

void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		ASSERT3U(mg->mg_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);
		ASSERT3U(mc->mc_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);

		mg->mg_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	ASSERT(msp->ms_group == NULL);
	mutex_enter(&mg->mg_lock);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);

	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_add(mg, msp);
	mutex_exit(&msp->ms_lock);
}

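/*
 * Detach a metaslab from its group: drop it from the group's histogram and
 * AVL tree, and from the class's per-txg eviction list.
 */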
Macy {
1108eda14cbcSMatt Macy mutex_enter(&msp->ms_lock);
1109eda14cbcSMatt Macy metaslab_group_histogram_remove(mg, msp);
1110eda14cbcSMatt Macy mutex_exit(&msp->ms_lock);
1111eda14cbcSMatt Macy
1112eda14cbcSMatt Macy mutex_enter(&mg->mg_lock);
1113eda14cbcSMatt Macy ASSERT(msp->ms_group == mg);
1114eda14cbcSMatt Macy avl_remove(&mg->mg_metaslab_tree, msp);
1115eda14cbcSMatt Macy
1116eda14cbcSMatt Macy metaslab_class_t *mc = msp->ms_group->mg_class;
1117eda14cbcSMatt Macy multilist_sublist_t *mls =
1118eda14cbcSMatt Macy multilist_sublist_lock_obj(mc->mc_metaslab_txg_list, msp);
1119eda14cbcSMatt Macy if (multilist_link_active(&msp->ms_class_txg_node))
1120eda14cbcSMatt Macy multilist_sublist_remove(mls, msp);
1121eda14cbcSMatt Macy multilist_sublist_unlock(mls);
1122eda14cbcSMatt Macy
1123eda14cbcSMatt Macy msp->ms_group = NULL;
1124eda14cbcSMatt Macy mutex_exit(&mg->mg_lock);
1125eda14cbcSMatt Macy }
1126eda14cbcSMatt Macy
1127eda14cbcSMatt Macy static void
1128eda14cbcSMatt Macy metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
1129eda14cbcSMatt Macy {
1130eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock));
1131eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&mg->mg_lock));
1132eda14cbcSMatt Macy ASSERT(msp->ms_group == mg);
1133eda14cbcSMatt Macy
1134eda14cbcSMatt Macy avl_remove(&mg->mg_metaslab_tree, msp);
1135eda14cbcSMatt Macy msp->ms_weight = weight;
1136eda14cbcSMatt Macy avl_add(&mg->mg_metaslab_tree, msp);
1137eda14cbcSMatt Macy
1138eda14cbcSMatt Macy }
1139eda14cbcSMatt Macy
1140eda14cbcSMatt Macy static void
1141eda14cbcSMatt Macy metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
1142eda14cbcSMatt Macy {
1143eda14cbcSMatt Macy /*
1144eda14cbcSMatt Macy * Although in principle the weight can be any value, in
1145eda14cbcSMatt Macy * practice we do not use values in the range [1, 511].
1146eda14cbcSMatt Macy */
1147eda14cbcSMatt Macy ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
1148eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock));
1149eda14cbcSMatt Macy
1150eda14cbcSMatt Macy mutex_enter(&mg->mg_lock);
1151eda14cbcSMatt Macy metaslab_group_sort_impl(mg, msp, weight);
1152eda14cbcSMatt Macy mutex_exit(&mg->mg_lock);
1153eda14cbcSMatt Macy }
1154eda14cbcSMatt Macy
1155eda14cbcSMatt Macy /*
1156eda14cbcSMatt Macy * Calculate the fragmentation for a given metaslab group. We can use
1157eda14cbcSMatt Macy * a simple average here since all metaslabs within the group must have
1158eda14cbcSMatt Macy * the same size. The return value will be a value between 0 and 100
1159eda14cbcSMatt Macy * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
1160eda14cbcSMatt Macy * group have a fragmentation metric.
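 * As a rough illustration (hypothetical numbers, not taken from any real
 * pool): with vdev_ms_count = 10, more than half of the metaslabs (at
 * least 6) must report a valid ms_fragmentation before a group value is
 * computed; if those 6 report 10, 10, 20, 20, 30 and 30, the group
 * fragmentation is (10 + 10 + 20 + 20 + 30 + 30) / 6 = 20.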
1161eda14cbcSMatt Macy */ 1162eda14cbcSMatt Macy uint64_t 1163eda14cbcSMatt Macy metaslab_group_fragmentation(metaslab_group_t *mg) 1164eda14cbcSMatt Macy { 1165eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 1166eda14cbcSMatt Macy uint64_t fragmentation = 0; 1167eda14cbcSMatt Macy uint64_t valid_ms = 0; 1168eda14cbcSMatt Macy 1169eda14cbcSMatt Macy for (int m = 0; m < vd->vdev_ms_count; m++) { 1170eda14cbcSMatt Macy metaslab_t *msp = vd->vdev_ms[m]; 1171eda14cbcSMatt Macy 1172eda14cbcSMatt Macy if (msp->ms_fragmentation == ZFS_FRAG_INVALID) 1173eda14cbcSMatt Macy continue; 1174eda14cbcSMatt Macy if (msp->ms_group != mg) 1175eda14cbcSMatt Macy continue; 1176eda14cbcSMatt Macy 1177eda14cbcSMatt Macy valid_ms++; 1178eda14cbcSMatt Macy fragmentation += msp->ms_fragmentation; 1179eda14cbcSMatt Macy } 1180eda14cbcSMatt Macy 1181eda14cbcSMatt Macy if (valid_ms <= mg->mg_vd->vdev_ms_count / 2) 1182eda14cbcSMatt Macy return (ZFS_FRAG_INVALID); 1183eda14cbcSMatt Macy 1184eda14cbcSMatt Macy fragmentation /= valid_ms; 1185eda14cbcSMatt Macy ASSERT3U(fragmentation, <=, 100); 1186eda14cbcSMatt Macy return (fragmentation); 1187eda14cbcSMatt Macy } 1188eda14cbcSMatt Macy 1189eda14cbcSMatt Macy /* 1190eda14cbcSMatt Macy * Determine if a given metaslab group should skip allocations. A metaslab 1191eda14cbcSMatt Macy * group should avoid allocations if its free capacity is less than the 1192eda14cbcSMatt Macy * zfs_mg_noalloc_threshold or its fragmentation metric is greater than 1193eda14cbcSMatt Macy * zfs_mg_fragmentation_threshold and there is at least one metaslab group 1194eda14cbcSMatt Macy * that can still handle allocations. If the allocation throttle is enabled 1195eda14cbcSMatt Macy * then we skip allocations to devices that have reached their maximum 1196eda14cbcSMatt Macy * allocation queue depth unless the selected metaslab group is the only 1197eda14cbcSMatt Macy * eligible group remaining. 1198eda14cbcSMatt Macy */ 1199eda14cbcSMatt Macy static boolean_t 1200eda14cbcSMatt Macy metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor, 1201eda14cbcSMatt Macy uint64_t psize, int allocator, int d) 1202eda14cbcSMatt Macy { 1203eda14cbcSMatt Macy spa_t *spa = mg->mg_vd->vdev_spa; 1204eda14cbcSMatt Macy metaslab_class_t *mc = mg->mg_class; 1205eda14cbcSMatt Macy 1206eda14cbcSMatt Macy /* 1207eda14cbcSMatt Macy * We can only consider skipping this metaslab group if it's 1208eda14cbcSMatt Macy * in the normal metaslab class and there are other metaslab 1209eda14cbcSMatt Macy * groups to select from. Otherwise, we always consider it eligible 1210eda14cbcSMatt Macy * for allocations. 1211eda14cbcSMatt Macy */ 1212eda14cbcSMatt Macy if ((mc != spa_normal_class(spa) && 1213eda14cbcSMatt Macy mc != spa_special_class(spa) && 1214eda14cbcSMatt Macy mc != spa_dedup_class(spa)) || 1215eda14cbcSMatt Macy mc->mc_groups <= 1) 1216eda14cbcSMatt Macy return (B_TRUE); 1217eda14cbcSMatt Macy 1218eda14cbcSMatt Macy /* 1219eda14cbcSMatt Macy * If the metaslab group's mg_allocatable flag is set (see comments 1220eda14cbcSMatt Macy * in metaslab_group_alloc_update() for more information) and 1221eda14cbcSMatt Macy * the allocation throttle is disabled then allow allocations to this 1222eda14cbcSMatt Macy * device. However, if the allocation throttle is enabled then 12237877fdebSMatt Macy * check if we have reached our allocation limit (mga_alloc_queue_depth) 1224eda14cbcSMatt Macy * to determine if we should allow allocations to this metaslab group. 
1225eda14cbcSMatt Macy * If all metaslab groups are no longer considered allocatable
1226eda14cbcSMatt Macy * (mc_alloc_groups == 0) or we're trying to allocate the smallest
1227eda14cbcSMatt Macy * gang block size then we allow allocations on this metaslab group
1228eda14cbcSMatt Macy * regardless of the mg_allocatable or throttle settings.
1229eda14cbcSMatt Macy */
1230eda14cbcSMatt Macy if (mg->mg_allocatable) {
1231eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
1232eda14cbcSMatt Macy int64_t qdepth;
1233eda14cbcSMatt Macy uint64_t qmax = mga->mga_cur_max_alloc_queue_depth;
1234eda14cbcSMatt Macy
1235eda14cbcSMatt Macy if (!mc->mc_alloc_throttle_enabled)
1236eda14cbcSMatt Macy return (B_TRUE);
1237eda14cbcSMatt Macy
1238eda14cbcSMatt Macy /*
1239eda14cbcSMatt Macy * If this metaslab group does not have any free space, then
1240eda14cbcSMatt Macy * there is no point in looking further.
1241eda14cbcSMatt Macy */
1242eda14cbcSMatt Macy if (mg->mg_no_free_space)
1243eda14cbcSMatt Macy return (B_FALSE);
1244eda14cbcSMatt Macy
1245eda14cbcSMatt Macy /*
1246eda14cbcSMatt Macy * Relax allocation throttling for ditto blocks. Due to
1247eda14cbcSMatt Macy * random imbalances in allocation it tends to push copies
1248eda14cbcSMatt Macy * to one vdev that looks a bit better at the moment.
1249eda14cbcSMatt Macy */
1250eda14cbcSMatt Macy qmax = qmax * (4 + d) / 4;
1251eda14cbcSMatt Macy
1252eda14cbcSMatt Macy qdepth = zfs_refcount_count(&mga->mga_alloc_queue_depth);
1253eda14cbcSMatt Macy
1254eda14cbcSMatt Macy /*
1255eda14cbcSMatt Macy * If this metaslab group is below its qmax or it's
1256eda14cbcSMatt Macy * the only allocatable metaslab group, then attempt
1257eda14cbcSMatt Macy * to allocate from it.
1258eda14cbcSMatt Macy */
1259eda14cbcSMatt Macy if (qdepth < qmax || mc->mc_alloc_groups == 1)
1260eda14cbcSMatt Macy return (B_TRUE);
1261eda14cbcSMatt Macy ASSERT3U(mc->mc_alloc_groups, >, 1);
1262eda14cbcSMatt Macy
1263eda14cbcSMatt Macy /*
1264eda14cbcSMatt Macy * Since this metaslab group is at or over its qmax, we
1265eda14cbcSMatt Macy * need to determine if there are metaslab groups after this
1266eda14cbcSMatt Macy * one that might be able to handle this allocation. This is
1267eda14cbcSMatt Macy * racy since we can't hold the locks for all metaslab
1268eda14cbcSMatt Macy * groups at the same time when we make this check.
1269eda14cbcSMatt Macy */
1270eda14cbcSMatt Macy for (metaslab_group_t *mgp = mg->mg_next;
1271eda14cbcSMatt Macy mgp != rotor; mgp = mgp->mg_next) {
1272eda14cbcSMatt Macy metaslab_group_allocator_t *mgap =
1273eda14cbcSMatt Macy &mgp->mg_allocator[allocator];
1274eda14cbcSMatt Macy qmax = mgap->mga_cur_max_alloc_queue_depth;
1275eda14cbcSMatt Macy qmax = qmax * (4 + d) / 4;
1276eda14cbcSMatt Macy qdepth =
1277eda14cbcSMatt Macy zfs_refcount_count(&mgap->mga_alloc_queue_depth);
1278eda14cbcSMatt Macy
1279eda14cbcSMatt Macy /*
1280eda14cbcSMatt Macy * If there is another metaslab group that
1281eda14cbcSMatt Macy * might be able to handle the allocation, then
1282eda14cbcSMatt Macy * we return false so that we skip this group.
1283eda14cbcSMatt Macy */
1284eda14cbcSMatt Macy if (qdepth < qmax && !mgp->mg_no_free_space)
1285eda14cbcSMatt Macy return (B_FALSE);
1286eda14cbcSMatt Macy }
1287eda14cbcSMatt Macy
1288eda14cbcSMatt Macy /*
1289eda14cbcSMatt Macy * We didn't find another group to handle the allocation
1290eda14cbcSMatt Macy * so we can't skip this metaslab group even though
1291eda14cbcSMatt Macy * we are at or over our qmax.
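 * For example (hypothetical numbers): with a base
 * mga_cur_max_alloc_queue_depth of 16, the ditto-block relaxation
 * qmax = qmax * (4 + d) / 4 above yields an effective limit of 16 for
 * the first DVA (d = 0), 20 for the second (d = 1) and 24 for the
 * third (d = 2), so later copies are throttled less aggressively.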
1292eda14cbcSMatt Macy */ 1293eda14cbcSMatt Macy return (B_TRUE); 1294eda14cbcSMatt Macy 1295eda14cbcSMatt Macy } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) { 1296eda14cbcSMatt Macy return (B_TRUE); 1297eda14cbcSMatt Macy } 1298eda14cbcSMatt Macy return (B_FALSE); 1299eda14cbcSMatt Macy } 1300eda14cbcSMatt Macy 1301eda14cbcSMatt Macy /* 1302eda14cbcSMatt Macy * ========================================================================== 1303eda14cbcSMatt Macy * Range tree callbacks 1304eda14cbcSMatt Macy * ========================================================================== 1305eda14cbcSMatt Macy */ 1306eda14cbcSMatt Macy 1307eda14cbcSMatt Macy /* 1308eda14cbcSMatt Macy * Comparison function for the private size-ordered tree using 32-bit 1309eda14cbcSMatt Macy * ranges. Tree is sorted by size, larger sizes at the end of the tree. 1310eda14cbcSMatt Macy */ 1311eda14cbcSMatt Macy static int 1312eda14cbcSMatt Macy metaslab_rangesize32_compare(const void *x1, const void *x2) 1313eda14cbcSMatt Macy { 1314eda14cbcSMatt Macy const range_seg32_t *r1 = x1; 1315eda14cbcSMatt Macy const range_seg32_t *r2 = x2; 1316eda14cbcSMatt Macy 1317eda14cbcSMatt Macy uint64_t rs_size1 = r1->rs_end - r1->rs_start; 1318eda14cbcSMatt Macy uint64_t rs_size2 = r2->rs_end - r2->rs_start; 1319eda14cbcSMatt Macy 1320eda14cbcSMatt Macy int cmp = TREE_CMP(rs_size1, rs_size2); 1321eda14cbcSMatt Macy if (likely(cmp)) 1322eda14cbcSMatt Macy return (cmp); 1323eda14cbcSMatt Macy 1324eda14cbcSMatt Macy return (TREE_CMP(r1->rs_start, r2->rs_start)); 1325eda14cbcSMatt Macy } 1326eda14cbcSMatt Macy 1327eda14cbcSMatt Macy /* 1328eda14cbcSMatt Macy * Comparison function for the private size-ordered tree using 64-bit 1329eda14cbcSMatt Macy * ranges. Tree is sorted by size, larger sizes at the end of the tree. 
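 * In other words, segments are ordered primarily by length
 * (rs_end - rs_start) and ties are broken by rs_start, so a 4K segment
 * at offset 0 sorts before an 8K segment anywhere, and two 8K segments
 * sort by their starting offsets.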
1330eda14cbcSMatt Macy */ 1331eda14cbcSMatt Macy static int 1332eda14cbcSMatt Macy metaslab_rangesize64_compare(const void *x1, const void *x2) 1333eda14cbcSMatt Macy { 1334eda14cbcSMatt Macy const range_seg64_t *r1 = x1; 1335eda14cbcSMatt Macy const range_seg64_t *r2 = x2; 1336eda14cbcSMatt Macy 1337eda14cbcSMatt Macy uint64_t rs_size1 = r1->rs_end - r1->rs_start; 1338eda14cbcSMatt Macy uint64_t rs_size2 = r2->rs_end - r2->rs_start; 1339eda14cbcSMatt Macy 1340eda14cbcSMatt Macy int cmp = TREE_CMP(rs_size1, rs_size2); 1341eda14cbcSMatt Macy if (likely(cmp)) 1342eda14cbcSMatt Macy return (cmp); 1343eda14cbcSMatt Macy 1344eda14cbcSMatt Macy return (TREE_CMP(r1->rs_start, r2->rs_start)); 1345eda14cbcSMatt Macy } 1346eda14cbcSMatt Macy typedef struct metaslab_rt_arg { 1347eda14cbcSMatt Macy zfs_btree_t *mra_bt; 1348eda14cbcSMatt Macy uint32_t mra_floor_shift; 1349eda14cbcSMatt Macy } metaslab_rt_arg_t; 1350eda14cbcSMatt Macy 1351eda14cbcSMatt Macy struct mssa_arg { 1352eda14cbcSMatt Macy range_tree_t *rt; 1353eda14cbcSMatt Macy metaslab_rt_arg_t *mra; 1354eda14cbcSMatt Macy }; 1355eda14cbcSMatt Macy 1356eda14cbcSMatt Macy static void 1357eda14cbcSMatt Macy metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size) 1358eda14cbcSMatt Macy { 1359eda14cbcSMatt Macy struct mssa_arg *mssap = arg; 1360eda14cbcSMatt Macy range_tree_t *rt = mssap->rt; 1361eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = mssap->mra; 1362eda14cbcSMatt Macy range_seg_max_t seg = {0}; 1363eda14cbcSMatt Macy rs_set_start(&seg, rt, start); 1364eda14cbcSMatt Macy rs_set_end(&seg, rt, start + size); 1365eda14cbcSMatt Macy metaslab_rt_add(rt, &seg, mrap); 1366eda14cbcSMatt Macy } 1367eda14cbcSMatt Macy 1368eda14cbcSMatt Macy static void 1369eda14cbcSMatt Macy metaslab_size_tree_full_load(range_tree_t *rt) 1370eda14cbcSMatt Macy { 1371eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = rt->rt_arg; 1372eda14cbcSMatt Macy METASLABSTAT_BUMP(metaslabstat_reload_tree); 1373eda14cbcSMatt Macy ASSERT0(zfs_btree_numnodes(mrap->mra_bt)); 1374eda14cbcSMatt Macy mrap->mra_floor_shift = 0; 1375eda14cbcSMatt Macy struct mssa_arg arg = {0}; 1376eda14cbcSMatt Macy arg.rt = rt; 1377eda14cbcSMatt Macy arg.mra = mrap; 1378eda14cbcSMatt Macy range_tree_walk(rt, metaslab_size_sorted_add, &arg); 1379eda14cbcSMatt Macy } 1380eda14cbcSMatt Macy 1381eda14cbcSMatt Macy /* 1382eda14cbcSMatt Macy * Create any block allocator specific components. The current allocators 1383eda14cbcSMatt Macy * rely on using both a size-ordered range_tree_t and an array of uint64_t's. 
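 * Note (summary of the callbacks below): segments smaller than
 * 1 << mra_floor_shift are deliberately kept out of the size-sorted
 * B-tree to bound its memory footprint; when the tree is empty it is
 * rebuilt on demand by metaslab_size_tree_full_load() with the floor
 * dropped to 0 so that every free segment becomes visible again.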
1384eda14cbcSMatt Macy */ 1385eda14cbcSMatt Macy /* ARGSUSED */ 1386eda14cbcSMatt Macy static void 1387eda14cbcSMatt Macy metaslab_rt_create(range_tree_t *rt, void *arg) 1388eda14cbcSMatt Macy { 1389eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = arg; 1390eda14cbcSMatt Macy zfs_btree_t *size_tree = mrap->mra_bt; 1391eda14cbcSMatt Macy 1392eda14cbcSMatt Macy size_t size; 1393eda14cbcSMatt Macy int (*compare) (const void *, const void *); 1394eda14cbcSMatt Macy switch (rt->rt_type) { 1395eda14cbcSMatt Macy case RANGE_SEG32: 1396eda14cbcSMatt Macy size = sizeof (range_seg32_t); 1397eda14cbcSMatt Macy compare = metaslab_rangesize32_compare; 1398eda14cbcSMatt Macy break; 1399eda14cbcSMatt Macy case RANGE_SEG64: 1400eda14cbcSMatt Macy size = sizeof (range_seg64_t); 1401eda14cbcSMatt Macy compare = metaslab_rangesize64_compare; 1402eda14cbcSMatt Macy break; 1403eda14cbcSMatt Macy default: 1404eda14cbcSMatt Macy panic("Invalid range seg type %d", rt->rt_type); 1405eda14cbcSMatt Macy } 1406eda14cbcSMatt Macy zfs_btree_create(size_tree, compare, size); 1407eda14cbcSMatt Macy mrap->mra_floor_shift = metaslab_by_size_min_shift; 1408eda14cbcSMatt Macy } 1409eda14cbcSMatt Macy 1410eda14cbcSMatt Macy /* ARGSUSED */ 1411eda14cbcSMatt Macy static void 1412eda14cbcSMatt Macy metaslab_rt_destroy(range_tree_t *rt, void *arg) 1413eda14cbcSMatt Macy { 1414eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = arg; 1415eda14cbcSMatt Macy zfs_btree_t *size_tree = mrap->mra_bt; 1416eda14cbcSMatt Macy 1417eda14cbcSMatt Macy zfs_btree_destroy(size_tree); 1418eda14cbcSMatt Macy kmem_free(mrap, sizeof (*mrap)); 1419eda14cbcSMatt Macy } 1420eda14cbcSMatt Macy 1421eda14cbcSMatt Macy /* ARGSUSED */ 1422eda14cbcSMatt Macy static void 1423eda14cbcSMatt Macy metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg) 1424eda14cbcSMatt Macy { 1425eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = arg; 1426eda14cbcSMatt Macy zfs_btree_t *size_tree = mrap->mra_bt; 1427eda14cbcSMatt Macy 1428eda14cbcSMatt Macy if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < 1429eda14cbcSMatt Macy (1 << mrap->mra_floor_shift)) 1430eda14cbcSMatt Macy return; 1431eda14cbcSMatt Macy 1432eda14cbcSMatt Macy zfs_btree_add(size_tree, rs); 1433eda14cbcSMatt Macy } 1434eda14cbcSMatt Macy 1435eda14cbcSMatt Macy /* ARGSUSED */ 1436eda14cbcSMatt Macy static void 1437eda14cbcSMatt Macy metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg) 1438eda14cbcSMatt Macy { 1439eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = arg; 1440eda14cbcSMatt Macy zfs_btree_t *size_tree = mrap->mra_bt; 1441eda14cbcSMatt Macy 1442eda14cbcSMatt Macy if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1 << 1443eda14cbcSMatt Macy mrap->mra_floor_shift)) 1444eda14cbcSMatt Macy return; 1445eda14cbcSMatt Macy 1446eda14cbcSMatt Macy zfs_btree_remove(size_tree, rs); 1447eda14cbcSMatt Macy } 1448eda14cbcSMatt Macy 1449eda14cbcSMatt Macy /* ARGSUSED */ 1450eda14cbcSMatt Macy static void 1451eda14cbcSMatt Macy metaslab_rt_vacate(range_tree_t *rt, void *arg) 1452eda14cbcSMatt Macy { 1453eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = arg; 1454eda14cbcSMatt Macy zfs_btree_t *size_tree = mrap->mra_bt; 1455eda14cbcSMatt Macy zfs_btree_clear(size_tree); 1456eda14cbcSMatt Macy zfs_btree_destroy(size_tree); 1457eda14cbcSMatt Macy 1458eda14cbcSMatt Macy metaslab_rt_create(rt, arg); 1459eda14cbcSMatt Macy } 1460eda14cbcSMatt Macy 1461eda14cbcSMatt Macy static range_tree_ops_t metaslab_rt_ops = { 1462eda14cbcSMatt Macy .rtop_create = metaslab_rt_create, 1463eda14cbcSMatt Macy .rtop_destroy = metaslab_rt_destroy, 
1464eda14cbcSMatt Macy .rtop_add = metaslab_rt_add, 1465eda14cbcSMatt Macy .rtop_remove = metaslab_rt_remove, 1466eda14cbcSMatt Macy .rtop_vacate = metaslab_rt_vacate 1467eda14cbcSMatt Macy }; 1468eda14cbcSMatt Macy 1469eda14cbcSMatt Macy /* 1470eda14cbcSMatt Macy * ========================================================================== 1471eda14cbcSMatt Macy * Common allocator routines 1472eda14cbcSMatt Macy * ========================================================================== 1473eda14cbcSMatt Macy */ 1474eda14cbcSMatt Macy 1475eda14cbcSMatt Macy /* 1476eda14cbcSMatt Macy * Return the maximum contiguous segment within the metaslab. 1477eda14cbcSMatt Macy */ 1478eda14cbcSMatt Macy uint64_t 1479eda14cbcSMatt Macy metaslab_largest_allocatable(metaslab_t *msp) 1480eda14cbcSMatt Macy { 1481eda14cbcSMatt Macy zfs_btree_t *t = &msp->ms_allocatable_by_size; 1482eda14cbcSMatt Macy range_seg_t *rs; 1483eda14cbcSMatt Macy 1484eda14cbcSMatt Macy if (t == NULL) 1485eda14cbcSMatt Macy return (0); 1486eda14cbcSMatt Macy if (zfs_btree_numnodes(t) == 0) 1487eda14cbcSMatt Macy metaslab_size_tree_full_load(msp->ms_allocatable); 1488eda14cbcSMatt Macy 1489eda14cbcSMatt Macy rs = zfs_btree_last(t, NULL); 1490eda14cbcSMatt Macy if (rs == NULL) 1491eda14cbcSMatt Macy return (0); 1492eda14cbcSMatt Macy 1493eda14cbcSMatt Macy return (rs_get_end(rs, msp->ms_allocatable) - rs_get_start(rs, 1494eda14cbcSMatt Macy msp->ms_allocatable)); 1495eda14cbcSMatt Macy } 1496eda14cbcSMatt Macy 1497eda14cbcSMatt Macy /* 1498eda14cbcSMatt Macy * Return the maximum contiguous segment within the unflushed frees of this 1499eda14cbcSMatt Macy * metaslab. 1500eda14cbcSMatt Macy */ 1501eda14cbcSMatt Macy static uint64_t 1502eda14cbcSMatt Macy metaslab_largest_unflushed_free(metaslab_t *msp) 1503eda14cbcSMatt Macy { 1504eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1505eda14cbcSMatt Macy 1506eda14cbcSMatt Macy if (msp->ms_unflushed_frees == NULL) 1507eda14cbcSMatt Macy return (0); 1508eda14cbcSMatt Macy 1509eda14cbcSMatt Macy if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0) 1510eda14cbcSMatt Macy metaslab_size_tree_full_load(msp->ms_unflushed_frees); 1511eda14cbcSMatt Macy range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size, 1512eda14cbcSMatt Macy NULL); 1513eda14cbcSMatt Macy if (rs == NULL) 1514eda14cbcSMatt Macy return (0); 1515eda14cbcSMatt Macy 1516eda14cbcSMatt Macy /* 1517eda14cbcSMatt Macy * When a range is freed from the metaslab, that range is added to 1518eda14cbcSMatt Macy * both the unflushed frees and the deferred frees. While the block 1519eda14cbcSMatt Macy * will eventually be usable, if the metaslab were loaded the range 1520eda14cbcSMatt Macy * would not be added to the ms_allocatable tree until TXG_DEFER_SIZE 1521eda14cbcSMatt Macy * txgs had passed. As a result, when attempting to estimate an upper 1522eda14cbcSMatt Macy * bound for the largest currently-usable free segment in the 1523eda14cbcSMatt Macy * metaslab, we need to not consider any ranges currently in the defer 1524eda14cbcSMatt Macy * trees. This algorithm approximates the largest available chunk in 1525eda14cbcSMatt Macy * the largest range in the unflushed_frees tree by taking the first 1526eda14cbcSMatt Macy * chunk. While this may be a poor estimate, it should only remain so 1527eda14cbcSMatt Macy * briefly and should eventually self-correct as frees are no longer 1528eda14cbcSMatt Macy * deferred. Similar logic applies to the ms_freed tree. 
See 1529eda14cbcSMatt Macy * metaslab_load() for more details. 1530eda14cbcSMatt Macy * 1531eda14cbcSMatt Macy * There are two primary sources of inaccuracy in this estimate. Both 1532eda14cbcSMatt Macy * are tolerated for performance reasons. The first source is that we 1533eda14cbcSMatt Macy * only check the largest segment for overlaps. Smaller segments may 1534eda14cbcSMatt Macy * have more favorable overlaps with the other trees, resulting in 1535eda14cbcSMatt Macy * larger usable chunks. Second, we only look at the first chunk in 1536eda14cbcSMatt Macy * the largest segment; there may be other usable chunks in the 1537eda14cbcSMatt Macy * largest segment, but we ignore them. 1538eda14cbcSMatt Macy */ 1539eda14cbcSMatt Macy uint64_t rstart = rs_get_start(rs, msp->ms_unflushed_frees); 1540eda14cbcSMatt Macy uint64_t rsize = rs_get_end(rs, msp->ms_unflushed_frees) - rstart; 1541eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 1542eda14cbcSMatt Macy uint64_t start = 0; 1543eda14cbcSMatt Macy uint64_t size = 0; 1544eda14cbcSMatt Macy boolean_t found = range_tree_find_in(msp->ms_defer[t], rstart, 1545eda14cbcSMatt Macy rsize, &start, &size); 1546eda14cbcSMatt Macy if (found) { 1547eda14cbcSMatt Macy if (rstart == start) 1548eda14cbcSMatt Macy return (0); 1549eda14cbcSMatt Macy rsize = start - rstart; 1550eda14cbcSMatt Macy } 1551eda14cbcSMatt Macy } 1552eda14cbcSMatt Macy 1553eda14cbcSMatt Macy uint64_t start = 0; 1554eda14cbcSMatt Macy uint64_t size = 0; 1555eda14cbcSMatt Macy boolean_t found = range_tree_find_in(msp->ms_freed, rstart, 1556eda14cbcSMatt Macy rsize, &start, &size); 1557eda14cbcSMatt Macy if (found) 1558eda14cbcSMatt Macy rsize = start - rstart; 1559eda14cbcSMatt Macy 1560eda14cbcSMatt Macy return (rsize); 1561eda14cbcSMatt Macy } 1562eda14cbcSMatt Macy 1563eda14cbcSMatt Macy static range_seg_t * 1564eda14cbcSMatt Macy metaslab_block_find(zfs_btree_t *t, range_tree_t *rt, uint64_t start, 1565eda14cbcSMatt Macy uint64_t size, zfs_btree_index_t *where) 1566eda14cbcSMatt Macy { 1567eda14cbcSMatt Macy range_seg_t *rs; 1568eda14cbcSMatt Macy range_seg_max_t rsearch; 1569eda14cbcSMatt Macy 1570eda14cbcSMatt Macy rs_set_start(&rsearch, rt, start); 1571eda14cbcSMatt Macy rs_set_end(&rsearch, rt, start + size); 1572eda14cbcSMatt Macy 1573eda14cbcSMatt Macy rs = zfs_btree_find(t, &rsearch, where); 1574eda14cbcSMatt Macy if (rs == NULL) { 1575eda14cbcSMatt Macy rs = zfs_btree_next(t, where, where); 1576eda14cbcSMatt Macy } 1577eda14cbcSMatt Macy 1578eda14cbcSMatt Macy return (rs); 1579eda14cbcSMatt Macy } 1580eda14cbcSMatt Macy 1581eda14cbcSMatt Macy #if defined(WITH_DF_BLOCK_ALLOCATOR) || \ 1582eda14cbcSMatt Macy defined(WITH_CF_BLOCK_ALLOCATOR) 15837877fdebSMatt Macy 1584eda14cbcSMatt Macy /* 1585eda14cbcSMatt Macy * This is a helper function that can be used by the allocator to find a 1586eda14cbcSMatt Macy * suitable block to allocate. This will search the specified B-tree looking 1587eda14cbcSMatt Macy * for a block that matches the specified criteria. 
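 * Roughly: the walk starts at *cursor and visits segments in offset
 * order until one can hold the request (offset + size fits within the
 * segment), searching at most max_search bytes past the first
 * candidate and at least metaslab_min_search_count segments; on
 * success the cursor advances past the allocation, on failure it is
 * reset to 0 and -1ULL is returned.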
1588eda14cbcSMatt Macy */ 1589eda14cbcSMatt Macy static uint64_t 1590eda14cbcSMatt Macy metaslab_block_picker(range_tree_t *rt, uint64_t *cursor, uint64_t size, 1591eda14cbcSMatt Macy uint64_t max_search) 1592eda14cbcSMatt Macy { 1593eda14cbcSMatt Macy if (*cursor == 0) 1594eda14cbcSMatt Macy *cursor = rt->rt_start; 1595eda14cbcSMatt Macy zfs_btree_t *bt = &rt->rt_root; 1596eda14cbcSMatt Macy zfs_btree_index_t where; 1597eda14cbcSMatt Macy range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size, &where); 1598eda14cbcSMatt Macy uint64_t first_found; 1599eda14cbcSMatt Macy int count_searched = 0; 1600eda14cbcSMatt Macy 1601eda14cbcSMatt Macy if (rs != NULL) 1602eda14cbcSMatt Macy first_found = rs_get_start(rs, rt); 1603eda14cbcSMatt Macy 1604eda14cbcSMatt Macy while (rs != NULL && (rs_get_start(rs, rt) - first_found <= 1605eda14cbcSMatt Macy max_search || count_searched < metaslab_min_search_count)) { 1606eda14cbcSMatt Macy uint64_t offset = rs_get_start(rs, rt); 1607eda14cbcSMatt Macy if (offset + size <= rs_get_end(rs, rt)) { 1608eda14cbcSMatt Macy *cursor = offset + size; 1609eda14cbcSMatt Macy return (offset); 1610eda14cbcSMatt Macy } 1611eda14cbcSMatt Macy rs = zfs_btree_next(bt, &where, &where); 1612eda14cbcSMatt Macy count_searched++; 1613eda14cbcSMatt Macy } 1614eda14cbcSMatt Macy 1615eda14cbcSMatt Macy *cursor = 0; 1616eda14cbcSMatt Macy return (-1ULL); 1617eda14cbcSMatt Macy } 1618eda14cbcSMatt Macy #endif /* WITH_DF/CF_BLOCK_ALLOCATOR */ 1619eda14cbcSMatt Macy 1620eda14cbcSMatt Macy #if defined(WITH_DF_BLOCK_ALLOCATOR) 1621eda14cbcSMatt Macy /* 1622eda14cbcSMatt Macy * ========================================================================== 1623eda14cbcSMatt Macy * Dynamic Fit (df) block allocator 1624eda14cbcSMatt Macy * 1625eda14cbcSMatt Macy * Search for a free chunk of at least this size, starting from the last 1626eda14cbcSMatt Macy * offset (for this alignment of block) looking for up to 1627eda14cbcSMatt Macy * metaslab_df_max_search bytes (16MB). If a large enough free chunk is not 1628eda14cbcSMatt Macy * found within 16MB, then return a free chunk of exactly the requested size (or 1629eda14cbcSMatt Macy * larger). 1630eda14cbcSMatt Macy * 1631eda14cbcSMatt Macy * If it seems like searching from the last offset will be unproductive, skip 1632eda14cbcSMatt Macy * that and just return a free chunk of exactly the requested size (or larger). 1633eda14cbcSMatt Macy * This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct. This 1634eda14cbcSMatt Macy * mechanism is probably not very useful and may be removed in the future. 1635eda14cbcSMatt Macy * 1636eda14cbcSMatt Macy * The behavior when not searching can be changed to return the largest free 1637eda14cbcSMatt Macy * chunk, instead of a free chunk of exactly the requested size, by setting 1638eda14cbcSMatt Macy * metaslab_df_use_largest_segment. 1639eda14cbcSMatt Macy * ========================================================================== 1640eda14cbcSMatt Macy */ 1641eda14cbcSMatt Macy static uint64_t 1642eda14cbcSMatt Macy metaslab_df_alloc(metaslab_t *msp, uint64_t size) 1643eda14cbcSMatt Macy { 1644eda14cbcSMatt Macy /* 1645eda14cbcSMatt Macy * Find the largest power of 2 block size that evenly divides the 1646eda14cbcSMatt Macy * requested size. This is used to try to allocate blocks with similar 1647eda14cbcSMatt Macy * alignment from the same area of the metaslab (i.e. 
same cursor 1648eda14cbcSMatt Macy * bucket) but it does not guarantee that other allocations sizes 1649eda14cbcSMatt Macy * may exist in the same region. 1650eda14cbcSMatt Macy */ 1651eda14cbcSMatt Macy uint64_t align = size & -size; 1652eda14cbcSMatt Macy uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1]; 1653eda14cbcSMatt Macy range_tree_t *rt = msp->ms_allocatable; 1654eda14cbcSMatt Macy int free_pct = range_tree_space(rt) * 100 / msp->ms_size; 1655eda14cbcSMatt Macy uint64_t offset; 1656eda14cbcSMatt Macy 1657eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1658eda14cbcSMatt Macy 1659eda14cbcSMatt Macy /* 1660eda14cbcSMatt Macy * If we're running low on space, find a segment based on size, 1661eda14cbcSMatt Macy * rather than iterating based on offset. 1662eda14cbcSMatt Macy */ 1663eda14cbcSMatt Macy if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold || 1664eda14cbcSMatt Macy free_pct < metaslab_df_free_pct) { 1665eda14cbcSMatt Macy offset = -1; 1666eda14cbcSMatt Macy } else { 1667eda14cbcSMatt Macy offset = metaslab_block_picker(rt, 1668eda14cbcSMatt Macy cursor, size, metaslab_df_max_search); 1669eda14cbcSMatt Macy } 1670eda14cbcSMatt Macy 1671eda14cbcSMatt Macy if (offset == -1) { 1672eda14cbcSMatt Macy range_seg_t *rs; 1673eda14cbcSMatt Macy if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0) 1674eda14cbcSMatt Macy metaslab_size_tree_full_load(msp->ms_allocatable); 16757877fdebSMatt Macy 1676eda14cbcSMatt Macy if (metaslab_df_use_largest_segment) { 1677eda14cbcSMatt Macy /* use largest free segment */ 1678eda14cbcSMatt Macy rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL); 1679eda14cbcSMatt Macy } else { 1680eda14cbcSMatt Macy zfs_btree_index_t where; 1681eda14cbcSMatt Macy /* use segment of this size, or next largest */ 1682eda14cbcSMatt Macy rs = metaslab_block_find(&msp->ms_allocatable_by_size, 1683eda14cbcSMatt Macy rt, msp->ms_start, size, &where); 1684eda14cbcSMatt Macy } 1685eda14cbcSMatt Macy if (rs != NULL && rs_get_start(rs, rt) + size <= rs_get_end(rs, 1686eda14cbcSMatt Macy rt)) { 1687eda14cbcSMatt Macy offset = rs_get_start(rs, rt); 1688eda14cbcSMatt Macy *cursor = offset + size; 1689eda14cbcSMatt Macy } 1690eda14cbcSMatt Macy } 1691eda14cbcSMatt Macy 1692eda14cbcSMatt Macy return (offset); 1693eda14cbcSMatt Macy } 1694eda14cbcSMatt Macy 1695eda14cbcSMatt Macy static metaslab_ops_t metaslab_df_ops = { 1696eda14cbcSMatt Macy metaslab_df_alloc 1697eda14cbcSMatt Macy }; 1698eda14cbcSMatt Macy 1699eda14cbcSMatt Macy metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops; 1700eda14cbcSMatt Macy #endif /* WITH_DF_BLOCK_ALLOCATOR */ 1701eda14cbcSMatt Macy 1702eda14cbcSMatt Macy #if defined(WITH_CF_BLOCK_ALLOCATOR) 1703eda14cbcSMatt Macy /* 1704eda14cbcSMatt Macy * ========================================================================== 1705eda14cbcSMatt Macy * Cursor fit block allocator - 1706eda14cbcSMatt Macy * Select the largest region in the metaslab, set the cursor to the beginning 1707eda14cbcSMatt Macy * of the range and the cursor_end to the end of the range. As allocations 1708eda14cbcSMatt Macy * are made advance the cursor. Continue allocating from the cursor until 1709eda14cbcSMatt Macy * the range is exhausted and then find a new range. 
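 * For illustration (hypothetical offsets): if the largest free segment
 * is [1M, 1M + 256K), the cursor is set to 1M and cursor_end to
 * 1M + 256K; three consecutive 64K allocations then return 1M,
 * 1M + 64K and 1M + 128K, and only once the remaining room is too
 * small is a new largest segment selected.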
1710eda14cbcSMatt Macy * ========================================================================== 1711eda14cbcSMatt Macy */ 1712eda14cbcSMatt Macy static uint64_t 1713eda14cbcSMatt Macy metaslab_cf_alloc(metaslab_t *msp, uint64_t size) 1714eda14cbcSMatt Macy { 1715eda14cbcSMatt Macy range_tree_t *rt = msp->ms_allocatable; 1716eda14cbcSMatt Macy zfs_btree_t *t = &msp->ms_allocatable_by_size; 1717eda14cbcSMatt Macy uint64_t *cursor = &msp->ms_lbas[0]; 1718eda14cbcSMatt Macy uint64_t *cursor_end = &msp->ms_lbas[1]; 1719eda14cbcSMatt Macy uint64_t offset = 0; 1720eda14cbcSMatt Macy 1721eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1722eda14cbcSMatt Macy 1723eda14cbcSMatt Macy ASSERT3U(*cursor_end, >=, *cursor); 1724eda14cbcSMatt Macy 1725eda14cbcSMatt Macy if ((*cursor + size) > *cursor_end) { 1726eda14cbcSMatt Macy range_seg_t *rs; 1727eda14cbcSMatt Macy 1728eda14cbcSMatt Macy if (zfs_btree_numnodes(t) == 0) 1729eda14cbcSMatt Macy metaslab_size_tree_full_load(msp->ms_allocatable); 1730eda14cbcSMatt Macy rs = zfs_btree_last(t, NULL); 1731eda14cbcSMatt Macy if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < 1732eda14cbcSMatt Macy size) 1733eda14cbcSMatt Macy return (-1ULL); 1734eda14cbcSMatt Macy 1735eda14cbcSMatt Macy *cursor = rs_get_start(rs, rt); 1736eda14cbcSMatt Macy *cursor_end = rs_get_end(rs, rt); 1737eda14cbcSMatt Macy } 1738eda14cbcSMatt Macy 1739eda14cbcSMatt Macy offset = *cursor; 1740eda14cbcSMatt Macy *cursor += size; 1741eda14cbcSMatt Macy 1742eda14cbcSMatt Macy return (offset); 1743eda14cbcSMatt Macy } 1744eda14cbcSMatt Macy 1745eda14cbcSMatt Macy static metaslab_ops_t metaslab_cf_ops = { 1746eda14cbcSMatt Macy metaslab_cf_alloc 1747eda14cbcSMatt Macy }; 1748eda14cbcSMatt Macy 1749eda14cbcSMatt Macy metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops; 1750eda14cbcSMatt Macy #endif /* WITH_CF_BLOCK_ALLOCATOR */ 1751eda14cbcSMatt Macy 1752eda14cbcSMatt Macy #if defined(WITH_NDF_BLOCK_ALLOCATOR) 1753eda14cbcSMatt Macy /* 1754eda14cbcSMatt Macy * ========================================================================== 1755eda14cbcSMatt Macy * New dynamic fit allocator - 1756eda14cbcSMatt Macy * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift 1757eda14cbcSMatt Macy * contiguous blocks. If no region is found then just use the largest segment 1758eda14cbcSMatt Macy * that remains. 1759eda14cbcSMatt Macy * ========================================================================== 1760eda14cbcSMatt Macy */ 1761eda14cbcSMatt Macy 1762eda14cbcSMatt Macy /* 1763eda14cbcSMatt Macy * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift) 1764eda14cbcSMatt Macy * to request from the allocator. 
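 * As a rough sketch of the search below: a request in the 2^hbit size
 * class first tries the per-size-class cursor and, failing that,
 * searches the size-sorted tree for a free region of roughly
 * MIN(max_size, 1ULL << (hbit + metaslab_ndf_clump_shift)) bytes
 * (or the next larger one), so that later allocations of the same
 * class can be clustered within it.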
1765eda14cbcSMatt Macy */ 1766eda14cbcSMatt Macy uint64_t metaslab_ndf_clump_shift = 4; 1767eda14cbcSMatt Macy 1768eda14cbcSMatt Macy static uint64_t 1769eda14cbcSMatt Macy metaslab_ndf_alloc(metaslab_t *msp, uint64_t size) 1770eda14cbcSMatt Macy { 1771eda14cbcSMatt Macy zfs_btree_t *t = &msp->ms_allocatable->rt_root; 1772eda14cbcSMatt Macy range_tree_t *rt = msp->ms_allocatable; 1773eda14cbcSMatt Macy zfs_btree_index_t where; 1774eda14cbcSMatt Macy range_seg_t *rs; 1775eda14cbcSMatt Macy range_seg_max_t rsearch; 1776eda14cbcSMatt Macy uint64_t hbit = highbit64(size); 1777eda14cbcSMatt Macy uint64_t *cursor = &msp->ms_lbas[hbit - 1]; 1778eda14cbcSMatt Macy uint64_t max_size = metaslab_largest_allocatable(msp); 1779eda14cbcSMatt Macy 1780eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1781eda14cbcSMatt Macy 1782eda14cbcSMatt Macy if (max_size < size) 1783eda14cbcSMatt Macy return (-1ULL); 1784eda14cbcSMatt Macy 1785eda14cbcSMatt Macy rs_set_start(&rsearch, rt, *cursor); 1786eda14cbcSMatt Macy rs_set_end(&rsearch, rt, *cursor + size); 1787eda14cbcSMatt Macy 1788eda14cbcSMatt Macy rs = zfs_btree_find(t, &rsearch, &where); 1789eda14cbcSMatt Macy if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < size) { 1790eda14cbcSMatt Macy t = &msp->ms_allocatable_by_size; 1791eda14cbcSMatt Macy 1792eda14cbcSMatt Macy rs_set_start(&rsearch, rt, 0); 1793eda14cbcSMatt Macy rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit + 1794eda14cbcSMatt Macy metaslab_ndf_clump_shift))); 1795eda14cbcSMatt Macy 1796eda14cbcSMatt Macy rs = zfs_btree_find(t, &rsearch, &where); 1797eda14cbcSMatt Macy if (rs == NULL) 1798eda14cbcSMatt Macy rs = zfs_btree_next(t, &where, &where); 1799eda14cbcSMatt Macy ASSERT(rs != NULL); 1800eda14cbcSMatt Macy } 1801eda14cbcSMatt Macy 1802eda14cbcSMatt Macy if ((rs_get_end(rs, rt) - rs_get_start(rs, rt)) >= size) { 1803eda14cbcSMatt Macy *cursor = rs_get_start(rs, rt) + size; 1804eda14cbcSMatt Macy return (rs_get_start(rs, rt)); 1805eda14cbcSMatt Macy } 1806eda14cbcSMatt Macy return (-1ULL); 1807eda14cbcSMatt Macy } 1808eda14cbcSMatt Macy 1809eda14cbcSMatt Macy static metaslab_ops_t metaslab_ndf_ops = { 1810eda14cbcSMatt Macy metaslab_ndf_alloc 1811eda14cbcSMatt Macy }; 1812eda14cbcSMatt Macy 1813eda14cbcSMatt Macy metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops; 1814eda14cbcSMatt Macy #endif /* WITH_NDF_BLOCK_ALLOCATOR */ 1815eda14cbcSMatt Macy 1816eda14cbcSMatt Macy 1817eda14cbcSMatt Macy /* 1818eda14cbcSMatt Macy * ========================================================================== 1819eda14cbcSMatt Macy * Metaslabs 1820eda14cbcSMatt Macy * ========================================================================== 1821eda14cbcSMatt Macy */ 1822eda14cbcSMatt Macy 1823eda14cbcSMatt Macy /* 1824eda14cbcSMatt Macy * Wait for any in-progress metaslab loads to complete. 1825eda14cbcSMatt Macy */ 1826eda14cbcSMatt Macy static void 1827eda14cbcSMatt Macy metaslab_load_wait(metaslab_t *msp) 1828eda14cbcSMatt Macy { 1829eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1830eda14cbcSMatt Macy 1831eda14cbcSMatt Macy while (msp->ms_loading) { 1832eda14cbcSMatt Macy ASSERT(!msp->ms_loaded); 1833eda14cbcSMatt Macy cv_wait(&msp->ms_load_cv, &msp->ms_lock); 1834eda14cbcSMatt Macy } 1835eda14cbcSMatt Macy } 1836eda14cbcSMatt Macy 1837eda14cbcSMatt Macy /* 1838eda14cbcSMatt Macy * Wait for any in-progress flushing to complete. 
1839eda14cbcSMatt Macy */ 1840eda14cbcSMatt Macy static void 1841eda14cbcSMatt Macy metaslab_flush_wait(metaslab_t *msp) 1842eda14cbcSMatt Macy { 1843eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1844eda14cbcSMatt Macy 1845eda14cbcSMatt Macy while (msp->ms_flushing) 1846eda14cbcSMatt Macy cv_wait(&msp->ms_flush_cv, &msp->ms_lock); 1847eda14cbcSMatt Macy } 1848eda14cbcSMatt Macy 1849eda14cbcSMatt Macy static unsigned int 1850eda14cbcSMatt Macy metaslab_idx_func(multilist_t *ml, void *arg) 1851eda14cbcSMatt Macy { 1852eda14cbcSMatt Macy metaslab_t *msp = arg; 1853eda14cbcSMatt Macy return (msp->ms_id % multilist_get_num_sublists(ml)); 1854eda14cbcSMatt Macy } 1855eda14cbcSMatt Macy 1856eda14cbcSMatt Macy uint64_t 1857eda14cbcSMatt Macy metaslab_allocated_space(metaslab_t *msp) 1858eda14cbcSMatt Macy { 1859eda14cbcSMatt Macy return (msp->ms_allocated_space); 1860eda14cbcSMatt Macy } 1861eda14cbcSMatt Macy 1862eda14cbcSMatt Macy /* 1863eda14cbcSMatt Macy * Verify that the space accounting on disk matches the in-core range_trees. 1864eda14cbcSMatt Macy */ 1865eda14cbcSMatt Macy static void 1866eda14cbcSMatt Macy metaslab_verify_space(metaslab_t *msp, uint64_t txg) 1867eda14cbcSMatt Macy { 1868eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 1869eda14cbcSMatt Macy uint64_t allocating = 0; 1870eda14cbcSMatt Macy uint64_t sm_free_space, msp_free_space; 1871eda14cbcSMatt Macy 1872eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1873eda14cbcSMatt Macy ASSERT(!msp->ms_condensing); 1874eda14cbcSMatt Macy 1875eda14cbcSMatt Macy if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0) 1876eda14cbcSMatt Macy return; 1877eda14cbcSMatt Macy 1878eda14cbcSMatt Macy /* 1879eda14cbcSMatt Macy * We can only verify the metaslab space when we're called 1880eda14cbcSMatt Macy * from syncing context with a loaded metaslab that has an 1881eda14cbcSMatt Macy * allocated space map. Calling this in non-syncing context 1882eda14cbcSMatt Macy * does not provide a consistent view of the metaslab since 1883eda14cbcSMatt Macy * we're performing allocations in the future. 1884eda14cbcSMatt Macy */ 1885eda14cbcSMatt Macy if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL || 1886eda14cbcSMatt Macy !msp->ms_loaded) 1887eda14cbcSMatt Macy return; 1888eda14cbcSMatt Macy 1889eda14cbcSMatt Macy /* 1890eda14cbcSMatt Macy * Even though the smp_alloc field can get negative, 1891eda14cbcSMatt Macy * when it comes to a metaslab's space map, that should 1892eda14cbcSMatt Macy * never be the case. 1893eda14cbcSMatt Macy */ 1894eda14cbcSMatt Macy ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0); 1895eda14cbcSMatt Macy 1896eda14cbcSMatt Macy ASSERT3U(space_map_allocated(msp->ms_sm), >=, 1897eda14cbcSMatt Macy range_tree_space(msp->ms_unflushed_frees)); 1898eda14cbcSMatt Macy 1899eda14cbcSMatt Macy ASSERT3U(metaslab_allocated_space(msp), ==, 1900eda14cbcSMatt Macy space_map_allocated(msp->ms_sm) + 1901eda14cbcSMatt Macy range_tree_space(msp->ms_unflushed_allocs) - 1902eda14cbcSMatt Macy range_tree_space(msp->ms_unflushed_frees)); 1903eda14cbcSMatt Macy 1904eda14cbcSMatt Macy sm_free_space = msp->ms_size - metaslab_allocated_space(msp); 1905eda14cbcSMatt Macy 1906eda14cbcSMatt Macy /* 1907eda14cbcSMatt Macy * Account for future allocations since we would have 1908eda14cbcSMatt Macy * already deducted that space from the ms_allocatable. 
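 * Stated as the invariant that is VERIFY'd below (a restatement, not
 * new logic):
 *
 *   ms_size - metaslab_allocated_space(msp) ==
 *       range_tree_space(ms_allocatable) + allocating +
 *       ms_deferspace + range_tree_space(ms_freed)
 *
 * where "allocating" sums ms_allocating[] over the
 * TXG_CONCURRENT_STATES in-flight transaction groups.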
1909eda14cbcSMatt Macy */ 1910eda14cbcSMatt Macy for (int t = 0; t < TXG_CONCURRENT_STATES; t++) { 1911eda14cbcSMatt Macy allocating += 1912eda14cbcSMatt Macy range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]); 1913eda14cbcSMatt Macy } 1914eda14cbcSMatt Macy ASSERT3U(allocating + msp->ms_allocated_this_txg, ==, 1915eda14cbcSMatt Macy msp->ms_allocating_total); 1916eda14cbcSMatt Macy 1917eda14cbcSMatt Macy ASSERT3U(msp->ms_deferspace, ==, 1918eda14cbcSMatt Macy range_tree_space(msp->ms_defer[0]) + 1919eda14cbcSMatt Macy range_tree_space(msp->ms_defer[1])); 1920eda14cbcSMatt Macy 1921eda14cbcSMatt Macy msp_free_space = range_tree_space(msp->ms_allocatable) + allocating + 1922eda14cbcSMatt Macy msp->ms_deferspace + range_tree_space(msp->ms_freed); 1923eda14cbcSMatt Macy 1924eda14cbcSMatt Macy VERIFY3U(sm_free_space, ==, msp_free_space); 1925eda14cbcSMatt Macy } 1926eda14cbcSMatt Macy 1927eda14cbcSMatt Macy static void 1928eda14cbcSMatt Macy metaslab_aux_histograms_clear(metaslab_t *msp) 1929eda14cbcSMatt Macy { 1930eda14cbcSMatt Macy /* 1931eda14cbcSMatt Macy * Auxiliary histograms are only cleared when resetting them, 1932eda14cbcSMatt Macy * which can only happen while the metaslab is loaded. 1933eda14cbcSMatt Macy */ 1934eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 1935eda14cbcSMatt Macy 1936eda14cbcSMatt Macy bzero(msp->ms_synchist, sizeof (msp->ms_synchist)); 1937eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) 1938eda14cbcSMatt Macy bzero(msp->ms_deferhist[t], sizeof (msp->ms_deferhist[t])); 1939eda14cbcSMatt Macy } 1940eda14cbcSMatt Macy 1941eda14cbcSMatt Macy static void 1942eda14cbcSMatt Macy metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift, 1943eda14cbcSMatt Macy range_tree_t *rt) 1944eda14cbcSMatt Macy { 1945eda14cbcSMatt Macy /* 1946eda14cbcSMatt Macy * This is modeled after space_map_histogram_add(), so refer to that 1947eda14cbcSMatt Macy * function for implementation details. We want this to work like 1948eda14cbcSMatt Macy * the space map histogram, and not the range tree histogram, as we 1949eda14cbcSMatt Macy * are essentially constructing a delta that will be later subtracted 1950eda14cbcSMatt Macy * from the space map histogram. 1951eda14cbcSMatt Macy */ 1952eda14cbcSMatt Macy int idx = 0; 1953eda14cbcSMatt Macy for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) { 1954eda14cbcSMatt Macy ASSERT3U(i, >=, idx + shift); 1955eda14cbcSMatt Macy histogram[idx] += rt->rt_histogram[i] << (i - idx - shift); 1956eda14cbcSMatt Macy 1957eda14cbcSMatt Macy if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) { 1958eda14cbcSMatt Macy ASSERT3U(idx + shift, ==, i); 1959eda14cbcSMatt Macy idx++; 1960eda14cbcSMatt Macy ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE); 1961eda14cbcSMatt Macy } 1962eda14cbcSMatt Macy } 1963eda14cbcSMatt Macy } 1964eda14cbcSMatt Macy 1965eda14cbcSMatt Macy /* 1966eda14cbcSMatt Macy * Called at every sync pass that the metaslab gets synced. 1967eda14cbcSMatt Macy * 1968eda14cbcSMatt Macy * The reason is that we want our auxiliary histograms to be updated 1969eda14cbcSMatt Macy * wherever the metaslab's space map histogram is updated. This way 1970eda14cbcSMatt Macy * we stay consistent on which parts of the metaslab space map's 1971eda14cbcSMatt Macy * histogram are currently not available for allocations (e.g because 1972eda14cbcSMatt Macy * they are in the defer, freed, and freeing trees). 
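 * Concretely (see the code below): while the metaslab is loaded,
 * ms_synchist is rebuilt from ms_freed and each ms_deferhist[t] from
 * ms_defer[t]; in all cases the current ms_freeing contents are then
 * added to ms_synchist.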
1973eda14cbcSMatt Macy */ 1974eda14cbcSMatt Macy static void 1975eda14cbcSMatt Macy metaslab_aux_histograms_update(metaslab_t *msp) 1976eda14cbcSMatt Macy { 1977eda14cbcSMatt Macy space_map_t *sm = msp->ms_sm; 1978eda14cbcSMatt Macy ASSERT(sm != NULL); 1979eda14cbcSMatt Macy 1980eda14cbcSMatt Macy /* 1981eda14cbcSMatt Macy * This is similar to the metaslab's space map histogram updates 1982eda14cbcSMatt Macy * that take place in metaslab_sync(). The only difference is that 1983eda14cbcSMatt Macy * we only care about segments that haven't made it into the 1984eda14cbcSMatt Macy * ms_allocatable tree yet. 1985eda14cbcSMatt Macy */ 1986eda14cbcSMatt Macy if (msp->ms_loaded) { 1987eda14cbcSMatt Macy metaslab_aux_histograms_clear(msp); 1988eda14cbcSMatt Macy 1989eda14cbcSMatt Macy metaslab_aux_histogram_add(msp->ms_synchist, 1990eda14cbcSMatt Macy sm->sm_shift, msp->ms_freed); 1991eda14cbcSMatt Macy 1992eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 1993eda14cbcSMatt Macy metaslab_aux_histogram_add(msp->ms_deferhist[t], 1994eda14cbcSMatt Macy sm->sm_shift, msp->ms_defer[t]); 1995eda14cbcSMatt Macy } 1996eda14cbcSMatt Macy } 1997eda14cbcSMatt Macy 1998eda14cbcSMatt Macy metaslab_aux_histogram_add(msp->ms_synchist, 1999eda14cbcSMatt Macy sm->sm_shift, msp->ms_freeing); 2000eda14cbcSMatt Macy } 2001eda14cbcSMatt Macy 2002eda14cbcSMatt Macy /* 2003eda14cbcSMatt Macy * Called every time we are done syncing (writing to) the metaslab, 2004eda14cbcSMatt Macy * i.e. at the end of each sync pass. 2005eda14cbcSMatt Macy * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist] 2006eda14cbcSMatt Macy */ 2007eda14cbcSMatt Macy static void 2008eda14cbcSMatt Macy metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed) 2009eda14cbcSMatt Macy { 2010eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2011eda14cbcSMatt Macy space_map_t *sm = msp->ms_sm; 2012eda14cbcSMatt Macy 2013eda14cbcSMatt Macy if (sm == NULL) { 2014eda14cbcSMatt Macy /* 2015eda14cbcSMatt Macy * We came here from metaslab_init() when creating/opening a 2016eda14cbcSMatt Macy * pool, looking at a metaslab that hasn't had any allocations 2017eda14cbcSMatt Macy * yet. 2018eda14cbcSMatt Macy */ 2019eda14cbcSMatt Macy return; 2020eda14cbcSMatt Macy } 2021eda14cbcSMatt Macy 2022eda14cbcSMatt Macy /* 2023eda14cbcSMatt Macy * This is similar to the actions that we take for the ms_freed 2024eda14cbcSMatt Macy * and ms_defer trees in metaslab_sync_done(). 2025eda14cbcSMatt Macy */ 2026eda14cbcSMatt Macy uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE; 2027eda14cbcSMatt Macy if (defer_allowed) { 2028eda14cbcSMatt Macy bcopy(msp->ms_synchist, msp->ms_deferhist[hist_index], 2029eda14cbcSMatt Macy sizeof (msp->ms_synchist)); 2030eda14cbcSMatt Macy } else { 2031eda14cbcSMatt Macy bzero(msp->ms_deferhist[hist_index], 2032eda14cbcSMatt Macy sizeof (msp->ms_deferhist[hist_index])); 2033eda14cbcSMatt Macy } 2034eda14cbcSMatt Macy bzero(msp->ms_synchist, sizeof (msp->ms_synchist)); 2035eda14cbcSMatt Macy } 2036eda14cbcSMatt Macy 2037eda14cbcSMatt Macy /* 2038eda14cbcSMatt Macy * Ensure that the metaslab's weight and fragmentation are consistent 2039eda14cbcSMatt Macy * with the contents of the histogram (either the range tree's histogram 2040eda14cbcSMatt Macy * or the space map's depending whether the metaslab is loaded). 
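 * In outline, the check below recomputes the weight via
 * metaslab_weight(msp, B_TRUE) (the nodirty flag keeps the
 * verification free of side effects) and VERIFYs that the cached
 * ms_weight, ms_fragmentation and ms_max_size are unchanged, bailing
 * out early in the cases where a recomputation would not be valid.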
2041eda14cbcSMatt Macy */ 2042eda14cbcSMatt Macy static void 2043eda14cbcSMatt Macy metaslab_verify_weight_and_frag(metaslab_t *msp) 2044eda14cbcSMatt Macy { 2045eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 2046eda14cbcSMatt Macy 2047eda14cbcSMatt Macy if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0) 2048eda14cbcSMatt Macy return; 2049eda14cbcSMatt Macy 2050eda14cbcSMatt Macy /* 2051eda14cbcSMatt Macy * We can end up here from vdev_remove_complete(), in which case we 2052eda14cbcSMatt Macy * cannot do these assertions because we hold spa config locks and 2053eda14cbcSMatt Macy * thus we are not allowed to read from the DMU. 2054eda14cbcSMatt Macy * 2055eda14cbcSMatt Macy * We check if the metaslab group has been removed and if that's 2056eda14cbcSMatt Macy * the case we return immediately as that would mean that we are 2057eda14cbcSMatt Macy * here from the aforementioned code path. 2058eda14cbcSMatt Macy */ 2059eda14cbcSMatt Macy if (msp->ms_group == NULL) 2060eda14cbcSMatt Macy return; 2061eda14cbcSMatt Macy 2062eda14cbcSMatt Macy /* 2063eda14cbcSMatt Macy * Devices being removed always return a weight of 0 and leave 2064eda14cbcSMatt Macy * fragmentation and ms_max_size as is - there is nothing for 2065eda14cbcSMatt Macy * us to verify here. 2066eda14cbcSMatt Macy */ 2067eda14cbcSMatt Macy vdev_t *vd = msp->ms_group->mg_vd; 2068eda14cbcSMatt Macy if (vd->vdev_removing) 2069eda14cbcSMatt Macy return; 2070eda14cbcSMatt Macy 2071eda14cbcSMatt Macy /* 2072eda14cbcSMatt Macy * If the metaslab is dirty it probably means that we've done 2073eda14cbcSMatt Macy * some allocations or frees that have changed our histograms 2074eda14cbcSMatt Macy * and thus the weight. 2075eda14cbcSMatt Macy */ 2076eda14cbcSMatt Macy for (int t = 0; t < TXG_SIZE; t++) { 2077eda14cbcSMatt Macy if (txg_list_member(&vd->vdev_ms_list, msp, t)) 2078eda14cbcSMatt Macy return; 2079eda14cbcSMatt Macy } 2080eda14cbcSMatt Macy 2081eda14cbcSMatt Macy /* 2082eda14cbcSMatt Macy * This verification checks that our in-memory state is consistent 2083eda14cbcSMatt Macy * with what's on disk. If the pool is read-only then there aren't 2084eda14cbcSMatt Macy * any changes and we just have the initially-loaded state. 2085eda14cbcSMatt Macy */ 2086eda14cbcSMatt Macy if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa)) 2087eda14cbcSMatt Macy return; 2088eda14cbcSMatt Macy 2089eda14cbcSMatt Macy /* some extra verification for in-core tree if you can */ 2090eda14cbcSMatt Macy if (msp->ms_loaded) { 2091eda14cbcSMatt Macy range_tree_stat_verify(msp->ms_allocatable); 2092eda14cbcSMatt Macy VERIFY(space_map_histogram_verify(msp->ms_sm, 2093eda14cbcSMatt Macy msp->ms_allocatable)); 2094eda14cbcSMatt Macy } 2095eda14cbcSMatt Macy 2096eda14cbcSMatt Macy uint64_t weight = msp->ms_weight; 2097eda14cbcSMatt Macy uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK; 2098eda14cbcSMatt Macy boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight); 2099eda14cbcSMatt Macy uint64_t frag = msp->ms_fragmentation; 2100eda14cbcSMatt Macy uint64_t max_segsize = msp->ms_max_size; 2101eda14cbcSMatt Macy 2102eda14cbcSMatt Macy msp->ms_weight = 0; 2103eda14cbcSMatt Macy msp->ms_fragmentation = 0; 2104eda14cbcSMatt Macy 2105eda14cbcSMatt Macy /* 2106eda14cbcSMatt Macy * This function is used for verification purposes and thus should 2107eda14cbcSMatt Macy * not introduce any side-effects/mutations on the system's state. 
2108eda14cbcSMatt Macy * 2109eda14cbcSMatt Macy * Regardless of whether metaslab_weight() thinks this metaslab 2110eda14cbcSMatt Macy * should be active or not, we want to ensure that the actual weight 2111eda14cbcSMatt Macy * (and therefore the value of ms_weight) would be the same if it 2112eda14cbcSMatt Macy * was to be recalculated at this point. 2113eda14cbcSMatt Macy * 2114eda14cbcSMatt Macy * In addition we set the nodirty flag so metaslab_weight() does 2115eda14cbcSMatt Macy * not dirty the metaslab for future TXGs (e.g. when trying to 2116eda14cbcSMatt Macy * force condensing to upgrade the metaslab spacemaps). 2117eda14cbcSMatt Macy */ 2118eda14cbcSMatt Macy msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active; 2119eda14cbcSMatt Macy 2120eda14cbcSMatt Macy VERIFY3U(max_segsize, ==, msp->ms_max_size); 2121eda14cbcSMatt Macy 2122eda14cbcSMatt Macy /* 2123eda14cbcSMatt Macy * If the weight type changed then there is no point in doing 2124eda14cbcSMatt Macy * verification. Revert fields to their original values. 2125eda14cbcSMatt Macy */ 2126eda14cbcSMatt Macy if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) || 2127eda14cbcSMatt Macy (!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) { 2128eda14cbcSMatt Macy msp->ms_fragmentation = frag; 2129eda14cbcSMatt Macy msp->ms_weight = weight; 2130eda14cbcSMatt Macy return; 2131eda14cbcSMatt Macy } 2132eda14cbcSMatt Macy 2133eda14cbcSMatt Macy VERIFY3U(msp->ms_fragmentation, ==, frag); 2134eda14cbcSMatt Macy VERIFY3U(msp->ms_weight, ==, weight); 2135eda14cbcSMatt Macy } 2136eda14cbcSMatt Macy 2137eda14cbcSMatt Macy /* 2138eda14cbcSMatt Macy * If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from 2139eda14cbcSMatt Macy * this class that was used longest ago, and attempt to unload it. We don't 2140eda14cbcSMatt Macy * want to spend too much time in this loop to prevent performance 2141eda14cbcSMatt Macy * degradation, and we expect that most of the time this operation will 2142eda14cbcSMatt Macy * succeed. Between that and the normal unloading processing during txg sync, 2143eda14cbcSMatt Macy * we expect this to keep the metaslab memory usage under control. 
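 * In terms of the code below: eviction is attempted only while
 * spl_kmem_cache_inuse(zfs_btree_leaf_cache) times the cache entry
 * size exceeds zfs_metaslab_mem_limit percent of arc_all_memory(),
 * and the loop gives up after twice the number of multilist sublists
 * worth of random sublist picks so that it cannot spin for long.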
2144eda14cbcSMatt Macy */ 2145eda14cbcSMatt Macy static void 2146eda14cbcSMatt Macy metaslab_potentially_evict(metaslab_class_t *mc) 2147eda14cbcSMatt Macy { 2148eda14cbcSMatt Macy #ifdef _KERNEL 2149eda14cbcSMatt Macy uint64_t allmem = arc_all_memory(); 2150eda14cbcSMatt Macy uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache); 2151eda14cbcSMatt Macy uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache); 2152eda14cbcSMatt Macy int tries = 0; 2153eda14cbcSMatt Macy for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size && 2154eda14cbcSMatt Macy tries < multilist_get_num_sublists(mc->mc_metaslab_txg_list) * 2; 2155eda14cbcSMatt Macy tries++) { 2156eda14cbcSMatt Macy unsigned int idx = multilist_get_random_index( 2157eda14cbcSMatt Macy mc->mc_metaslab_txg_list); 2158eda14cbcSMatt Macy multilist_sublist_t *mls = 2159eda14cbcSMatt Macy multilist_sublist_lock(mc->mc_metaslab_txg_list, idx); 2160eda14cbcSMatt Macy metaslab_t *msp = multilist_sublist_head(mls); 2161eda14cbcSMatt Macy multilist_sublist_unlock(mls); 2162eda14cbcSMatt Macy while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 < 2163eda14cbcSMatt Macy inuse * size) { 2164eda14cbcSMatt Macy VERIFY3P(mls, ==, multilist_sublist_lock( 2165eda14cbcSMatt Macy mc->mc_metaslab_txg_list, idx)); 2166eda14cbcSMatt Macy ASSERT3U(idx, ==, 2167eda14cbcSMatt Macy metaslab_idx_func(mc->mc_metaslab_txg_list, msp)); 2168eda14cbcSMatt Macy 2169eda14cbcSMatt Macy if (!multilist_link_active(&msp->ms_class_txg_node)) { 2170eda14cbcSMatt Macy multilist_sublist_unlock(mls); 2171eda14cbcSMatt Macy break; 2172eda14cbcSMatt Macy } 2173eda14cbcSMatt Macy metaslab_t *next_msp = multilist_sublist_next(mls, msp); 2174eda14cbcSMatt Macy multilist_sublist_unlock(mls); 2175eda14cbcSMatt Macy /* 2176eda14cbcSMatt Macy * If the metaslab is currently loading there are two 2177eda14cbcSMatt Macy * cases. If it's the metaslab we're evicting, we 2178eda14cbcSMatt Macy * can't continue on or we'll panic when we attempt to 2179eda14cbcSMatt Macy * recursively lock the mutex. If it's another 2180eda14cbcSMatt Macy * metaslab that's loading, it can be safely skipped, 2181eda14cbcSMatt Macy * since we know it's very new and therefore not a 2182eda14cbcSMatt Macy * good eviction candidate. We check later once the 2183eda14cbcSMatt Macy * lock is held that the metaslab is fully loaded 2184eda14cbcSMatt Macy * before actually unloading it. 2185eda14cbcSMatt Macy */ 2186eda14cbcSMatt Macy if (msp->ms_loading) { 2187eda14cbcSMatt Macy msp = next_msp; 2188eda14cbcSMatt Macy inuse = 2189eda14cbcSMatt Macy spl_kmem_cache_inuse(zfs_btree_leaf_cache); 2190eda14cbcSMatt Macy continue; 2191eda14cbcSMatt Macy } 2192eda14cbcSMatt Macy /* 2193eda14cbcSMatt Macy * We can't unload metaslabs with no spacemap because 2194eda14cbcSMatt Macy * they're not ready to be unloaded yet. We can't 2195eda14cbcSMatt Macy * unload metaslabs with outstanding allocations 2196eda14cbcSMatt Macy * because doing so could cause the metaslab's weight 2197eda14cbcSMatt Macy * to decrease while it's unloaded, which violates an 2198eda14cbcSMatt Macy * invariant that we use to prevent unnecessary 2199eda14cbcSMatt Macy * loading. We also don't unload metaslabs that are 2200eda14cbcSMatt Macy * currently active because they are high-weight 2201eda14cbcSMatt Macy * metaslabs that are likely to be used in the near 2202eda14cbcSMatt Macy * future. 
2203eda14cbcSMatt Macy */ 2204eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 2205eda14cbcSMatt Macy if (msp->ms_allocator == -1 && msp->ms_sm != NULL && 2206eda14cbcSMatt Macy msp->ms_allocating_total == 0) { 2207eda14cbcSMatt Macy metaslab_unload(msp); 2208eda14cbcSMatt Macy } 2209eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 2210eda14cbcSMatt Macy msp = next_msp; 2211eda14cbcSMatt Macy inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache); 2212eda14cbcSMatt Macy } 2213eda14cbcSMatt Macy } 2214eda14cbcSMatt Macy #endif 2215eda14cbcSMatt Macy } 2216eda14cbcSMatt Macy 2217eda14cbcSMatt Macy static int 2218eda14cbcSMatt Macy metaslab_load_impl(metaslab_t *msp) 2219eda14cbcSMatt Macy { 2220eda14cbcSMatt Macy int error = 0; 2221eda14cbcSMatt Macy 2222eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 2223eda14cbcSMatt Macy ASSERT(msp->ms_loading); 2224eda14cbcSMatt Macy ASSERT(!msp->ms_condensing); 2225eda14cbcSMatt Macy 2226eda14cbcSMatt Macy /* 2227eda14cbcSMatt Macy * We temporarily drop the lock to unblock other operations while we 2228eda14cbcSMatt Macy * are reading the space map. Therefore, metaslab_sync() and 2229eda14cbcSMatt Macy * metaslab_sync_done() can run at the same time as we do. 2230eda14cbcSMatt Macy * 2231eda14cbcSMatt Macy * If we are using the log space maps, metaslab_sync() can't write to 2232eda14cbcSMatt Macy * the metaslab's space map while we are loading as we only write to 2233eda14cbcSMatt Macy * it when we are flushing the metaslab, and that can't happen while 2234eda14cbcSMatt Macy * we are loading it. 2235eda14cbcSMatt Macy * 2236eda14cbcSMatt Macy * If we are not using log space maps though, metaslab_sync() can 2237eda14cbcSMatt Macy * append to the space map while we are loading. Therefore we load 2238eda14cbcSMatt Macy * only entries that existed when we started the load. Additionally, 2239eda14cbcSMatt Macy * metaslab_sync_done() has to wait for the load to complete because 2240eda14cbcSMatt Macy * there are potential races like metaslab_load() loading parts of the 2241eda14cbcSMatt Macy * space map that are currently being appended by metaslab_sync(). If 2242eda14cbcSMatt Macy * we didn't, the ms_allocatable would have entries that 2243eda14cbcSMatt Macy * metaslab_sync_done() would try to re-add later. 2244eda14cbcSMatt Macy * 2245eda14cbcSMatt Macy * That's why before dropping the lock we remember the synced length 2246eda14cbcSMatt Macy * of the metaslab and read up to that point of the space map, 2247eda14cbcSMatt Macy * ignoring entries appended by metaslab_sync() that happen after we 2248eda14cbcSMatt Macy * drop the lock. 
2249eda14cbcSMatt Macy */ 2250eda14cbcSMatt Macy uint64_t length = msp->ms_synced_length; 2251eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 2252eda14cbcSMatt Macy 2253eda14cbcSMatt Macy hrtime_t load_start = gethrtime(); 2254eda14cbcSMatt Macy metaslab_rt_arg_t *mrap; 2255eda14cbcSMatt Macy if (msp->ms_allocatable->rt_arg == NULL) { 2256eda14cbcSMatt Macy mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP); 2257eda14cbcSMatt Macy } else { 2258eda14cbcSMatt Macy mrap = msp->ms_allocatable->rt_arg; 2259eda14cbcSMatt Macy msp->ms_allocatable->rt_ops = NULL; 2260eda14cbcSMatt Macy msp->ms_allocatable->rt_arg = NULL; 2261eda14cbcSMatt Macy } 2262eda14cbcSMatt Macy mrap->mra_bt = &msp->ms_allocatable_by_size; 2263eda14cbcSMatt Macy mrap->mra_floor_shift = metaslab_by_size_min_shift; 2264eda14cbcSMatt Macy 2265eda14cbcSMatt Macy if (msp->ms_sm != NULL) { 2266eda14cbcSMatt Macy error = space_map_load_length(msp->ms_sm, msp->ms_allocatable, 2267eda14cbcSMatt Macy SM_FREE, length); 2268eda14cbcSMatt Macy 2269eda14cbcSMatt Macy /* Now, populate the size-sorted tree. */ 2270eda14cbcSMatt Macy metaslab_rt_create(msp->ms_allocatable, mrap); 2271eda14cbcSMatt Macy msp->ms_allocatable->rt_ops = &metaslab_rt_ops; 2272eda14cbcSMatt Macy msp->ms_allocatable->rt_arg = mrap; 2273eda14cbcSMatt Macy 2274eda14cbcSMatt Macy struct mssa_arg arg = {0}; 2275eda14cbcSMatt Macy arg.rt = msp->ms_allocatable; 2276eda14cbcSMatt Macy arg.mra = mrap; 2277eda14cbcSMatt Macy range_tree_walk(msp->ms_allocatable, metaslab_size_sorted_add, 2278eda14cbcSMatt Macy &arg); 2279eda14cbcSMatt Macy } else { 2280eda14cbcSMatt Macy /* 2281eda14cbcSMatt Macy * Add the size-sorted tree first, since we don't need to load 2282eda14cbcSMatt Macy * the metaslab from the spacemap. 2283eda14cbcSMatt Macy */ 2284eda14cbcSMatt Macy metaslab_rt_create(msp->ms_allocatable, mrap); 2285eda14cbcSMatt Macy msp->ms_allocatable->rt_ops = &metaslab_rt_ops; 2286eda14cbcSMatt Macy msp->ms_allocatable->rt_arg = mrap; 2287eda14cbcSMatt Macy /* 2288eda14cbcSMatt Macy * The space map has not been allocated yet, so treat 2289eda14cbcSMatt Macy * all the space in the metaslab as free and add it to the 2290eda14cbcSMatt Macy * ms_allocatable tree. 2291eda14cbcSMatt Macy */ 2292eda14cbcSMatt Macy range_tree_add(msp->ms_allocatable, 2293eda14cbcSMatt Macy msp->ms_start, msp->ms_size); 2294eda14cbcSMatt Macy 2295eda14cbcSMatt Macy if (msp->ms_freed != NULL) { 2296eda14cbcSMatt Macy /* 2297eda14cbcSMatt Macy * If the ms_sm doesn't exist, this means that this 2298eda14cbcSMatt Macy * metaslab hasn't gone through metaslab_sync() and 2299eda14cbcSMatt Macy * thus has never been dirtied. So we shouldn't 2300eda14cbcSMatt Macy * expect any unflushed allocs or frees from previous 2301eda14cbcSMatt Macy * TXGs. 2302eda14cbcSMatt Macy * 2303eda14cbcSMatt Macy * Note: ms_freed and all the other trees except for 2304eda14cbcSMatt Macy * the ms_allocatable, can be NULL at this point only 2305eda14cbcSMatt Macy * if this is a new metaslab of a vdev that just got 2306eda14cbcSMatt Macy * expanded. 
2307eda14cbcSMatt Macy */ 2308eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs)); 2309eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_unflushed_frees)); 2310eda14cbcSMatt Macy } 2311eda14cbcSMatt Macy } 2312eda14cbcSMatt Macy 2313eda14cbcSMatt Macy /* 2314eda14cbcSMatt Macy * We need to grab the ms_sync_lock to prevent metaslab_sync() from 2315eda14cbcSMatt Macy * changing the ms_sm (or log_sm) and the metaslab's range trees 2316eda14cbcSMatt Macy * while we are about to use them and populate the ms_allocatable. 2317eda14cbcSMatt Macy * The ms_lock is insufficient for this because metaslab_sync() doesn't 2318eda14cbcSMatt Macy * hold the ms_lock while writing the ms_checkpointing tree to disk. 2319eda14cbcSMatt Macy */ 2320eda14cbcSMatt Macy mutex_enter(&msp->ms_sync_lock); 2321eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 2322eda14cbcSMatt Macy 2323eda14cbcSMatt Macy ASSERT(!msp->ms_condensing); 2324eda14cbcSMatt Macy ASSERT(!msp->ms_flushing); 2325eda14cbcSMatt Macy 2326eda14cbcSMatt Macy if (error != 0) { 2327eda14cbcSMatt Macy mutex_exit(&msp->ms_sync_lock); 2328eda14cbcSMatt Macy return (error); 2329eda14cbcSMatt Macy } 2330eda14cbcSMatt Macy 2331eda14cbcSMatt Macy ASSERT3P(msp->ms_group, !=, NULL); 2332eda14cbcSMatt Macy msp->ms_loaded = B_TRUE; 2333eda14cbcSMatt Macy 2334eda14cbcSMatt Macy /* 2335eda14cbcSMatt Macy * Apply all the unflushed changes to ms_allocatable right 2336eda14cbcSMatt Macy * away so any manipulations we do below have a clear view 2337eda14cbcSMatt Macy * of what is allocated and what is free. 2338eda14cbcSMatt Macy */ 2339eda14cbcSMatt Macy range_tree_walk(msp->ms_unflushed_allocs, 2340eda14cbcSMatt Macy range_tree_remove, msp->ms_allocatable); 2341eda14cbcSMatt Macy range_tree_walk(msp->ms_unflushed_frees, 2342eda14cbcSMatt Macy range_tree_add, msp->ms_allocatable); 2343eda14cbcSMatt Macy 2344eda14cbcSMatt Macy msp->ms_loaded = B_TRUE; 2345eda14cbcSMatt Macy 2346eda14cbcSMatt Macy ASSERT3P(msp->ms_group, !=, NULL); 2347eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2348eda14cbcSMatt Macy if (spa_syncing_log_sm(spa) != NULL) { 2349eda14cbcSMatt Macy ASSERT(spa_feature_is_enabled(spa, 2350eda14cbcSMatt Macy SPA_FEATURE_LOG_SPACEMAP)); 2351eda14cbcSMatt Macy 2352eda14cbcSMatt Macy /* 2353eda14cbcSMatt Macy * If we use a log space map we add all the segments 2354eda14cbcSMatt Macy * that are in ms_unflushed_frees so they are available 2355eda14cbcSMatt Macy * for allocation. 2356eda14cbcSMatt Macy * 2357eda14cbcSMatt Macy * ms_allocatable needs to contain all free segments 2358eda14cbcSMatt Macy * that are ready for allocations (thus not segments 2359eda14cbcSMatt Macy * from ms_freeing, ms_freed, and the ms_defer trees). 2360eda14cbcSMatt Macy * But if we grab the lock in this code path at a sync 2361eda14cbcSMatt Macy * pass later that 1, then it also contains the 2362eda14cbcSMatt Macy * segments of ms_freed (they were added to it earlier 2363eda14cbcSMatt Macy * in this path through ms_unflushed_frees). So we 2364eda14cbcSMatt Macy * need to remove all the segments that exist in 2365eda14cbcSMatt Macy * ms_freed from ms_allocatable as they will be added 2366eda14cbcSMatt Macy * later in metaslab_sync_done(). 2367eda14cbcSMatt Macy * 2368eda14cbcSMatt Macy * When there's no log space map, the ms_allocatable 2369eda14cbcSMatt Macy * correctly doesn't contain any segments that exist 2370eda14cbcSMatt Macy * in ms_freed [see ms_synced_length]. 
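 *
 * (If we got here at a sync pass later than 1, the ms_freed
 * segments were just added to ms_allocatable above through
 * ms_unflushed_frees; the walk below takes them back out until
 * metaslab_sync_done() makes them allocatable again.)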
2371eda14cbcSMatt Macy */ 2372eda14cbcSMatt Macy range_tree_walk(msp->ms_freed, 2373eda14cbcSMatt Macy range_tree_remove, msp->ms_allocatable); 2374eda14cbcSMatt Macy } 2375eda14cbcSMatt Macy 2376eda14cbcSMatt Macy /* 2377eda14cbcSMatt Macy * If we are not using the log space map, ms_allocatable 2378eda14cbcSMatt Macy * contains the segments that exist in the ms_defer trees 2379eda14cbcSMatt Macy * [see ms_synced_length]. Thus we need to remove them 2380eda14cbcSMatt Macy * from ms_allocatable as they will be added again in 2381eda14cbcSMatt Macy * metaslab_sync_done(). 2382eda14cbcSMatt Macy * 2383eda14cbcSMatt Macy * If we are using the log space map, ms_allocatable still 2384eda14cbcSMatt Macy * contains the segments that exist in the ms_defer trees. 2385eda14cbcSMatt Macy * Not because it read them through the ms_sm though. But 2386eda14cbcSMatt Macy * because these segments are part of ms_unflushed_frees 2387eda14cbcSMatt Macy * whose segments we add to ms_allocatable earlier in this 2388eda14cbcSMatt Macy * code path. 2389eda14cbcSMatt Macy */ 2390eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2391eda14cbcSMatt Macy range_tree_walk(msp->ms_defer[t], 2392eda14cbcSMatt Macy range_tree_remove, msp->ms_allocatable); 2393eda14cbcSMatt Macy } 2394eda14cbcSMatt Macy 2395eda14cbcSMatt Macy /* 2396eda14cbcSMatt Macy * Call metaslab_recalculate_weight_and_sort() now that the 2397eda14cbcSMatt Macy * metaslab is loaded so we get the metaslab's real weight. 2398eda14cbcSMatt Macy * 2399eda14cbcSMatt Macy * Unless this metaslab was created with older software and 2400eda14cbcSMatt Macy * has not yet been converted to use segment-based weight, we 2401eda14cbcSMatt Macy * expect the new weight to be better or equal to the weight 2402eda14cbcSMatt Macy * that the metaslab had while it was not loaded. This is 2403eda14cbcSMatt Macy * because the old weight does not take into account the 2404eda14cbcSMatt Macy * consolidation of adjacent segments between TXGs. 
[see 2405eda14cbcSMatt Macy * comment for ms_synchist and ms_deferhist[] for more info] 2406eda14cbcSMatt Macy */ 2407eda14cbcSMatt Macy uint64_t weight = msp->ms_weight; 2408eda14cbcSMatt Macy uint64_t max_size = msp->ms_max_size; 2409eda14cbcSMatt Macy metaslab_recalculate_weight_and_sort(msp); 2410eda14cbcSMatt Macy if (!WEIGHT_IS_SPACEBASED(weight)) 2411eda14cbcSMatt Macy ASSERT3U(weight, <=, msp->ms_weight); 2412eda14cbcSMatt Macy msp->ms_max_size = metaslab_largest_allocatable(msp); 2413eda14cbcSMatt Macy ASSERT3U(max_size, <=, msp->ms_max_size); 2414eda14cbcSMatt Macy hrtime_t load_end = gethrtime(); 2415eda14cbcSMatt Macy msp->ms_load_time = load_end; 2416eda14cbcSMatt Macy zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, " 2417eda14cbcSMatt Macy "ms_id %llu, smp_length %llu, " 2418eda14cbcSMatt Macy "unflushed_allocs %llu, unflushed_frees %llu, " 2419eda14cbcSMatt Macy "freed %llu, defer %llu + %llu, unloaded time %llu ms, " 2420eda14cbcSMatt Macy "loading_time %lld ms, ms_max_size %llu, " 2421eda14cbcSMatt Macy "max size error %lld, " 2422eda14cbcSMatt Macy "old_weight %llx, new_weight %llx", 2423eda14cbcSMatt Macy spa_syncing_txg(spa), spa_name(spa), 2424eda14cbcSMatt Macy msp->ms_group->mg_vd->vdev_id, msp->ms_id, 2425eda14cbcSMatt Macy space_map_length(msp->ms_sm), 2426eda14cbcSMatt Macy range_tree_space(msp->ms_unflushed_allocs), 2427eda14cbcSMatt Macy range_tree_space(msp->ms_unflushed_frees), 2428eda14cbcSMatt Macy range_tree_space(msp->ms_freed), 2429eda14cbcSMatt Macy range_tree_space(msp->ms_defer[0]), 2430eda14cbcSMatt Macy range_tree_space(msp->ms_defer[1]), 2431eda14cbcSMatt Macy (longlong_t)((load_start - msp->ms_unload_time) / 1000000), 2432eda14cbcSMatt Macy (longlong_t)((load_end - load_start) / 1000000), 2433eda14cbcSMatt Macy msp->ms_max_size, msp->ms_max_size - max_size, 2434eda14cbcSMatt Macy weight, msp->ms_weight); 2435eda14cbcSMatt Macy 2436eda14cbcSMatt Macy metaslab_verify_space(msp, spa_syncing_txg(spa)); 2437eda14cbcSMatt Macy mutex_exit(&msp->ms_sync_lock); 2438eda14cbcSMatt Macy return (0); 2439eda14cbcSMatt Macy } 2440eda14cbcSMatt Macy 2441eda14cbcSMatt Macy int 2442eda14cbcSMatt Macy metaslab_load(metaslab_t *msp) 2443eda14cbcSMatt Macy { 2444eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 2445eda14cbcSMatt Macy 2446eda14cbcSMatt Macy /* 2447eda14cbcSMatt Macy * There may be another thread loading the same metaslab, if that's 2448eda14cbcSMatt Macy * the case just wait until the other thread is done and return. 2449eda14cbcSMatt Macy */ 2450eda14cbcSMatt Macy metaslab_load_wait(msp); 2451eda14cbcSMatt Macy if (msp->ms_loaded) 2452eda14cbcSMatt Macy return (0); 2453eda14cbcSMatt Macy VERIFY(!msp->ms_loading); 2454eda14cbcSMatt Macy ASSERT(!msp->ms_condensing); 2455eda14cbcSMatt Macy 2456eda14cbcSMatt Macy /* 2457eda14cbcSMatt Macy * We set the loading flag BEFORE potentially dropping the lock to 2458eda14cbcSMatt Macy * wait for an ongoing flush (see ms_flushing below). This way other 2459eda14cbcSMatt Macy * threads know that there is already a thread that is loading this 2460eda14cbcSMatt Macy * metaslab. 2461eda14cbcSMatt Macy */ 2462eda14cbcSMatt Macy msp->ms_loading = B_TRUE; 2463eda14cbcSMatt Macy 2464eda14cbcSMatt Macy /* 2465eda14cbcSMatt Macy * Wait for any in-progress flushing to finish as we drop the ms_lock 2466eda14cbcSMatt Macy * both here (during space_map_load()) and in metaslab_flush() (when 2467eda14cbcSMatt Macy * we flush our changes to the ms_sm). 
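 *
 * Since ms_loading is already set, any other thread that wants
 * this metaslab while we wait here will block in
 * metaslab_load_wait() rather than starting a second load.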
2468eda14cbcSMatt Macy */ 2469eda14cbcSMatt Macy if (msp->ms_flushing) 2470eda14cbcSMatt Macy metaslab_flush_wait(msp); 2471eda14cbcSMatt Macy 2472eda14cbcSMatt Macy /* 2473eda14cbcSMatt Macy * In the possibility that we were waiting for the metaslab to be 2474eda14cbcSMatt Macy * flushed (where we temporarily dropped the ms_lock), ensure that 2475eda14cbcSMatt Macy * no one else loaded the metaslab somehow. 2476eda14cbcSMatt Macy */ 2477eda14cbcSMatt Macy ASSERT(!msp->ms_loaded); 2478eda14cbcSMatt Macy 2479eda14cbcSMatt Macy /* 2480eda14cbcSMatt Macy * If we're loading a metaslab in the normal class, consider evicting 2481eda14cbcSMatt Macy * another one to keep our memory usage under the limit defined by the 2482eda14cbcSMatt Macy * zfs_metaslab_mem_limit tunable. 2483eda14cbcSMatt Macy */ 2484eda14cbcSMatt Macy if (spa_normal_class(msp->ms_group->mg_class->mc_spa) == 2485eda14cbcSMatt Macy msp->ms_group->mg_class) { 2486eda14cbcSMatt Macy metaslab_potentially_evict(msp->ms_group->mg_class); 2487eda14cbcSMatt Macy } 2488eda14cbcSMatt Macy 2489eda14cbcSMatt Macy int error = metaslab_load_impl(msp); 2490eda14cbcSMatt Macy 2491eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 2492eda14cbcSMatt Macy msp->ms_loading = B_FALSE; 2493eda14cbcSMatt Macy cv_broadcast(&msp->ms_load_cv); 2494eda14cbcSMatt Macy 2495eda14cbcSMatt Macy return (error); 2496eda14cbcSMatt Macy } 2497eda14cbcSMatt Macy 2498eda14cbcSMatt Macy void 2499eda14cbcSMatt Macy metaslab_unload(metaslab_t *msp) 2500eda14cbcSMatt Macy { 2501eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 2502eda14cbcSMatt Macy 2503eda14cbcSMatt Macy /* 2504eda14cbcSMatt Macy * This can happen if a metaslab is selected for eviction (in 2505eda14cbcSMatt Macy * metaslab_potentially_evict) and then unloaded during spa_sync (via 2506eda14cbcSMatt Macy * metaslab_class_evict_old). 
2507eda14cbcSMatt Macy */ 2508eda14cbcSMatt Macy if (!msp->ms_loaded) 2509eda14cbcSMatt Macy return; 2510eda14cbcSMatt Macy 2511eda14cbcSMatt Macy range_tree_vacate(msp->ms_allocatable, NULL, NULL); 2512eda14cbcSMatt Macy msp->ms_loaded = B_FALSE; 2513eda14cbcSMatt Macy msp->ms_unload_time = gethrtime(); 2514eda14cbcSMatt Macy 2515eda14cbcSMatt Macy msp->ms_activation_weight = 0; 2516eda14cbcSMatt Macy msp->ms_weight &= ~METASLAB_ACTIVE_MASK; 2517eda14cbcSMatt Macy 2518eda14cbcSMatt Macy if (msp->ms_group != NULL) { 2519eda14cbcSMatt Macy metaslab_class_t *mc = msp->ms_group->mg_class; 2520eda14cbcSMatt Macy multilist_sublist_t *mls = 2521eda14cbcSMatt Macy multilist_sublist_lock_obj(mc->mc_metaslab_txg_list, msp); 2522eda14cbcSMatt Macy if (multilist_link_active(&msp->ms_class_txg_node)) 2523eda14cbcSMatt Macy multilist_sublist_remove(mls, msp); 2524eda14cbcSMatt Macy multilist_sublist_unlock(mls); 2525eda14cbcSMatt Macy 2526eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2527eda14cbcSMatt Macy zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, " 2528eda14cbcSMatt Macy "ms_id %llu, weight %llx, " 2529eda14cbcSMatt Macy "selected txg %llu (%llu ms ago), alloc_txg %llu, " 2530eda14cbcSMatt Macy "loaded %llu ms ago, max_size %llu", 2531eda14cbcSMatt Macy spa_syncing_txg(spa), spa_name(spa), 2532eda14cbcSMatt Macy msp->ms_group->mg_vd->vdev_id, msp->ms_id, 2533eda14cbcSMatt Macy msp->ms_weight, 2534eda14cbcSMatt Macy msp->ms_selected_txg, 2535eda14cbcSMatt Macy (msp->ms_unload_time - msp->ms_selected_time) / 1000 / 1000, 2536eda14cbcSMatt Macy msp->ms_alloc_txg, 2537eda14cbcSMatt Macy (msp->ms_unload_time - msp->ms_load_time) / 1000 / 1000, 2538eda14cbcSMatt Macy msp->ms_max_size); 2539eda14cbcSMatt Macy } 2540eda14cbcSMatt Macy 2541eda14cbcSMatt Macy /* 2542eda14cbcSMatt Macy * We explicitly recalculate the metaslab's weight based on its space 2543eda14cbcSMatt Macy * map (as it is now not loaded). We want unload metaslabs to always 2544eda14cbcSMatt Macy * have their weights calculated from the space map histograms, while 2545eda14cbcSMatt Macy * loaded ones have it calculated from their in-core range tree 2546eda14cbcSMatt Macy * [see metaslab_load()]. This way, the weight reflects the information 2547eda14cbcSMatt Macy * available in-core, whether it is loaded or not. 2548eda14cbcSMatt Macy * 2549eda14cbcSMatt Macy * If ms_group == NULL means that we came here from metaslab_fini(), 2550eda14cbcSMatt Macy * at which point it doesn't make sense for us to do the recalculation 2551eda14cbcSMatt Macy * and the sorting. 2552eda14cbcSMatt Macy */ 2553eda14cbcSMatt Macy if (msp->ms_group != NULL) 2554eda14cbcSMatt Macy metaslab_recalculate_weight_and_sort(msp); 2555eda14cbcSMatt Macy } 2556eda14cbcSMatt Macy 2557eda14cbcSMatt Macy /* 2558eda14cbcSMatt Macy * We want to optimize the memory use of the per-metaslab range 2559eda14cbcSMatt Macy * trees. To do this, we store the segments in the range trees in 2560eda14cbcSMatt Macy * units of sectors, zero-indexing from the start of the metaslab. If 2561eda14cbcSMatt Macy * the vdev_ms_shift - the vdev_ashift is less than 32, we can store 2562eda14cbcSMatt Macy * the ranges using two uint32_ts, rather than two uint64_ts. 
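 *
 * For example, a 16GB metaslab (vdev_ms_shift = 34) on a vdev
 * with 4KB sectors (vdev_ashift = 12) needs only 34 - 12 = 22
 * bits to express a sector offset within the metaslab, so the
 * 32-bit representation (RANGE_SEG32) is sufficient; otherwise,
 * or when zfs_metaslab_force_large_segs is set, we fall back to
 * RANGE_SEG64.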
2563eda14cbcSMatt Macy */ 2564eda14cbcSMatt Macy range_seg_type_t 2565eda14cbcSMatt Macy metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp, 2566eda14cbcSMatt Macy uint64_t *start, uint64_t *shift) 2567eda14cbcSMatt Macy { 2568eda14cbcSMatt Macy if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 && 2569eda14cbcSMatt Macy !zfs_metaslab_force_large_segs) { 2570eda14cbcSMatt Macy *shift = vdev->vdev_ashift; 2571eda14cbcSMatt Macy *start = msp->ms_start; 2572eda14cbcSMatt Macy return (RANGE_SEG32); 2573eda14cbcSMatt Macy } else { 2574eda14cbcSMatt Macy *shift = 0; 2575eda14cbcSMatt Macy *start = 0; 2576eda14cbcSMatt Macy return (RANGE_SEG64); 2577eda14cbcSMatt Macy } 2578eda14cbcSMatt Macy } 2579eda14cbcSMatt Macy 2580eda14cbcSMatt Macy void 2581eda14cbcSMatt Macy metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg) 2582eda14cbcSMatt Macy { 2583eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 2584eda14cbcSMatt Macy metaslab_class_t *mc = msp->ms_group->mg_class; 2585eda14cbcSMatt Macy multilist_sublist_t *mls = 2586eda14cbcSMatt Macy multilist_sublist_lock_obj(mc->mc_metaslab_txg_list, msp); 2587eda14cbcSMatt Macy if (multilist_link_active(&msp->ms_class_txg_node)) 2588eda14cbcSMatt Macy multilist_sublist_remove(mls, msp); 2589eda14cbcSMatt Macy msp->ms_selected_txg = txg; 2590eda14cbcSMatt Macy msp->ms_selected_time = gethrtime(); 2591eda14cbcSMatt Macy multilist_sublist_insert_tail(mls, msp); 2592eda14cbcSMatt Macy multilist_sublist_unlock(mls); 2593eda14cbcSMatt Macy } 2594eda14cbcSMatt Macy 2595eda14cbcSMatt Macy void 2596eda14cbcSMatt Macy metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta, 2597eda14cbcSMatt Macy int64_t defer_delta, int64_t space_delta) 2598eda14cbcSMatt Macy { 2599eda14cbcSMatt Macy vdev_space_update(vd, alloc_delta, defer_delta, space_delta); 2600eda14cbcSMatt Macy 2601eda14cbcSMatt Macy ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent); 2602eda14cbcSMatt Macy ASSERT(vd->vdev_ms_count != 0); 2603eda14cbcSMatt Macy 2604eda14cbcSMatt Macy metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta, 2605eda14cbcSMatt Macy vdev_deflated_space(vd, space_delta)); 2606eda14cbcSMatt Macy } 2607eda14cbcSMatt Macy 2608eda14cbcSMatt Macy int 2609eda14cbcSMatt Macy metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, 2610eda14cbcSMatt Macy uint64_t txg, metaslab_t **msp) 2611eda14cbcSMatt Macy { 2612eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 2613eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 2614eda14cbcSMatt Macy objset_t *mos = spa->spa_meta_objset; 2615eda14cbcSMatt Macy metaslab_t *ms; 2616eda14cbcSMatt Macy int error; 2617eda14cbcSMatt Macy 2618eda14cbcSMatt Macy ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP); 2619eda14cbcSMatt Macy mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL); 2620eda14cbcSMatt Macy mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL); 2621eda14cbcSMatt Macy cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL); 2622eda14cbcSMatt Macy cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL); 2623eda14cbcSMatt Macy multilist_link_init(&ms->ms_class_txg_node); 2624eda14cbcSMatt Macy 2625eda14cbcSMatt Macy ms->ms_id = id; 2626eda14cbcSMatt Macy ms->ms_start = id << vd->vdev_ms_shift; 2627eda14cbcSMatt Macy ms->ms_size = 1ULL << vd->vdev_ms_shift; 2628eda14cbcSMatt Macy ms->ms_allocator = -1; 2629eda14cbcSMatt Macy ms->ms_new = B_TRUE; 2630eda14cbcSMatt Macy 26317877fdebSMatt Macy vdev_ops_t *ops = vd->vdev_ops; 26327877fdebSMatt Macy if (ops->vdev_op_metaslab_init != NULL) 
26337877fdebSMatt Macy ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size); 26347877fdebSMatt Macy 2635eda14cbcSMatt Macy /* 2636eda14cbcSMatt Macy * We only open space map objects that already exist. All others 2637eda14cbcSMatt Macy * will be opened when we finally allocate an object for it. 2638eda14cbcSMatt Macy * 2639eda14cbcSMatt Macy * Note: 2640eda14cbcSMatt Macy * When called from vdev_expand(), we can't call into the DMU as 2641eda14cbcSMatt Macy * we are holding the spa_config_lock as a writer and we would 2642eda14cbcSMatt Macy * deadlock [see relevant comment in vdev_metaslab_init()]. in 2643eda14cbcSMatt Macy * that case, the object parameter is zero though, so we won't 2644eda14cbcSMatt Macy * call into the DMU. 2645eda14cbcSMatt Macy */ 2646eda14cbcSMatt Macy if (object != 0) { 2647eda14cbcSMatt Macy error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start, 2648eda14cbcSMatt Macy ms->ms_size, vd->vdev_ashift); 2649eda14cbcSMatt Macy 2650eda14cbcSMatt Macy if (error != 0) { 2651eda14cbcSMatt Macy kmem_free(ms, sizeof (metaslab_t)); 2652eda14cbcSMatt Macy return (error); 2653eda14cbcSMatt Macy } 2654eda14cbcSMatt Macy 2655eda14cbcSMatt Macy ASSERT(ms->ms_sm != NULL); 2656eda14cbcSMatt Macy ms->ms_allocated_space = space_map_allocated(ms->ms_sm); 2657eda14cbcSMatt Macy } 2658eda14cbcSMatt Macy 2659eda14cbcSMatt Macy range_seg_type_t type; 2660eda14cbcSMatt Macy uint64_t shift, start; 2661eda14cbcSMatt Macy type = metaslab_calculate_range_tree_type(vd, ms, &start, &shift); 2662eda14cbcSMatt Macy 2663eda14cbcSMatt Macy /* 2664eda14cbcSMatt Macy * We create the ms_allocatable here, but we don't create the 2665eda14cbcSMatt Macy * other range trees until metaslab_sync_done(). This serves 2666eda14cbcSMatt Macy * two purposes: it allows metaslab_sync_done() to detect the 2667eda14cbcSMatt Macy * addition of new space; and for debugging, it ensures that 2668eda14cbcSMatt Macy * we'd data fault on any attempt to use this metaslab before 2669eda14cbcSMatt Macy * it's ready. 2670eda14cbcSMatt Macy */ 2671eda14cbcSMatt Macy ms->ms_allocatable = range_tree_create(NULL, type, NULL, start, shift); 2672eda14cbcSMatt Macy 2673eda14cbcSMatt Macy ms->ms_trim = range_tree_create(NULL, type, NULL, start, shift); 2674eda14cbcSMatt Macy 2675eda14cbcSMatt Macy metaslab_group_add(mg, ms); 2676eda14cbcSMatt Macy metaslab_set_fragmentation(ms, B_FALSE); 2677eda14cbcSMatt Macy 2678eda14cbcSMatt Macy /* 2679eda14cbcSMatt Macy * If we're opening an existing pool (txg == 0) or creating 2680eda14cbcSMatt Macy * a new one (txg == TXG_INITIAL), all space is available now. 2681eda14cbcSMatt Macy * If we're adding space to an existing pool, the new space 2682eda14cbcSMatt Macy * does not become available until after this txg has synced. 2683eda14cbcSMatt Macy * The metaslab's weight will also be initialized when we sync 2684eda14cbcSMatt Macy * out this txg. This ensures that we don't attempt to allocate 2685eda14cbcSMatt Macy * from it before we have initialized it completely. 
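 *
 * Concretely, for txg <= TXG_INITIAL we call metaslab_sync_done()
 * immediately below so any pre-existing space is accounted right
 * away, whereas a later txg (a device addition or expansion) only
 * dirties the vdev and metaslab here and defers the accounting to
 * the end of that txg.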
2686eda14cbcSMatt Macy */ 2687eda14cbcSMatt Macy if (txg <= TXG_INITIAL) { 2688eda14cbcSMatt Macy metaslab_sync_done(ms, 0); 2689eda14cbcSMatt Macy metaslab_space_update(vd, mg->mg_class, 2690eda14cbcSMatt Macy metaslab_allocated_space(ms), 0, 0); 2691eda14cbcSMatt Macy } 2692eda14cbcSMatt Macy 2693eda14cbcSMatt Macy if (txg != 0) { 2694eda14cbcSMatt Macy vdev_dirty(vd, 0, NULL, txg); 2695eda14cbcSMatt Macy vdev_dirty(vd, VDD_METASLAB, ms, txg); 2696eda14cbcSMatt Macy } 2697eda14cbcSMatt Macy 2698eda14cbcSMatt Macy *msp = ms; 2699eda14cbcSMatt Macy 2700eda14cbcSMatt Macy return (0); 2701eda14cbcSMatt Macy } 2702eda14cbcSMatt Macy 2703eda14cbcSMatt Macy static void 2704eda14cbcSMatt Macy metaslab_fini_flush_data(metaslab_t *msp) 2705eda14cbcSMatt Macy { 2706eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2707eda14cbcSMatt Macy 2708eda14cbcSMatt Macy if (metaslab_unflushed_txg(msp) == 0) { 2709eda14cbcSMatt Macy ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), 2710eda14cbcSMatt Macy ==, NULL); 2711eda14cbcSMatt Macy return; 2712eda14cbcSMatt Macy } 2713eda14cbcSMatt Macy ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)); 2714eda14cbcSMatt Macy 2715eda14cbcSMatt Macy mutex_enter(&spa->spa_flushed_ms_lock); 2716eda14cbcSMatt Macy avl_remove(&spa->spa_metaslabs_by_flushed, msp); 2717eda14cbcSMatt Macy mutex_exit(&spa->spa_flushed_ms_lock); 2718eda14cbcSMatt Macy 2719eda14cbcSMatt Macy spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp)); 2720eda14cbcSMatt Macy spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp)); 2721eda14cbcSMatt Macy } 2722eda14cbcSMatt Macy 2723eda14cbcSMatt Macy uint64_t 2724eda14cbcSMatt Macy metaslab_unflushed_changes_memused(metaslab_t *ms) 2725eda14cbcSMatt Macy { 2726eda14cbcSMatt Macy return ((range_tree_numsegs(ms->ms_unflushed_allocs) + 2727eda14cbcSMatt Macy range_tree_numsegs(ms->ms_unflushed_frees)) * 2728eda14cbcSMatt Macy ms->ms_unflushed_allocs->rt_root.bt_elem_size); 2729eda14cbcSMatt Macy } 2730eda14cbcSMatt Macy 2731eda14cbcSMatt Macy void 2732eda14cbcSMatt Macy metaslab_fini(metaslab_t *msp) 2733eda14cbcSMatt Macy { 2734eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 2735eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 2736eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 2737eda14cbcSMatt Macy 2738eda14cbcSMatt Macy metaslab_fini_flush_data(msp); 2739eda14cbcSMatt Macy 2740eda14cbcSMatt Macy metaslab_group_remove(mg, msp); 2741eda14cbcSMatt Macy 2742eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 2743eda14cbcSMatt Macy VERIFY(msp->ms_group == NULL); 2744eda14cbcSMatt Macy metaslab_space_update(vd, mg->mg_class, 2745eda14cbcSMatt Macy -metaslab_allocated_space(msp), 0, -msp->ms_size); 2746eda14cbcSMatt Macy 2747eda14cbcSMatt Macy space_map_close(msp->ms_sm); 2748eda14cbcSMatt Macy msp->ms_sm = NULL; 2749eda14cbcSMatt Macy 2750eda14cbcSMatt Macy metaslab_unload(msp); 2751eda14cbcSMatt Macy range_tree_destroy(msp->ms_allocatable); 2752eda14cbcSMatt Macy range_tree_destroy(msp->ms_freeing); 2753eda14cbcSMatt Macy range_tree_destroy(msp->ms_freed); 2754eda14cbcSMatt Macy 2755eda14cbcSMatt Macy ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=, 2756eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp)); 2757eda14cbcSMatt Macy spa->spa_unflushed_stats.sus_memused -= 2758eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp); 2759eda14cbcSMatt Macy range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL); 2760eda14cbcSMatt Macy range_tree_destroy(msp->ms_unflushed_allocs); 2761eda14cbcSMatt 
Macy range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL); 2762eda14cbcSMatt Macy range_tree_destroy(msp->ms_unflushed_frees); 2763eda14cbcSMatt Macy 2764eda14cbcSMatt Macy for (int t = 0; t < TXG_SIZE; t++) { 2765eda14cbcSMatt Macy range_tree_destroy(msp->ms_allocating[t]); 2766eda14cbcSMatt Macy } 2767eda14cbcSMatt Macy 2768eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2769eda14cbcSMatt Macy range_tree_destroy(msp->ms_defer[t]); 2770eda14cbcSMatt Macy } 2771eda14cbcSMatt Macy ASSERT0(msp->ms_deferspace); 2772eda14cbcSMatt Macy 2773eda14cbcSMatt Macy range_tree_destroy(msp->ms_checkpointing); 2774eda14cbcSMatt Macy 2775eda14cbcSMatt Macy for (int t = 0; t < TXG_SIZE; t++) 2776eda14cbcSMatt Macy ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t)); 2777eda14cbcSMatt Macy 2778eda14cbcSMatt Macy range_tree_vacate(msp->ms_trim, NULL, NULL); 2779eda14cbcSMatt Macy range_tree_destroy(msp->ms_trim); 2780eda14cbcSMatt Macy 2781eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 2782eda14cbcSMatt Macy cv_destroy(&msp->ms_load_cv); 2783eda14cbcSMatt Macy cv_destroy(&msp->ms_flush_cv); 2784eda14cbcSMatt Macy mutex_destroy(&msp->ms_lock); 2785eda14cbcSMatt Macy mutex_destroy(&msp->ms_sync_lock); 2786eda14cbcSMatt Macy ASSERT3U(msp->ms_allocator, ==, -1); 2787eda14cbcSMatt Macy 2788eda14cbcSMatt Macy kmem_free(msp, sizeof (metaslab_t)); 2789eda14cbcSMatt Macy } 2790eda14cbcSMatt Macy 2791eda14cbcSMatt Macy #define FRAGMENTATION_TABLE_SIZE 17 2792eda14cbcSMatt Macy 2793eda14cbcSMatt Macy /* 2794eda14cbcSMatt Macy * This table defines a segment size based fragmentation metric that will 2795eda14cbcSMatt Macy * allow each metaslab to derive its own fragmentation value. This is done 2796eda14cbcSMatt Macy * by calculating the space in each bucket of the spacemap histogram and 2797eda14cbcSMatt Macy * multiplying that by the fragmentation metric in this table. Doing 2798eda14cbcSMatt Macy * this for all buckets and dividing it by the total amount of free 2799eda14cbcSMatt Macy * space in this metaslab (i.e. the total free space in all buckets) gives 2800eda14cbcSMatt Macy * us the fragmentation metric. This means that a high fragmentation metric 2801eda14cbcSMatt Macy * equates to most of the free space being comprised of small segments. 2802eda14cbcSMatt Macy * Conversely, if the metric is low, then most of the free space is in 2803eda14cbcSMatt Macy * large segments. A 10% change in fragmentation equates to approximately 2804eda14cbcSMatt Macy * double the number of segments. 2805eda14cbcSMatt Macy * 2806eda14cbcSMatt Macy * This table defines 0% fragmented space using 16MB segments. Testing has 2807eda14cbcSMatt Macy * shown that segments that are greater than or equal to 16MB do not suffer 2808eda14cbcSMatt Macy * from drastic performance problems. Using this value, we derive the rest 2809eda14cbcSMatt Macy * of the table. Since the fragmentation value is never stored on disk, it 2810eda14cbcSMatt Macy * is possible to change these calculations in the future. 
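 *
 * As a rough example of how the table below is applied: a
 * metaslab whose free space is entirely in 512B segments is 100%
 * fragmented, one whose free space is entirely in segments of
 * 16MB or more is 0% fragmented, and one with half of its free
 * space (by bytes) in 4K segments and half in 1M segments scores
 * roughly (95 + 20) / 2 = 57.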
2811eda14cbcSMatt Macy */ 2812eda14cbcSMatt Macy int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = { 2813eda14cbcSMatt Macy 100, /* 512B */ 2814eda14cbcSMatt Macy 100, /* 1K */ 2815eda14cbcSMatt Macy 98, /* 2K */ 2816eda14cbcSMatt Macy 95, /* 4K */ 2817eda14cbcSMatt Macy 90, /* 8K */ 2818eda14cbcSMatt Macy 80, /* 16K */ 2819eda14cbcSMatt Macy 70, /* 32K */ 2820eda14cbcSMatt Macy 60, /* 64K */ 2821eda14cbcSMatt Macy 50, /* 128K */ 2822eda14cbcSMatt Macy 40, /* 256K */ 2823eda14cbcSMatt Macy 30, /* 512K */ 2824eda14cbcSMatt Macy 20, /* 1M */ 2825eda14cbcSMatt Macy 15, /* 2M */ 2826eda14cbcSMatt Macy 10, /* 4M */ 2827eda14cbcSMatt Macy 5, /* 8M */ 2828eda14cbcSMatt Macy 0 /* 16M */ 2829eda14cbcSMatt Macy }; 2830eda14cbcSMatt Macy 2831eda14cbcSMatt Macy /* 2832eda14cbcSMatt Macy * Calculate the metaslab's fragmentation metric and set ms_fragmentation. 2833eda14cbcSMatt Macy * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not 2834eda14cbcSMatt Macy * been upgraded and does not support this metric. Otherwise, the return 2835eda14cbcSMatt Macy * value should be in the range [0, 100]. 2836eda14cbcSMatt Macy */ 2837eda14cbcSMatt Macy static void 2838eda14cbcSMatt Macy metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty) 2839eda14cbcSMatt Macy { 2840eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2841eda14cbcSMatt Macy uint64_t fragmentation = 0; 2842eda14cbcSMatt Macy uint64_t total = 0; 2843eda14cbcSMatt Macy boolean_t feature_enabled = spa_feature_is_enabled(spa, 2844eda14cbcSMatt Macy SPA_FEATURE_SPACEMAP_HISTOGRAM); 2845eda14cbcSMatt Macy 2846eda14cbcSMatt Macy if (!feature_enabled) { 2847eda14cbcSMatt Macy msp->ms_fragmentation = ZFS_FRAG_INVALID; 2848eda14cbcSMatt Macy return; 2849eda14cbcSMatt Macy } 2850eda14cbcSMatt Macy 2851eda14cbcSMatt Macy /* 2852eda14cbcSMatt Macy * A null space map means that the entire metaslab is free 2853eda14cbcSMatt Macy * and thus is not fragmented. 2854eda14cbcSMatt Macy */ 2855eda14cbcSMatt Macy if (msp->ms_sm == NULL) { 2856eda14cbcSMatt Macy msp->ms_fragmentation = 0; 2857eda14cbcSMatt Macy return; 2858eda14cbcSMatt Macy } 2859eda14cbcSMatt Macy 2860eda14cbcSMatt Macy /* 2861eda14cbcSMatt Macy * If this metaslab's space map has not been upgraded, flag it 2862eda14cbcSMatt Macy * so that we upgrade next time we encounter it. 2863eda14cbcSMatt Macy */ 2864eda14cbcSMatt Macy if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) { 2865eda14cbcSMatt Macy uint64_t txg = spa_syncing_txg(spa); 2866eda14cbcSMatt Macy vdev_t *vd = msp->ms_group->mg_vd; 2867eda14cbcSMatt Macy 2868eda14cbcSMatt Macy /* 2869eda14cbcSMatt Macy * If we've reached the final dirty txg, then we must 2870eda14cbcSMatt Macy * be shutting down the pool. We don't want to dirty 2871eda14cbcSMatt Macy * any data past this point so skip setting the condense 2872eda14cbcSMatt Macy * flag. We can retry this action the next time the pool 2873eda14cbcSMatt Macy * is imported. We also skip marking this metaslab for 2874eda14cbcSMatt Macy * condensing if the caller has explicitly set nodirty. 
2875eda14cbcSMatt Macy */ 2876eda14cbcSMatt Macy if (!nodirty && 2877eda14cbcSMatt Macy spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) { 2878eda14cbcSMatt Macy msp->ms_condense_wanted = B_TRUE; 2879eda14cbcSMatt Macy vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); 2880eda14cbcSMatt Macy zfs_dbgmsg("txg %llu, requesting force condense: " 2881eda14cbcSMatt Macy "ms_id %llu, vdev_id %llu", txg, msp->ms_id, 2882eda14cbcSMatt Macy vd->vdev_id); 2883eda14cbcSMatt Macy } 2884eda14cbcSMatt Macy msp->ms_fragmentation = ZFS_FRAG_INVALID; 2885eda14cbcSMatt Macy return; 2886eda14cbcSMatt Macy } 2887eda14cbcSMatt Macy 2888eda14cbcSMatt Macy for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 2889eda14cbcSMatt Macy uint64_t space = 0; 2890eda14cbcSMatt Macy uint8_t shift = msp->ms_sm->sm_shift; 2891eda14cbcSMatt Macy 2892eda14cbcSMatt Macy int idx = MIN(shift - SPA_MINBLOCKSHIFT + i, 2893eda14cbcSMatt Macy FRAGMENTATION_TABLE_SIZE - 1); 2894eda14cbcSMatt Macy 2895eda14cbcSMatt Macy if (msp->ms_sm->sm_phys->smp_histogram[i] == 0) 2896eda14cbcSMatt Macy continue; 2897eda14cbcSMatt Macy 2898eda14cbcSMatt Macy space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift); 2899eda14cbcSMatt Macy total += space; 2900eda14cbcSMatt Macy 2901eda14cbcSMatt Macy ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE); 2902eda14cbcSMatt Macy fragmentation += space * zfs_frag_table[idx]; 2903eda14cbcSMatt Macy } 2904eda14cbcSMatt Macy 2905eda14cbcSMatt Macy if (total > 0) 2906eda14cbcSMatt Macy fragmentation /= total; 2907eda14cbcSMatt Macy ASSERT3U(fragmentation, <=, 100); 2908eda14cbcSMatt Macy 2909eda14cbcSMatt Macy msp->ms_fragmentation = fragmentation; 2910eda14cbcSMatt Macy } 2911eda14cbcSMatt Macy 2912eda14cbcSMatt Macy /* 2913eda14cbcSMatt Macy * Compute a weight -- a selection preference value -- for the given metaslab. 2914eda14cbcSMatt Macy * This is based on the amount of free space, the level of fragmentation, 2915eda14cbcSMatt Macy * the LBA range, and whether the metaslab is loaded. 2916eda14cbcSMatt Macy */ 2917eda14cbcSMatt Macy static uint64_t 2918eda14cbcSMatt Macy metaslab_space_weight(metaslab_t *msp) 2919eda14cbcSMatt Macy { 2920eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 2921eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 2922eda14cbcSMatt Macy uint64_t weight, space; 2923eda14cbcSMatt Macy 2924eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 2925eda14cbcSMatt Macy 2926eda14cbcSMatt Macy /* 2927eda14cbcSMatt Macy * The baseline weight is the metaslab's free space. 2928eda14cbcSMatt Macy */ 2929eda14cbcSMatt Macy space = msp->ms_size - metaslab_allocated_space(msp); 2930eda14cbcSMatt Macy 2931eda14cbcSMatt Macy if (metaslab_fragmentation_factor_enabled && 2932eda14cbcSMatt Macy msp->ms_fragmentation != ZFS_FRAG_INVALID) { 2933eda14cbcSMatt Macy /* 2934eda14cbcSMatt Macy * Use the fragmentation information to inversely scale 2935eda14cbcSMatt Macy * down the baseline weight. We need to ensure that we 2936eda14cbcSMatt Macy * don't exclude this metaslab completely when it's 100% 2937eda14cbcSMatt Macy * fragmented. To avoid this we reduce the fragmented value 2938eda14cbcSMatt Macy * by 1. 2939eda14cbcSMatt Macy */ 2940eda14cbcSMatt Macy space = (space * (100 - (msp->ms_fragmentation - 1))) / 100; 2941eda14cbcSMatt Macy 2942eda14cbcSMatt Macy /* 2943eda14cbcSMatt Macy * If space < SPA_MINBLOCKSIZE, then we will not allocate from 2944eda14cbcSMatt Macy * this metaslab again. 
The fragmentation metric may have 2945eda14cbcSMatt Macy * decreased the space to something smaller than 2946eda14cbcSMatt Macy * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE 2947eda14cbcSMatt Macy * so that we can consume any remaining space. 2948eda14cbcSMatt Macy */ 2949eda14cbcSMatt Macy if (space > 0 && space < SPA_MINBLOCKSIZE) 2950eda14cbcSMatt Macy space = SPA_MINBLOCKSIZE; 2951eda14cbcSMatt Macy } 2952eda14cbcSMatt Macy weight = space; 2953eda14cbcSMatt Macy 2954eda14cbcSMatt Macy /* 2955eda14cbcSMatt Macy * Modern disks have uniform bit density and constant angular velocity. 2956eda14cbcSMatt Macy * Therefore, the outer recording zones are faster (higher bandwidth) 2957eda14cbcSMatt Macy * than the inner zones by the ratio of outer to inner track diameter, 2958eda14cbcSMatt Macy * which is typically around 2:1. We account for this by assigning 2959eda14cbcSMatt Macy * higher weight to lower metaslabs (multiplier ranging from 2x to 1x). 2960eda14cbcSMatt Macy * In effect, this means that we'll select the metaslab with the most 2961eda14cbcSMatt Macy * free bandwidth rather than simply the one with the most free space. 2962eda14cbcSMatt Macy */ 2963eda14cbcSMatt Macy if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) { 2964eda14cbcSMatt Macy weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count; 2965eda14cbcSMatt Macy ASSERT(weight >= space && weight <= 2 * space); 2966eda14cbcSMatt Macy } 2967eda14cbcSMatt Macy 2968eda14cbcSMatt Macy /* 2969eda14cbcSMatt Macy * If this metaslab is one we're actively using, adjust its 2970eda14cbcSMatt Macy * weight to make it preferable to any inactive metaslab so 2971eda14cbcSMatt Macy * we'll polish it off. If the fragmentation on this metaslab 2972eda14cbcSMatt Macy * has exceeded our threshold, then don't mark it active. 2973eda14cbcSMatt Macy */ 2974eda14cbcSMatt Macy if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID && 2975eda14cbcSMatt Macy msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) { 2976eda14cbcSMatt Macy weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK); 2977eda14cbcSMatt Macy } 2978eda14cbcSMatt Macy 2979eda14cbcSMatt Macy WEIGHT_SET_SPACEBASED(weight); 2980eda14cbcSMatt Macy return (weight); 2981eda14cbcSMatt Macy } 2982eda14cbcSMatt Macy 2983eda14cbcSMatt Macy /* 2984eda14cbcSMatt Macy * Return the weight of the specified metaslab, according to the segment-based 2985eda14cbcSMatt Macy * weighting algorithm. The metaslab must be loaded. This function can 2986eda14cbcSMatt Macy * be called within a sync pass since it relies only on the metaslab's 2987eda14cbcSMatt Macy * range tree which is always accurate when the metaslab is loaded.
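 *
 * The returned weight encodes the index of the highest range tree
 * histogram bucket that still contains free segments, together
 * with a rough count of how many such segments exist (see the
 * WEIGHT_SET_INDEX()/WEIGHT_SET_COUNT() macros), so loaded and
 * unloaded metaslabs can be compared on the same basis.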
2988eda14cbcSMatt Macy */ 2989eda14cbcSMatt Macy static uint64_t 2990eda14cbcSMatt Macy metaslab_weight_from_range_tree(metaslab_t *msp) 2991eda14cbcSMatt Macy { 2992eda14cbcSMatt Macy uint64_t weight = 0; 2993eda14cbcSMatt Macy uint32_t segments = 0; 2994eda14cbcSMatt Macy 2995eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 2996eda14cbcSMatt Macy 2997eda14cbcSMatt Macy for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT; 2998eda14cbcSMatt Macy i--) { 2999eda14cbcSMatt Macy uint8_t shift = msp->ms_group->mg_vd->vdev_ashift; 3000eda14cbcSMatt Macy int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1; 3001eda14cbcSMatt Macy 3002eda14cbcSMatt Macy segments <<= 1; 3003eda14cbcSMatt Macy segments += msp->ms_allocatable->rt_histogram[i]; 3004eda14cbcSMatt Macy 3005eda14cbcSMatt Macy /* 3006eda14cbcSMatt Macy * The range tree provides more precision than the space map 3007eda14cbcSMatt Macy * and must be downgraded so that all values fit within the 3008eda14cbcSMatt Macy * space map's histogram. This allows us to compare loaded 3009eda14cbcSMatt Macy * vs. unloaded metaslabs to determine which metaslab is 3010eda14cbcSMatt Macy * considered "best". 3011eda14cbcSMatt Macy */ 3012eda14cbcSMatt Macy if (i > max_idx) 3013eda14cbcSMatt Macy continue; 3014eda14cbcSMatt Macy 3015eda14cbcSMatt Macy if (segments != 0) { 3016eda14cbcSMatt Macy WEIGHT_SET_COUNT(weight, segments); 3017eda14cbcSMatt Macy WEIGHT_SET_INDEX(weight, i); 3018eda14cbcSMatt Macy WEIGHT_SET_ACTIVE(weight, 0); 3019eda14cbcSMatt Macy break; 3020eda14cbcSMatt Macy } 3021eda14cbcSMatt Macy } 3022eda14cbcSMatt Macy return (weight); 3023eda14cbcSMatt Macy } 3024eda14cbcSMatt Macy 3025eda14cbcSMatt Macy /* 3026eda14cbcSMatt Macy * Calculate the weight based on the on-disk histogram. Should be applied 3027eda14cbcSMatt Macy * only to unloaded metaslabs (i.e no incoming allocations) in-order to 3028eda14cbcSMatt Macy * give results consistent with the on-disk state 3029eda14cbcSMatt Macy */ 3030eda14cbcSMatt Macy static uint64_t 3031eda14cbcSMatt Macy metaslab_weight_from_spacemap(metaslab_t *msp) 3032eda14cbcSMatt Macy { 3033eda14cbcSMatt Macy space_map_t *sm = msp->ms_sm; 3034eda14cbcSMatt Macy ASSERT(!msp->ms_loaded); 3035eda14cbcSMatt Macy ASSERT(sm != NULL); 3036eda14cbcSMatt Macy ASSERT3U(space_map_object(sm), !=, 0); 3037eda14cbcSMatt Macy ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t)); 3038eda14cbcSMatt Macy 3039eda14cbcSMatt Macy /* 3040eda14cbcSMatt Macy * Create a joint histogram from all the segments that have made 3041eda14cbcSMatt Macy * it to the metaslab's space map histogram, that are not yet 3042eda14cbcSMatt Macy * available for allocation because they are still in the freeing 3043eda14cbcSMatt Macy * pipeline (e.g. freeing, freed, and defer trees). Then subtract 3044eda14cbcSMatt Macy * these segments from the space map's histogram to get a more 3045eda14cbcSMatt Macy * accurate weight. 
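 *
 * Without that subtraction, space that is still in the freeing
 * pipeline would inflate the unloaded weight and make this
 * metaslab look more attractive than it actually is.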
3046eda14cbcSMatt Macy */ 3047eda14cbcSMatt Macy uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0}; 3048eda14cbcSMatt Macy for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) 3049eda14cbcSMatt Macy deferspace_histogram[i] += msp->ms_synchist[i]; 3050eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 3051eda14cbcSMatt Macy for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 3052eda14cbcSMatt Macy deferspace_histogram[i] += msp->ms_deferhist[t][i]; 3053eda14cbcSMatt Macy } 3054eda14cbcSMatt Macy } 3055eda14cbcSMatt Macy 3056eda14cbcSMatt Macy uint64_t weight = 0; 3057eda14cbcSMatt Macy for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) { 3058eda14cbcSMatt Macy ASSERT3U(sm->sm_phys->smp_histogram[i], >=, 3059eda14cbcSMatt Macy deferspace_histogram[i]); 3060eda14cbcSMatt Macy uint64_t count = 3061eda14cbcSMatt Macy sm->sm_phys->smp_histogram[i] - deferspace_histogram[i]; 3062eda14cbcSMatt Macy if (count != 0) { 3063eda14cbcSMatt Macy WEIGHT_SET_COUNT(weight, count); 3064eda14cbcSMatt Macy WEIGHT_SET_INDEX(weight, i + sm->sm_shift); 3065eda14cbcSMatt Macy WEIGHT_SET_ACTIVE(weight, 0); 3066eda14cbcSMatt Macy break; 3067eda14cbcSMatt Macy } 3068eda14cbcSMatt Macy } 3069eda14cbcSMatt Macy return (weight); 3070eda14cbcSMatt Macy } 3071eda14cbcSMatt Macy 3072eda14cbcSMatt Macy /* 3073eda14cbcSMatt Macy * Compute a segment-based weight for the specified metaslab. The weight 3074eda14cbcSMatt Macy * is determined by highest bucket in the histogram. The information 3075eda14cbcSMatt Macy * for the highest bucket is encoded into the weight value. 3076eda14cbcSMatt Macy */ 3077eda14cbcSMatt Macy static uint64_t 3078eda14cbcSMatt Macy metaslab_segment_weight(metaslab_t *msp) 3079eda14cbcSMatt Macy { 3080eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 3081eda14cbcSMatt Macy uint64_t weight = 0; 3082eda14cbcSMatt Macy uint8_t shift = mg->mg_vd->vdev_ashift; 3083eda14cbcSMatt Macy 3084eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 3085eda14cbcSMatt Macy 3086eda14cbcSMatt Macy /* 3087eda14cbcSMatt Macy * The metaslab is completely free. 3088eda14cbcSMatt Macy */ 3089eda14cbcSMatt Macy if (metaslab_allocated_space(msp) == 0) { 3090eda14cbcSMatt Macy int idx = highbit64(msp->ms_size) - 1; 3091eda14cbcSMatt Macy int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1; 3092eda14cbcSMatt Macy 3093eda14cbcSMatt Macy if (idx < max_idx) { 3094eda14cbcSMatt Macy WEIGHT_SET_COUNT(weight, 1ULL); 3095eda14cbcSMatt Macy WEIGHT_SET_INDEX(weight, idx); 3096eda14cbcSMatt Macy } else { 3097eda14cbcSMatt Macy WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx)); 3098eda14cbcSMatt Macy WEIGHT_SET_INDEX(weight, max_idx); 3099eda14cbcSMatt Macy } 3100eda14cbcSMatt Macy WEIGHT_SET_ACTIVE(weight, 0); 3101eda14cbcSMatt Macy ASSERT(!WEIGHT_IS_SPACEBASED(weight)); 3102eda14cbcSMatt Macy return (weight); 3103eda14cbcSMatt Macy } 3104eda14cbcSMatt Macy 3105eda14cbcSMatt Macy ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t)); 3106eda14cbcSMatt Macy 3107eda14cbcSMatt Macy /* 3108eda14cbcSMatt Macy * If the metaslab is fully allocated then just make the weight 0. 3109eda14cbcSMatt Macy */ 3110eda14cbcSMatt Macy if (metaslab_allocated_space(msp) == msp->ms_size) 3111eda14cbcSMatt Macy return (0); 3112eda14cbcSMatt Macy /* 3113eda14cbcSMatt Macy * If the metaslab is already loaded, then use the range tree to 3114eda14cbcSMatt Macy * determine the weight. Otherwise, we rely on the space map information 3115eda14cbcSMatt Macy * to generate the weight. 
3116eda14cbcSMatt Macy */ 3117eda14cbcSMatt Macy if (msp->ms_loaded) { 3118eda14cbcSMatt Macy weight = metaslab_weight_from_range_tree(msp); 3119eda14cbcSMatt Macy } else { 3120eda14cbcSMatt Macy weight = metaslab_weight_from_spacemap(msp); 3121eda14cbcSMatt Macy } 3122eda14cbcSMatt Macy 3123eda14cbcSMatt Macy /* 3124eda14cbcSMatt Macy * If the metaslab was active the last time we calculated its weight 3125eda14cbcSMatt Macy * then keep it active. We want to consume the entire region that 3126eda14cbcSMatt Macy * is associated with this weight. 3127eda14cbcSMatt Macy */ 3128eda14cbcSMatt Macy if (msp->ms_activation_weight != 0 && weight != 0) 3129eda14cbcSMatt Macy WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight)); 3130eda14cbcSMatt Macy return (weight); 3131eda14cbcSMatt Macy } 3132eda14cbcSMatt Macy 3133eda14cbcSMatt Macy /* 3134eda14cbcSMatt Macy * Determine if we should attempt to allocate from this metaslab. If the 3135eda14cbcSMatt Macy * metaslab is loaded, then we can determine if the desired allocation 3136eda14cbcSMatt Macy * can be satisfied by looking at the size of the maximum free segment 3137eda14cbcSMatt Macy * on that metaslab. Otherwise, we make our decision based on the metaslab's 3138eda14cbcSMatt Macy * weight. For segment-based weighting we can determine the maximum 3139eda14cbcSMatt Macy * allocation based on the index encoded in its value. For space-based 3140eda14cbcSMatt Macy * weights we rely on the entire weight (excluding the weight-type bit). 3141eda14cbcSMatt Macy */ 3142eda14cbcSMatt Macy static boolean_t 3143eda14cbcSMatt Macy metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard) 3144eda14cbcSMatt Macy { 3145eda14cbcSMatt Macy /* 3146eda14cbcSMatt Macy * If the metaslab is loaded, ms_max_size is definitive and we can use 3147eda14cbcSMatt Macy * the fast check. If it's not, the ms_max_size is a lower bound (once 3148eda14cbcSMatt Macy * set), and we should use the fast check as long as we're not in 3149eda14cbcSMatt Macy * try_hard and it's been less than zfs_metaslab_max_size_cache_sec 3150eda14cbcSMatt Macy * seconds since the metaslab was unloaded. 3151eda14cbcSMatt Macy */ 3152eda14cbcSMatt Macy if (msp->ms_loaded || 3153eda14cbcSMatt Macy (msp->ms_max_size != 0 && !try_hard && gethrtime() < 3154eda14cbcSMatt Macy msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec))) 3155eda14cbcSMatt Macy return (msp->ms_max_size >= asize); 3156eda14cbcSMatt Macy 3157eda14cbcSMatt Macy boolean_t should_allocate; 3158eda14cbcSMatt Macy if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) { 3159eda14cbcSMatt Macy /* 3160eda14cbcSMatt Macy * The metaslab segment weight indicates segments in the 3161eda14cbcSMatt Macy * range [2^i, 2^(i+1)), where i is the index in the weight. 3162eda14cbcSMatt Macy * Since the asize might be in the middle of the range, we 3163eda14cbcSMatt Macy * should attempt the allocation if asize < 2^(i+1). 
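 *
 * For example, a weight with index 17 describes free segments in
 * the range [128K, 256K); an asize of 160K is worth attempting
 * even though it exceeds 2^17, while an asize of 256K is not.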
3164eda14cbcSMatt Macy */ 3165eda14cbcSMatt Macy should_allocate = (asize < 3166eda14cbcSMatt Macy 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1)); 3167eda14cbcSMatt Macy } else { 3168eda14cbcSMatt Macy should_allocate = (asize <= 3169eda14cbcSMatt Macy (msp->ms_weight & ~METASLAB_WEIGHT_TYPE)); 3170eda14cbcSMatt Macy } 3171eda14cbcSMatt Macy 3172eda14cbcSMatt Macy return (should_allocate); 3173eda14cbcSMatt Macy } 3174eda14cbcSMatt Macy 3175eda14cbcSMatt Macy static uint64_t 3176eda14cbcSMatt Macy metaslab_weight(metaslab_t *msp, boolean_t nodirty) 3177eda14cbcSMatt Macy { 3178eda14cbcSMatt Macy vdev_t *vd = msp->ms_group->mg_vd; 3179eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 3180eda14cbcSMatt Macy uint64_t weight; 3181eda14cbcSMatt Macy 3182eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 3183eda14cbcSMatt Macy 3184eda14cbcSMatt Macy metaslab_set_fragmentation(msp, nodirty); 3185eda14cbcSMatt Macy 3186eda14cbcSMatt Macy /* 3187eda14cbcSMatt Macy * Update the maximum size. If the metaslab is loaded, this will 3188eda14cbcSMatt Macy * ensure that we get an accurate maximum size if newly freed space 3189eda14cbcSMatt Macy * has been added back into the free tree. If the metaslab is 3190eda14cbcSMatt Macy * unloaded, we check if there's a larger free segment in the 3191eda14cbcSMatt Macy * unflushed frees. This is a lower bound on the largest allocatable 3192eda14cbcSMatt Macy * segment size. Coalescing of adjacent entries may reveal larger 3193eda14cbcSMatt Macy * allocatable segments, but we aren't aware of those until loading 3194eda14cbcSMatt Macy * the space map into a range tree. 3195eda14cbcSMatt Macy */ 3196eda14cbcSMatt Macy if (msp->ms_loaded) { 3197eda14cbcSMatt Macy msp->ms_max_size = metaslab_largest_allocatable(msp); 3198eda14cbcSMatt Macy } else { 3199eda14cbcSMatt Macy msp->ms_max_size = MAX(msp->ms_max_size, 3200eda14cbcSMatt Macy metaslab_largest_unflushed_free(msp)); 3201eda14cbcSMatt Macy } 3202eda14cbcSMatt Macy 3203eda14cbcSMatt Macy /* 3204eda14cbcSMatt Macy * Segment-based weighting requires space map histogram support. 3205eda14cbcSMatt Macy */ 3206eda14cbcSMatt Macy if (zfs_metaslab_segment_weight_enabled && 3207eda14cbcSMatt Macy spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) && 3208eda14cbcSMatt Macy (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size == 3209eda14cbcSMatt Macy sizeof (space_map_phys_t))) { 3210eda14cbcSMatt Macy weight = metaslab_segment_weight(msp); 3211eda14cbcSMatt Macy } else { 3212eda14cbcSMatt Macy weight = metaslab_space_weight(msp); 3213eda14cbcSMatt Macy } 3214eda14cbcSMatt Macy return (weight); 3215eda14cbcSMatt Macy } 3216eda14cbcSMatt Macy 3217eda14cbcSMatt Macy void 3218eda14cbcSMatt Macy metaslab_recalculate_weight_and_sort(metaslab_t *msp) 3219eda14cbcSMatt Macy { 3220eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 3221eda14cbcSMatt Macy 3222eda14cbcSMatt Macy /* note: we preserve the mask (e.g. indication of primary, etc..) 
*/ 3223eda14cbcSMatt Macy uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK; 3224eda14cbcSMatt Macy metaslab_group_sort(msp->ms_group, msp, 3225eda14cbcSMatt Macy metaslab_weight(msp, B_FALSE) | was_active); 3226eda14cbcSMatt Macy } 3227eda14cbcSMatt Macy 3228eda14cbcSMatt Macy static int 3229eda14cbcSMatt Macy metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp, 3230eda14cbcSMatt Macy int allocator, uint64_t activation_weight) 3231eda14cbcSMatt Macy { 3232eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 3233eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 3234eda14cbcSMatt Macy 3235eda14cbcSMatt Macy /* 3236eda14cbcSMatt Macy * If we're activating for the claim code, we don't want to actually 3237eda14cbcSMatt Macy * set the metaslab up for a specific allocator. 3238eda14cbcSMatt Macy */ 3239eda14cbcSMatt Macy if (activation_weight == METASLAB_WEIGHT_CLAIM) { 3240eda14cbcSMatt Macy ASSERT0(msp->ms_activation_weight); 3241eda14cbcSMatt Macy msp->ms_activation_weight = msp->ms_weight; 3242eda14cbcSMatt Macy metaslab_group_sort(mg, msp, msp->ms_weight | 3243eda14cbcSMatt Macy activation_weight); 3244eda14cbcSMatt Macy return (0); 3245eda14cbcSMatt Macy } 3246eda14cbcSMatt Macy 3247eda14cbcSMatt Macy metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ? 3248eda14cbcSMatt Macy &mga->mga_primary : &mga->mga_secondary); 3249eda14cbcSMatt Macy 3250eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 3251eda14cbcSMatt Macy if (*mspp != NULL) { 3252eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 3253eda14cbcSMatt Macy return (EEXIST); 3254eda14cbcSMatt Macy } 3255eda14cbcSMatt Macy 3256eda14cbcSMatt Macy *mspp = msp; 3257eda14cbcSMatt Macy ASSERT3S(msp->ms_allocator, ==, -1); 3258eda14cbcSMatt Macy msp->ms_allocator = allocator; 3259eda14cbcSMatt Macy msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY); 3260eda14cbcSMatt Macy 3261eda14cbcSMatt Macy ASSERT0(msp->ms_activation_weight); 3262eda14cbcSMatt Macy msp->ms_activation_weight = msp->ms_weight; 3263eda14cbcSMatt Macy metaslab_group_sort_impl(mg, msp, 3264eda14cbcSMatt Macy msp->ms_weight | activation_weight); 3265eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 3266eda14cbcSMatt Macy 3267eda14cbcSMatt Macy return (0); 3268eda14cbcSMatt Macy } 3269eda14cbcSMatt Macy 3270eda14cbcSMatt Macy static int 3271eda14cbcSMatt Macy metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight) 3272eda14cbcSMatt Macy { 3273eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 3274eda14cbcSMatt Macy 3275eda14cbcSMatt Macy /* 3276eda14cbcSMatt Macy * The current metaslab is already activated for us so there 3277eda14cbcSMatt Macy * is nothing to do. Already activated though, doesn't mean 3278eda14cbcSMatt Macy * that this metaslab is activated for our allocator nor our 3279eda14cbcSMatt Macy * requested activation weight. The metaslab could have started 3280eda14cbcSMatt Macy * as an active one for our allocator but changed allocators 3281eda14cbcSMatt Macy * while we were waiting to grab its ms_lock or we stole it 3282eda14cbcSMatt Macy * [see find_valid_metaslab()]. This means that there is a 3283eda14cbcSMatt Macy * possibility of passivating a metaslab of another allocator 3284eda14cbcSMatt Macy * or from a different activation mask, from this thread. 
3285eda14cbcSMatt Macy */ 3286eda14cbcSMatt Macy if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) { 3287eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 3288eda14cbcSMatt Macy return (0); 3289eda14cbcSMatt Macy } 3290eda14cbcSMatt Macy 3291eda14cbcSMatt Macy int error = metaslab_load(msp); 3292eda14cbcSMatt Macy if (error != 0) { 3293eda14cbcSMatt Macy metaslab_group_sort(msp->ms_group, msp, 0); 3294eda14cbcSMatt Macy return (error); 3295eda14cbcSMatt Macy } 3296eda14cbcSMatt Macy 3297eda14cbcSMatt Macy /* 3298eda14cbcSMatt Macy * When entering metaslab_load() we may have dropped the 3299eda14cbcSMatt Macy * ms_lock because we were loading this metaslab, or we 3300eda14cbcSMatt Macy * were waiting for another thread to load it for us. In 3301eda14cbcSMatt Macy * that scenario, we recheck the weight of the metaslab 3302eda14cbcSMatt Macy * to see if it was activated by another thread. 3303eda14cbcSMatt Macy * 3304eda14cbcSMatt Macy * If the metaslab was activated for another allocator or 3305eda14cbcSMatt Macy * it was activated with a different activation weight (e.g. 3306eda14cbcSMatt Macy * we wanted to make it a primary but it was activated as 3307eda14cbcSMatt Macy * secondary) we return error (EBUSY). 3308eda14cbcSMatt Macy * 3309eda14cbcSMatt Macy * If the metaslab was activated for the same allocator 3310eda14cbcSMatt Macy * and requested activation mask, skip activating it. 3311eda14cbcSMatt Macy */ 3312eda14cbcSMatt Macy if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) { 3313eda14cbcSMatt Macy if (msp->ms_allocator != allocator) 3314eda14cbcSMatt Macy return (EBUSY); 3315eda14cbcSMatt Macy 3316eda14cbcSMatt Macy if ((msp->ms_weight & activation_weight) == 0) 3317eda14cbcSMatt Macy return (SET_ERROR(EBUSY)); 3318eda14cbcSMatt Macy 3319eda14cbcSMatt Macy EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY), 3320eda14cbcSMatt Macy msp->ms_primary); 3321eda14cbcSMatt Macy return (0); 3322eda14cbcSMatt Macy } 3323eda14cbcSMatt Macy 3324eda14cbcSMatt Macy /* 3325eda14cbcSMatt Macy * If the metaslab has literally 0 space, it will have weight 0. In 3326eda14cbcSMatt Macy * that case, don't bother activating it. This can happen if the 3327eda14cbcSMatt Macy * metaslab had space during find_valid_metaslab, but another thread 3328eda14cbcSMatt Macy * loaded it and used all that space while we were waiting to grab the 3329eda14cbcSMatt Macy * lock. 
3330eda14cbcSMatt Macy */ 3331eda14cbcSMatt Macy if (msp->ms_weight == 0) { 3332eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_allocatable)); 3333eda14cbcSMatt Macy return (SET_ERROR(ENOSPC)); 3334eda14cbcSMatt Macy } 3335eda14cbcSMatt Macy 3336eda14cbcSMatt Macy if ((error = metaslab_activate_allocator(msp->ms_group, msp, 3337eda14cbcSMatt Macy allocator, activation_weight)) != 0) { 3338eda14cbcSMatt Macy return (error); 3339eda14cbcSMatt Macy } 3340eda14cbcSMatt Macy 3341eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 3342eda14cbcSMatt Macy ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); 3343eda14cbcSMatt Macy 3344eda14cbcSMatt Macy return (0); 3345eda14cbcSMatt Macy } 3346eda14cbcSMatt Macy 3347eda14cbcSMatt Macy static void 3348eda14cbcSMatt Macy metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp, 3349eda14cbcSMatt Macy uint64_t weight) 3350eda14cbcSMatt Macy { 3351eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 3352eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 3353eda14cbcSMatt Macy 3354eda14cbcSMatt Macy if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) { 3355eda14cbcSMatt Macy metaslab_group_sort(mg, msp, weight); 3356eda14cbcSMatt Macy return; 3357eda14cbcSMatt Macy } 3358eda14cbcSMatt Macy 3359eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 3360eda14cbcSMatt Macy ASSERT3P(msp->ms_group, ==, mg); 3361eda14cbcSMatt Macy ASSERT3S(0, <=, msp->ms_allocator); 3362eda14cbcSMatt Macy ASSERT3U(msp->ms_allocator, <, mg->mg_allocators); 3363eda14cbcSMatt Macy 3364eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator]; 3365eda14cbcSMatt Macy if (msp->ms_primary) { 3366eda14cbcSMatt Macy ASSERT3P(mga->mga_primary, ==, msp); 3367eda14cbcSMatt Macy ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY); 3368eda14cbcSMatt Macy mga->mga_primary = NULL; 3369eda14cbcSMatt Macy } else { 3370eda14cbcSMatt Macy ASSERT3P(mga->mga_secondary, ==, msp); 3371eda14cbcSMatt Macy ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY); 3372eda14cbcSMatt Macy mga->mga_secondary = NULL; 3373eda14cbcSMatt Macy } 3374eda14cbcSMatt Macy msp->ms_allocator = -1; 3375eda14cbcSMatt Macy metaslab_group_sort_impl(mg, msp, weight); 3376eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 3377eda14cbcSMatt Macy } 3378eda14cbcSMatt Macy 3379eda14cbcSMatt Macy static void 3380eda14cbcSMatt Macy metaslab_passivate(metaslab_t *msp, uint64_t weight) 3381eda14cbcSMatt Macy { 3382eda14cbcSMatt Macy uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE; 3383eda14cbcSMatt Macy 3384eda14cbcSMatt Macy /* 3385eda14cbcSMatt Macy * If size < SPA_MINBLOCKSIZE, then we will not allocate from 3386eda14cbcSMatt Macy * this metaslab again. In that case, it had better be empty, 3387eda14cbcSMatt Macy * or we would be leaving space on the table. 
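 * (The assertion below applies this check only to space-based weights,
 * where the weight value is itself the amount of free space; a
 * segment-based weight does not encode a byte count, so it is exempt.)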
3388eda14cbcSMatt Macy */
3389eda14cbcSMatt Macy ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
3390eda14cbcSMatt Macy size >= SPA_MINBLOCKSIZE ||
3391eda14cbcSMatt Macy range_tree_space(msp->ms_allocatable) == 0);
3392eda14cbcSMatt Macy ASSERT0(weight & METASLAB_ACTIVE_MASK);
3393eda14cbcSMatt Macy
3394eda14cbcSMatt Macy ASSERT(msp->ms_activation_weight != 0);
3395eda14cbcSMatt Macy msp->ms_activation_weight = 0;
3396eda14cbcSMatt Macy metaslab_passivate_allocator(msp->ms_group, msp, weight);
3397eda14cbcSMatt Macy ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
3398eda14cbcSMatt Macy }
3399eda14cbcSMatt Macy
3400eda14cbcSMatt Macy /*
3401eda14cbcSMatt Macy * Segment-based metaslabs are activated once and remain active until
3402eda14cbcSMatt Macy * we either fail an allocation attempt (similar to space-based metaslabs)
3403eda14cbcSMatt Macy * or have exhausted the free space in zfs_metaslab_switch_threshold
3404eda14cbcSMatt Macy * buckets since the metaslab was activated. This function checks to see
3405eda14cbcSMatt Macy * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
3406eda14cbcSMatt Macy * metaslab and passivates it proactively. This will allow us to select a
3407eda14cbcSMatt Macy * metaslab with a larger contiguous region, if any, remaining within this
3408eda14cbcSMatt Macy * metaslab group. If we're in sync pass > 1, then we continue using this
3409eda14cbcSMatt Macy * metaslab so that we don't dirty more blocks and cause more sync passes.
3410eda14cbcSMatt Macy */
3411eda14cbcSMatt Macy static void
3412eda14cbcSMatt Macy metaslab_segment_may_passivate(metaslab_t *msp)
3413eda14cbcSMatt Macy {
3414eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3415eda14cbcSMatt Macy
3416eda14cbcSMatt Macy if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
3417eda14cbcSMatt Macy return;
3418eda14cbcSMatt Macy
3419eda14cbcSMatt Macy /*
3420eda14cbcSMatt Macy * Since we are in the middle of a sync pass, the most accurate
3421eda14cbcSMatt Macy * information that is accessible to us is the in-core range tree
3422eda14cbcSMatt Macy * histogram; calculate the new weight based on that information.
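 * For example, with a zfs_metaslab_switch_threshold of 2, a metaslab whose
 * largest free segments sat in the 2^17 weight bucket at activation time
 * is proactively passivated once its largest remaining free segments drop
 * to the 2^15 bucket or below, i.e. once the index of the freshly computed
 * weight is at least zfs_metaslab_switch_threshold buckets below the index
 * of ms_activation_weight.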
3423eda14cbcSMatt Macy */ 3424eda14cbcSMatt Macy uint64_t weight = metaslab_weight_from_range_tree(msp); 3425eda14cbcSMatt Macy int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight); 3426eda14cbcSMatt Macy int current_idx = WEIGHT_GET_INDEX(weight); 3427eda14cbcSMatt Macy 3428eda14cbcSMatt Macy if (current_idx <= activation_idx - zfs_metaslab_switch_threshold) 3429eda14cbcSMatt Macy metaslab_passivate(msp, weight); 3430eda14cbcSMatt Macy } 3431eda14cbcSMatt Macy 3432eda14cbcSMatt Macy static void 3433eda14cbcSMatt Macy metaslab_preload(void *arg) 3434eda14cbcSMatt Macy { 3435eda14cbcSMatt Macy metaslab_t *msp = arg; 3436eda14cbcSMatt Macy metaslab_class_t *mc = msp->ms_group->mg_class; 3437eda14cbcSMatt Macy spa_t *spa = mc->mc_spa; 3438eda14cbcSMatt Macy fstrans_cookie_t cookie = spl_fstrans_mark(); 3439eda14cbcSMatt Macy 3440eda14cbcSMatt Macy ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock)); 3441eda14cbcSMatt Macy 3442eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 3443eda14cbcSMatt Macy (void) metaslab_load(msp); 3444eda14cbcSMatt Macy metaslab_set_selected_txg(msp, spa_syncing_txg(spa)); 3445eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 3446eda14cbcSMatt Macy spl_fstrans_unmark(cookie); 3447eda14cbcSMatt Macy } 3448eda14cbcSMatt Macy 3449eda14cbcSMatt Macy static void 3450eda14cbcSMatt Macy metaslab_group_preload(metaslab_group_t *mg) 3451eda14cbcSMatt Macy { 3452eda14cbcSMatt Macy spa_t *spa = mg->mg_vd->vdev_spa; 3453eda14cbcSMatt Macy metaslab_t *msp; 3454eda14cbcSMatt Macy avl_tree_t *t = &mg->mg_metaslab_tree; 3455eda14cbcSMatt Macy int m = 0; 3456eda14cbcSMatt Macy 3457eda14cbcSMatt Macy if (spa_shutting_down(spa) || !metaslab_preload_enabled) { 3458eda14cbcSMatt Macy taskq_wait_outstanding(mg->mg_taskq, 0); 3459eda14cbcSMatt Macy return; 3460eda14cbcSMatt Macy } 3461eda14cbcSMatt Macy 3462eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 3463eda14cbcSMatt Macy 3464eda14cbcSMatt Macy /* 3465eda14cbcSMatt Macy * Load the next potential metaslabs 3466eda14cbcSMatt Macy */ 3467eda14cbcSMatt Macy for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) { 3468eda14cbcSMatt Macy ASSERT3P(msp->ms_group, ==, mg); 3469eda14cbcSMatt Macy 3470eda14cbcSMatt Macy /* 3471eda14cbcSMatt Macy * We preload only the maximum number of metaslabs specified 3472eda14cbcSMatt Macy * by metaslab_preload_limit. If a metaslab is being forced 3473eda14cbcSMatt Macy * to condense then we preload it too. This will ensure 3474eda14cbcSMatt Macy * that force condensing happens in the next txg. 3475eda14cbcSMatt Macy */ 3476eda14cbcSMatt Macy if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) { 3477eda14cbcSMatt Macy continue; 3478eda14cbcSMatt Macy } 3479eda14cbcSMatt Macy 3480eda14cbcSMatt Macy VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload, 3481eda14cbcSMatt Macy msp, TQ_SLEEP) != TASKQID_INVALID); 3482eda14cbcSMatt Macy } 3483eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 3484eda14cbcSMatt Macy } 3485eda14cbcSMatt Macy 3486eda14cbcSMatt Macy /* 3487eda14cbcSMatt Macy * Determine if the space map's on-disk footprint is past our tolerance for 3488eda14cbcSMatt Macy * inefficiency. We would like to use the following criteria to make our 3489eda14cbcSMatt Macy * decision: 3490eda14cbcSMatt Macy * 3491eda14cbcSMatt Macy * 1. Do not condense if the size of the space map object would dramatically 3492eda14cbcSMatt Macy * increase as a result of writing out the free space range tree. 3493eda14cbcSMatt Macy * 3494eda14cbcSMatt Macy * 2. 
Condense if the on-disk space map representation is at least
3495eda14cbcSMatt Macy * zfs_condense_pct/100 times the size of the optimal representation
3496eda14cbcSMatt Macy * (e.g. with zfs_condense_pct = 110 and an optimal size of 1MB, condense once the on-disk size reaches 1.1MB).
3497eda14cbcSMatt Macy *
3498eda14cbcSMatt Macy * 3. Do not condense if the on-disk size of the space map does not actually
3499eda14cbcSMatt Macy * decrease.
3500eda14cbcSMatt Macy *
3501eda14cbcSMatt Macy * Unfortunately, we cannot compute the on-disk size of the space map in this
3502eda14cbcSMatt Macy * context because we cannot accurately compute the effects of compression, etc.
3503eda14cbcSMatt Macy * Instead, we apply the heuristic described in the block comment for
3504eda14cbcSMatt Macy * zfs_metaslab_condense_block_threshold - we only condense if the space used
3505eda14cbcSMatt Macy * is greater than a threshold number of blocks.
3506eda14cbcSMatt Macy */
3507eda14cbcSMatt Macy static boolean_t
3508eda14cbcSMatt Macy metaslab_should_condense(metaslab_t *msp)
3509eda14cbcSMatt Macy {
3510eda14cbcSMatt Macy space_map_t *sm = msp->ms_sm;
3511eda14cbcSMatt Macy vdev_t *vd = msp->ms_group->mg_vd;
3512eda14cbcSMatt Macy uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
3513eda14cbcSMatt Macy
3514eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock));
3515eda14cbcSMatt Macy ASSERT(msp->ms_loaded);
3516eda14cbcSMatt Macy ASSERT(sm != NULL);
3517eda14cbcSMatt Macy ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
3518eda14cbcSMatt Macy
3519eda14cbcSMatt Macy /*
3520eda14cbcSMatt Macy * We always condense metaslabs that are empty and metaslabs for
3521eda14cbcSMatt Macy * which a condense request has been made.
3522eda14cbcSMatt Macy */
3523eda14cbcSMatt Macy if (range_tree_numsegs(msp->ms_allocatable) == 0 ||
3524eda14cbcSMatt Macy msp->ms_condense_wanted)
3525eda14cbcSMatt Macy return (B_TRUE);
3526eda14cbcSMatt Macy
3527eda14cbcSMatt Macy uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
3528eda14cbcSMatt Macy uint64_t object_size = space_map_length(sm);
3529eda14cbcSMatt Macy uint64_t optimal_size = space_map_estimate_optimal_size(sm,
3530eda14cbcSMatt Macy msp->ms_allocatable, SM_NO_VDEVID);
3531eda14cbcSMatt Macy
3532eda14cbcSMatt Macy return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
3533eda14cbcSMatt Macy object_size > zfs_metaslab_condense_block_threshold * record_size);
3534eda14cbcSMatt Macy }
3535eda14cbcSMatt Macy
3536eda14cbcSMatt Macy /*
3537eda14cbcSMatt Macy * Condense the on-disk space map representation to its minimized form.
3538eda14cbcSMatt Macy * The minimized form consists of a small number of allocations followed
3539eda14cbcSMatt Macy * by the entries of the free range tree (ms_allocatable). The condensed
3540eda14cbcSMatt Macy * spacemap contains all the entries of previous TXGs (including those in
3541eda14cbcSMatt Macy * the pool-wide log spacemaps; thus this is effectively a superset of
3542eda14cbcSMatt Macy * metaslab_flush()), but this TXG's entries still need to be written.
3543eda14cbcSMatt Macy */
3544eda14cbcSMatt Macy static void
3545eda14cbcSMatt Macy metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
3546eda14cbcSMatt Macy {
3547eda14cbcSMatt Macy range_tree_t *condense_tree;
3548eda14cbcSMatt Macy space_map_t *sm = msp->ms_sm;
3549eda14cbcSMatt Macy uint64_t txg = dmu_tx_get_txg(tx);
3550eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3551eda14cbcSMatt Macy
3552eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock));
3553eda14cbcSMatt Macy ASSERT(msp->ms_loaded);
3554eda14cbcSMatt Macy ASSERT(msp->ms_sm != NULL);
3555eda14cbcSMatt Macy
3556eda14cbcSMatt Macy /*
3557eda14cbcSMatt Macy * In order to condense the space map, we need to change it so it
3558eda14cbcSMatt Macy * only describes which segments are currently allocated and free.
3559eda14cbcSMatt Macy *
3560eda14cbcSMatt Macy * All the current free space resides in the ms_allocatable, all
3561eda14cbcSMatt Macy * the ms_defer trees, and all the ms_allocating trees. We ignore
3562eda14cbcSMatt Macy * ms_freed because it is empty, since we're in sync pass 1. We
3563eda14cbcSMatt Macy * ignore ms_freeing because these changes are not yet reflected
3564eda14cbcSMatt Macy * in the spacemap (they will be written later this txg).
3565eda14cbcSMatt Macy *
3566eda14cbcSMatt Macy * So to truncate the space map to represent all the entries of
3567eda14cbcSMatt Macy * previous TXGs we do the following:
3568eda14cbcSMatt Macy *
3569eda14cbcSMatt Macy * 1] We create a range tree (condense tree) that is 100% empty.
3570eda14cbcSMatt Macy * 2] We add to it all segments found in the ms_defer trees
3571eda14cbcSMatt Macy * as those segments are marked as free in the original space
3572eda14cbcSMatt Macy * map. We do the same with the ms_allocating trees for the same
3573eda14cbcSMatt Macy * reason. Adding these segments should be a relatively
3574eda14cbcSMatt Macy * inexpensive operation since we expect these trees to have a
3575eda14cbcSMatt Macy * small number of nodes.
3576eda14cbcSMatt Macy * 3] We vacate any unflushed allocs, since they are not frees we
3577eda14cbcSMatt Macy * need to add to the condense tree. Then we vacate any
3578eda14cbcSMatt Macy * unflushed frees as they should already be part of ms_allocatable.
3579eda14cbcSMatt Macy * 4] At this point, we would ideally like to add all segments
3580eda14cbcSMatt Macy * of the ms_allocatable tree to the condense tree. This way
3581eda14cbcSMatt Macy * we would write all the entries of the condense tree as the
3582eda14cbcSMatt Macy * condensed space map, which would only contain freed
3583eda14cbcSMatt Macy * segments with everything else assumed to be allocated.
3584eda14cbcSMatt Macy *
3585eda14cbcSMatt Macy * Doing so can be prohibitively expensive as ms_allocatable can
3586eda14cbcSMatt Macy * be large, and therefore computationally expensive to add to
3587eda14cbcSMatt Macy * the condense_tree. Instead we first sync out an entry marking
3588eda14cbcSMatt Macy * everything as allocated, then the condense_tree and then the
3589eda14cbcSMatt Macy * ms_allocatable, in the condensed space map. While this is not
3590eda14cbcSMatt Macy * optimal, it is typically close to optimal and more importantly
3591eda14cbcSMatt Macy * much cheaper to compute.
3592eda14cbcSMatt Macy *
3593eda14cbcSMatt Macy * 5] Finally, as both of the unflushed trees were written to our
3594eda14cbcSMatt Macy * new and condensed metaslab space map, we basically flushed
3595eda14cbcSMatt Macy * all the unflushed changes to disk, thus we call
3596eda14cbcSMatt Macy * metaslab_flush_update().
3597eda14cbcSMatt Macy */
3598eda14cbcSMatt Macy ASSERT3U(spa_sync_pass(spa), ==, 1);
3599eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
3600eda14cbcSMatt Macy
3601eda14cbcSMatt Macy zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
3602eda14cbcSMatt Macy "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
3603eda14cbcSMatt Macy msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
3604eda14cbcSMatt Macy spa->spa_name, space_map_length(msp->ms_sm),
3605eda14cbcSMatt Macy range_tree_numsegs(msp->ms_allocatable),
3606eda14cbcSMatt Macy msp->ms_condense_wanted ? "TRUE" : "FALSE");
3607eda14cbcSMatt Macy
3608eda14cbcSMatt Macy msp->ms_condense_wanted = B_FALSE;
3609eda14cbcSMatt Macy
3610eda14cbcSMatt Macy range_seg_type_t type;
3611eda14cbcSMatt Macy uint64_t shift, start;
3612eda14cbcSMatt Macy type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
3613eda14cbcSMatt Macy &start, &shift);
3614eda14cbcSMatt Macy
3615eda14cbcSMatt Macy condense_tree = range_tree_create(NULL, type, NULL, start, shift);
3616eda14cbcSMatt Macy
3617eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3618eda14cbcSMatt Macy range_tree_walk(msp->ms_defer[t],
3619eda14cbcSMatt Macy range_tree_add, condense_tree);
3620eda14cbcSMatt Macy }
3621eda14cbcSMatt Macy
3622eda14cbcSMatt Macy for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
3623eda14cbcSMatt Macy range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
3624eda14cbcSMatt Macy range_tree_add, condense_tree);
3625eda14cbcSMatt Macy }
3626eda14cbcSMatt Macy
3627eda14cbcSMatt Macy ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3628eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp));
3629eda14cbcSMatt Macy spa->spa_unflushed_stats.sus_memused -=
3630eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp);
3631eda14cbcSMatt Macy range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3632eda14cbcSMatt Macy range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3633eda14cbcSMatt Macy
3634eda14cbcSMatt Macy /*
3635eda14cbcSMatt Macy * We're about to drop the metaslab's lock, thus allowing other
3636eda14cbcSMatt Macy * consumers to change its content. Set the metaslab's ms_condensing
3637eda14cbcSMatt Macy * flag to ensure that allocations on this metaslab do not occur
3638eda14cbcSMatt Macy * while we're in the middle of committing it to disk. This is only
3639eda14cbcSMatt Macy * critical for ms_allocatable as all other range trees use per TXG
3640eda14cbcSMatt Macy * views of their content.
3641eda14cbcSMatt Macy */
3642eda14cbcSMatt Macy msp->ms_condensing = B_TRUE;
3643eda14cbcSMatt Macy
3644eda14cbcSMatt Macy mutex_exit(&msp->ms_lock);
3645eda14cbcSMatt Macy uint64_t object = space_map_object(msp->ms_sm);
3646eda14cbcSMatt Macy space_map_truncate(sm,
3647eda14cbcSMatt Macy spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
3648eda14cbcSMatt Macy zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);
3649eda14cbcSMatt Macy
3650eda14cbcSMatt Macy /*
3651eda14cbcSMatt Macy * space_map_truncate() may have reallocated the spacemap object.
3652eda14cbcSMatt Macy * If so, update the vdev_ms_array.
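 * The vdev_ms_array object is a flat array of space map object numbers
 * indexed by metaslab id, so recording the new object below is a single
 * dmu_write() of one uint64_t at offset ms_id * sizeof (uint64_t).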
3653eda14cbcSMatt Macy */ 3654eda14cbcSMatt Macy if (space_map_object(msp->ms_sm) != object) { 3655eda14cbcSMatt Macy object = space_map_object(msp->ms_sm); 3656eda14cbcSMatt Macy dmu_write(spa->spa_meta_objset, 3657eda14cbcSMatt Macy msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) * 3658eda14cbcSMatt Macy msp->ms_id, sizeof (uint64_t), &object, tx); 3659eda14cbcSMatt Macy } 3660eda14cbcSMatt Macy 3661eda14cbcSMatt Macy /* 3662eda14cbcSMatt Macy * Note: 3663eda14cbcSMatt Macy * When the log space map feature is enabled, each space map will 3664eda14cbcSMatt Macy * always have ALLOCS followed by FREES for each sync pass. This is 3665eda14cbcSMatt Macy * typically true even when the log space map feature is disabled, 3666eda14cbcSMatt Macy * except from the case where a metaslab goes through metaslab_sync() 3667eda14cbcSMatt Macy * and gets condensed. In that case the metaslab's space map will have 3668eda14cbcSMatt Macy * ALLOCS followed by FREES (due to condensing) followed by ALLOCS 3669eda14cbcSMatt Macy * followed by FREES (due to space_map_write() in metaslab_sync()) for 3670eda14cbcSMatt Macy * sync pass 1. 3671eda14cbcSMatt Macy */ 3672eda14cbcSMatt Macy range_tree_t *tmp_tree = range_tree_create(NULL, type, NULL, start, 3673eda14cbcSMatt Macy shift); 3674eda14cbcSMatt Macy range_tree_add(tmp_tree, msp->ms_start, msp->ms_size); 3675eda14cbcSMatt Macy space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx); 3676eda14cbcSMatt Macy space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx); 3677eda14cbcSMatt Macy space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx); 3678eda14cbcSMatt Macy 3679eda14cbcSMatt Macy range_tree_vacate(condense_tree, NULL, NULL); 3680eda14cbcSMatt Macy range_tree_destroy(condense_tree); 3681eda14cbcSMatt Macy range_tree_vacate(tmp_tree, NULL, NULL); 3682eda14cbcSMatt Macy range_tree_destroy(tmp_tree); 3683eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 3684eda14cbcSMatt Macy 3685eda14cbcSMatt Macy msp->ms_condensing = B_FALSE; 3686eda14cbcSMatt Macy metaslab_flush_update(msp, tx); 3687eda14cbcSMatt Macy } 3688eda14cbcSMatt Macy 3689eda14cbcSMatt Macy /* 3690eda14cbcSMatt Macy * Called when the metaslab has been flushed (its own spacemap now reflects 3691eda14cbcSMatt Macy * all the contents of the pool-wide spacemap log). Updates the metaslab's 3692eda14cbcSMatt Macy * metadata and any pool-wide related log space map data (e.g. summary, 3693eda14cbcSMatt Macy * obsolete logs, etc..) to reflect that. 3694eda14cbcSMatt Macy */ 3695eda14cbcSMatt Macy static void 3696eda14cbcSMatt Macy metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx) 3697eda14cbcSMatt Macy { 3698eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 3699eda14cbcSMatt Macy spa_t *spa = mg->mg_vd->vdev_spa; 3700eda14cbcSMatt Macy 3701eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 3702eda14cbcSMatt Macy 3703eda14cbcSMatt Macy ASSERT3U(spa_sync_pass(spa), ==, 1); 3704eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs)); 3705eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_unflushed_frees)); 3706eda14cbcSMatt Macy 3707eda14cbcSMatt Macy /* 3708eda14cbcSMatt Macy * Just because a metaslab got flushed, that doesn't mean that 3709eda14cbcSMatt Macy * it will pass through metaslab_sync_done(). Thus, make sure to 3710eda14cbcSMatt Macy * update ms_synced_length here in case it doesn't. 
3711eda14cbcSMatt Macy */ 3712eda14cbcSMatt Macy msp->ms_synced_length = space_map_length(msp->ms_sm); 3713eda14cbcSMatt Macy 3714eda14cbcSMatt Macy /* 3715eda14cbcSMatt Macy * We may end up here from metaslab_condense() without the 3716eda14cbcSMatt Macy * feature being active. In that case this is a no-op. 3717eda14cbcSMatt Macy */ 3718eda14cbcSMatt Macy if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) 3719eda14cbcSMatt Macy return; 3720eda14cbcSMatt Macy 3721eda14cbcSMatt Macy ASSERT(spa_syncing_log_sm(spa) != NULL); 3722eda14cbcSMatt Macy ASSERT(msp->ms_sm != NULL); 3723eda14cbcSMatt Macy ASSERT(metaslab_unflushed_txg(msp) != 0); 3724eda14cbcSMatt Macy ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp); 3725eda14cbcSMatt Macy 3726eda14cbcSMatt Macy VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa)); 3727eda14cbcSMatt Macy 3728eda14cbcSMatt Macy /* update metaslab's position in our flushing tree */ 3729eda14cbcSMatt Macy uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp); 3730eda14cbcSMatt Macy mutex_enter(&spa->spa_flushed_ms_lock); 3731eda14cbcSMatt Macy avl_remove(&spa->spa_metaslabs_by_flushed, msp); 3732eda14cbcSMatt Macy metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx); 3733eda14cbcSMatt Macy avl_add(&spa->spa_metaslabs_by_flushed, msp); 3734eda14cbcSMatt Macy mutex_exit(&spa->spa_flushed_ms_lock); 3735eda14cbcSMatt Macy 3736eda14cbcSMatt Macy /* update metaslab counts of spa_log_sm_t nodes */ 3737eda14cbcSMatt Macy spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg); 3738eda14cbcSMatt Macy spa_log_sm_increment_current_mscount(spa); 3739eda14cbcSMatt Macy 3740eda14cbcSMatt Macy /* cleanup obsolete logs if any */ 3741eda14cbcSMatt Macy uint64_t log_blocks_before = spa_log_sm_nblocks(spa); 3742eda14cbcSMatt Macy spa_cleanup_old_sm_logs(spa, tx); 3743eda14cbcSMatt Macy uint64_t log_blocks_after = spa_log_sm_nblocks(spa); 3744eda14cbcSMatt Macy VERIFY3U(log_blocks_after, <=, log_blocks_before); 3745eda14cbcSMatt Macy 3746eda14cbcSMatt Macy /* update log space map summary */ 3747eda14cbcSMatt Macy uint64_t blocks_gone = log_blocks_before - log_blocks_after; 3748eda14cbcSMatt Macy spa_log_summary_add_flushed_metaslab(spa); 3749eda14cbcSMatt Macy spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg); 3750eda14cbcSMatt Macy spa_log_summary_decrement_blkcount(spa, blocks_gone); 3751eda14cbcSMatt Macy } 3752eda14cbcSMatt Macy 3753eda14cbcSMatt Macy boolean_t 3754eda14cbcSMatt Macy metaslab_flush(metaslab_t *msp, dmu_tx_t *tx) 3755eda14cbcSMatt Macy { 3756eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 3757eda14cbcSMatt Macy 3758eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 3759eda14cbcSMatt Macy ASSERT3U(spa_sync_pass(spa), ==, 1); 3760eda14cbcSMatt Macy ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)); 3761eda14cbcSMatt Macy 3762eda14cbcSMatt Macy ASSERT(msp->ms_sm != NULL); 3763eda14cbcSMatt Macy ASSERT(metaslab_unflushed_txg(msp) != 0); 3764eda14cbcSMatt Macy ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL); 3765eda14cbcSMatt Macy 3766eda14cbcSMatt Macy /* 3767eda14cbcSMatt Macy * There is nothing wrong with flushing the same metaslab twice, as 3768eda14cbcSMatt Macy * this codepath should work on that case. However, the current 3769eda14cbcSMatt Macy * flushing scheme makes sure to avoid this situation as we would be 3770eda14cbcSMatt Macy * making all these calls without having anything meaningful to write 3771eda14cbcSMatt Macy * to disk. We assert this behavior here. 
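 * Concretely, the assertion below requires the metaslab's unflushed txg
 * to be strictly older than the txg we are syncing, i.e. that the
 * metaslab has not already been flushed in this txg.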
3772eda14cbcSMatt Macy */ 3773eda14cbcSMatt Macy ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx)); 3774eda14cbcSMatt Macy 3775eda14cbcSMatt Macy /* 3776eda14cbcSMatt Macy * We can not flush while loading, because then we would 3777eda14cbcSMatt Macy * not load the ms_unflushed_{allocs,frees}. 3778eda14cbcSMatt Macy */ 3779eda14cbcSMatt Macy if (msp->ms_loading) 3780eda14cbcSMatt Macy return (B_FALSE); 3781eda14cbcSMatt Macy 3782eda14cbcSMatt Macy metaslab_verify_space(msp, dmu_tx_get_txg(tx)); 3783eda14cbcSMatt Macy metaslab_verify_weight_and_frag(msp); 3784eda14cbcSMatt Macy 3785eda14cbcSMatt Macy /* 3786eda14cbcSMatt Macy * Metaslab condensing is effectively flushing. Therefore if the 3787eda14cbcSMatt Macy * metaslab can be condensed we can just condense it instead of 3788eda14cbcSMatt Macy * flushing it. 3789eda14cbcSMatt Macy * 3790eda14cbcSMatt Macy * Note that metaslab_condense() does call metaslab_flush_update() 3791eda14cbcSMatt Macy * so we can just return immediately after condensing. We also 3792eda14cbcSMatt Macy * don't need to care about setting ms_flushing or broadcasting 3793eda14cbcSMatt Macy * ms_flush_cv, even if we temporarily drop the ms_lock in 3794eda14cbcSMatt Macy * metaslab_condense(), as the metaslab is already loaded. 3795eda14cbcSMatt Macy */ 3796eda14cbcSMatt Macy if (msp->ms_loaded && metaslab_should_condense(msp)) { 3797eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 3798eda14cbcSMatt Macy 3799eda14cbcSMatt Macy /* 3800eda14cbcSMatt Macy * For all histogram operations below refer to the 3801eda14cbcSMatt Macy * comments of metaslab_sync() where we follow a 3802eda14cbcSMatt Macy * similar procedure. 3803eda14cbcSMatt Macy */ 3804eda14cbcSMatt Macy metaslab_group_histogram_verify(mg); 3805eda14cbcSMatt Macy metaslab_class_histogram_verify(mg->mg_class); 3806eda14cbcSMatt Macy metaslab_group_histogram_remove(mg, msp); 3807eda14cbcSMatt Macy 3808eda14cbcSMatt Macy metaslab_condense(msp, tx); 3809eda14cbcSMatt Macy 3810eda14cbcSMatt Macy space_map_histogram_clear(msp->ms_sm); 3811eda14cbcSMatt Macy space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx); 3812eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_freed)); 3813eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 3814eda14cbcSMatt Macy space_map_histogram_add(msp->ms_sm, 3815eda14cbcSMatt Macy msp->ms_defer[t], tx); 3816eda14cbcSMatt Macy } 3817eda14cbcSMatt Macy metaslab_aux_histograms_update(msp); 3818eda14cbcSMatt Macy 3819eda14cbcSMatt Macy metaslab_group_histogram_add(mg, msp); 3820eda14cbcSMatt Macy metaslab_group_histogram_verify(mg); 3821eda14cbcSMatt Macy metaslab_class_histogram_verify(mg->mg_class); 3822eda14cbcSMatt Macy 3823eda14cbcSMatt Macy metaslab_verify_space(msp, dmu_tx_get_txg(tx)); 3824eda14cbcSMatt Macy 3825eda14cbcSMatt Macy /* 3826eda14cbcSMatt Macy * Since we recreated the histogram (and potentially 3827eda14cbcSMatt Macy * the ms_sm too while condensing) ensure that the 3828eda14cbcSMatt Macy * weight is updated too because we are not guaranteed 3829eda14cbcSMatt Macy * that this metaslab is dirty and will go through 3830eda14cbcSMatt Macy * metaslab_sync_done(). 
3831eda14cbcSMatt Macy */ 3832eda14cbcSMatt Macy metaslab_recalculate_weight_and_sort(msp); 3833eda14cbcSMatt Macy return (B_TRUE); 3834eda14cbcSMatt Macy } 3835eda14cbcSMatt Macy 3836eda14cbcSMatt Macy msp->ms_flushing = B_TRUE; 3837eda14cbcSMatt Macy uint64_t sm_len_before = space_map_length(msp->ms_sm); 3838eda14cbcSMatt Macy 3839eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 3840eda14cbcSMatt Macy space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC, 3841eda14cbcSMatt Macy SM_NO_VDEVID, tx); 3842eda14cbcSMatt Macy space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE, 3843eda14cbcSMatt Macy SM_NO_VDEVID, tx); 3844eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 3845eda14cbcSMatt Macy 3846eda14cbcSMatt Macy uint64_t sm_len_after = space_map_length(msp->ms_sm); 3847eda14cbcSMatt Macy if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) { 3848eda14cbcSMatt Macy zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, " 3849eda14cbcSMatt Macy "ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, " 3850eda14cbcSMatt Macy "appended %llu bytes", dmu_tx_get_txg(tx), spa_name(spa), 3851eda14cbcSMatt Macy msp->ms_group->mg_vd->vdev_id, msp->ms_id, 3852eda14cbcSMatt Macy range_tree_space(msp->ms_unflushed_allocs), 3853eda14cbcSMatt Macy range_tree_space(msp->ms_unflushed_frees), 3854eda14cbcSMatt Macy (sm_len_after - sm_len_before)); 3855eda14cbcSMatt Macy } 3856eda14cbcSMatt Macy 3857eda14cbcSMatt Macy ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=, 3858eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp)); 3859eda14cbcSMatt Macy spa->spa_unflushed_stats.sus_memused -= 3860eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp); 3861eda14cbcSMatt Macy range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL); 3862eda14cbcSMatt Macy range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL); 3863eda14cbcSMatt Macy 3864eda14cbcSMatt Macy metaslab_verify_space(msp, dmu_tx_get_txg(tx)); 3865eda14cbcSMatt Macy metaslab_verify_weight_and_frag(msp); 3866eda14cbcSMatt Macy 3867eda14cbcSMatt Macy metaslab_flush_update(msp, tx); 3868eda14cbcSMatt Macy 3869eda14cbcSMatt Macy metaslab_verify_space(msp, dmu_tx_get_txg(tx)); 3870eda14cbcSMatt Macy metaslab_verify_weight_and_frag(msp); 3871eda14cbcSMatt Macy 3872eda14cbcSMatt Macy msp->ms_flushing = B_FALSE; 3873eda14cbcSMatt Macy cv_broadcast(&msp->ms_flush_cv); 3874eda14cbcSMatt Macy return (B_TRUE); 3875eda14cbcSMatt Macy } 3876eda14cbcSMatt Macy 3877eda14cbcSMatt Macy /* 3878eda14cbcSMatt Macy * Write a metaslab to disk in the context of the specified transaction group. 3879eda14cbcSMatt Macy */ 3880eda14cbcSMatt Macy void 3881eda14cbcSMatt Macy metaslab_sync(metaslab_t *msp, uint64_t txg) 3882eda14cbcSMatt Macy { 3883eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 3884eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 3885eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 3886eda14cbcSMatt Macy objset_t *mos = spa_meta_objset(spa); 3887eda14cbcSMatt Macy range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK]; 3888eda14cbcSMatt Macy dmu_tx_t *tx; 3889eda14cbcSMatt Macy 3890eda14cbcSMatt Macy ASSERT(!vd->vdev_ishole); 3891eda14cbcSMatt Macy 3892eda14cbcSMatt Macy /* 3893eda14cbcSMatt Macy * This metaslab has just been added so there's no work to do now. 
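 * (A metaslab in this state still has ms_freeing == NULL; its range
 * trees are only created the first time it goes through
 * metaslab_sync_done(), in the ms_freed == NULL branch there.)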
3894eda14cbcSMatt Macy */ 3895eda14cbcSMatt Macy if (msp->ms_freeing == NULL) { 3896eda14cbcSMatt Macy ASSERT3P(alloctree, ==, NULL); 3897eda14cbcSMatt Macy return; 3898eda14cbcSMatt Macy } 3899eda14cbcSMatt Macy 3900eda14cbcSMatt Macy ASSERT3P(alloctree, !=, NULL); 3901eda14cbcSMatt Macy ASSERT3P(msp->ms_freeing, !=, NULL); 3902eda14cbcSMatt Macy ASSERT3P(msp->ms_freed, !=, NULL); 3903eda14cbcSMatt Macy ASSERT3P(msp->ms_checkpointing, !=, NULL); 3904eda14cbcSMatt Macy ASSERT3P(msp->ms_trim, !=, NULL); 3905eda14cbcSMatt Macy 3906eda14cbcSMatt Macy /* 3907eda14cbcSMatt Macy * Normally, we don't want to process a metaslab if there are no 3908eda14cbcSMatt Macy * allocations or frees to perform. However, if the metaslab is being 3909eda14cbcSMatt Macy * forced to condense, it's loaded and we're not beyond the final 3910eda14cbcSMatt Macy * dirty txg, we need to let it through. Not condensing beyond the 3911eda14cbcSMatt Macy * final dirty txg prevents an issue where metaslabs that need to be 3912eda14cbcSMatt Macy * condensed but were loaded for other reasons could cause a panic 3913eda14cbcSMatt Macy * here. By only checking the txg in that branch of the conditional, 3914eda14cbcSMatt Macy * we preserve the utility of the VERIFY statements in all other 3915eda14cbcSMatt Macy * cases. 3916eda14cbcSMatt Macy */ 3917eda14cbcSMatt Macy if (range_tree_is_empty(alloctree) && 3918eda14cbcSMatt Macy range_tree_is_empty(msp->ms_freeing) && 3919eda14cbcSMatt Macy range_tree_is_empty(msp->ms_checkpointing) && 3920eda14cbcSMatt Macy !(msp->ms_loaded && msp->ms_condense_wanted && 3921eda14cbcSMatt Macy txg <= spa_final_dirty_txg(spa))) 3922eda14cbcSMatt Macy return; 3923eda14cbcSMatt Macy 3924eda14cbcSMatt Macy 3925eda14cbcSMatt Macy VERIFY3U(txg, <=, spa_final_dirty_txg(spa)); 3926eda14cbcSMatt Macy 3927eda14cbcSMatt Macy /* 3928eda14cbcSMatt Macy * The only state that can actually be changing concurrently 3929eda14cbcSMatt Macy * with metaslab_sync() is the metaslab's ms_allocatable. No 3930eda14cbcSMatt Macy * other thread can be modifying this txg's alloc, freeing, 3931eda14cbcSMatt Macy * freed, or space_map_phys_t. We drop ms_lock whenever we 3932eda14cbcSMatt Macy * could call into the DMU, because the DMU can call down to 3933eda14cbcSMatt Macy * us (e.g. via zio_free()) at any time. 3934eda14cbcSMatt Macy * 3935eda14cbcSMatt Macy * The spa_vdev_remove_thread() can be reading metaslab state 3936eda14cbcSMatt Macy * concurrently, and it is locked out by the ms_sync_lock. 3937eda14cbcSMatt Macy * Note that the ms_lock is insufficient for this, because it 3938eda14cbcSMatt Macy * is dropped by space_map_write(). 3939eda14cbcSMatt Macy */ 3940eda14cbcSMatt Macy tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); 3941eda14cbcSMatt Macy 3942eda14cbcSMatt Macy /* 3943eda14cbcSMatt Macy * Generate a log space map if one doesn't exist already. 3944eda14cbcSMatt Macy */ 3945eda14cbcSMatt Macy spa_generate_syncing_log_sm(spa, tx); 3946eda14cbcSMatt Macy 3947eda14cbcSMatt Macy if (msp->ms_sm == NULL) { 3948eda14cbcSMatt Macy uint64_t new_object = space_map_alloc(mos, 3949eda14cbcSMatt Macy spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ? 
3950eda14cbcSMatt Macy zfs_metaslab_sm_blksz_with_log : 3951eda14cbcSMatt Macy zfs_metaslab_sm_blksz_no_log, tx); 3952eda14cbcSMatt Macy VERIFY3U(new_object, !=, 0); 3953eda14cbcSMatt Macy 3954eda14cbcSMatt Macy dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) * 3955eda14cbcSMatt Macy msp->ms_id, sizeof (uint64_t), &new_object, tx); 3956eda14cbcSMatt Macy 3957eda14cbcSMatt Macy VERIFY0(space_map_open(&msp->ms_sm, mos, new_object, 3958eda14cbcSMatt Macy msp->ms_start, msp->ms_size, vd->vdev_ashift)); 3959eda14cbcSMatt Macy ASSERT(msp->ms_sm != NULL); 3960eda14cbcSMatt Macy 3961eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs)); 3962eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_unflushed_frees)); 3963eda14cbcSMatt Macy ASSERT0(metaslab_allocated_space(msp)); 3964eda14cbcSMatt Macy } 3965eda14cbcSMatt Macy 3966eda14cbcSMatt Macy if (metaslab_unflushed_txg(msp) == 0 && 3967eda14cbcSMatt Macy spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) { 3968eda14cbcSMatt Macy ASSERT(spa_syncing_log_sm(spa) != NULL); 3969eda14cbcSMatt Macy 3970eda14cbcSMatt Macy metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx); 3971eda14cbcSMatt Macy spa_log_sm_increment_current_mscount(spa); 3972eda14cbcSMatt Macy spa_log_summary_add_flushed_metaslab(spa); 3973eda14cbcSMatt Macy 3974eda14cbcSMatt Macy ASSERT(msp->ms_sm != NULL); 3975eda14cbcSMatt Macy mutex_enter(&spa->spa_flushed_ms_lock); 3976eda14cbcSMatt Macy avl_add(&spa->spa_metaslabs_by_flushed, msp); 3977eda14cbcSMatt Macy mutex_exit(&spa->spa_flushed_ms_lock); 3978eda14cbcSMatt Macy 3979eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs)); 3980eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_unflushed_frees)); 3981eda14cbcSMatt Macy } 3982eda14cbcSMatt Macy 3983eda14cbcSMatt Macy if (!range_tree_is_empty(msp->ms_checkpointing) && 3984eda14cbcSMatt Macy vd->vdev_checkpoint_sm == NULL) { 3985eda14cbcSMatt Macy ASSERT(spa_has_checkpoint(spa)); 3986eda14cbcSMatt Macy 3987eda14cbcSMatt Macy uint64_t new_object = space_map_alloc(mos, 3988eda14cbcSMatt Macy zfs_vdev_standard_sm_blksz, tx); 3989eda14cbcSMatt Macy VERIFY3U(new_object, !=, 0); 3990eda14cbcSMatt Macy 3991eda14cbcSMatt Macy VERIFY0(space_map_open(&vd->vdev_checkpoint_sm, 3992eda14cbcSMatt Macy mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift)); 3993eda14cbcSMatt Macy ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL); 3994eda14cbcSMatt Macy 3995eda14cbcSMatt Macy /* 3996eda14cbcSMatt Macy * We save the space map object as an entry in vdev_top_zap 3997eda14cbcSMatt Macy * so it can be retrieved when the pool is reopened after an 3998eda14cbcSMatt Macy * export or through zdb. 3999eda14cbcSMatt Macy */ 4000eda14cbcSMatt Macy VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, 4001eda14cbcSMatt Macy vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, 4002eda14cbcSMatt Macy sizeof (new_object), 1, &new_object, tx)); 4003eda14cbcSMatt Macy } 4004eda14cbcSMatt Macy 4005eda14cbcSMatt Macy mutex_enter(&msp->ms_sync_lock); 4006eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 4007eda14cbcSMatt Macy 4008eda14cbcSMatt Macy /* 4009eda14cbcSMatt Macy * Note: metaslab_condense() clears the space map's histogram. 4010eda14cbcSMatt Macy * Therefore we must verify and remove this histogram before 4011eda14cbcSMatt Macy * condensing. 
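 * (The group and class histogram contributions are added back further
 * below, once the space map histogram has been rebuilt from the in-core
 * range trees.)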
4012eda14cbcSMatt Macy */ 4013eda14cbcSMatt Macy metaslab_group_histogram_verify(mg); 4014eda14cbcSMatt Macy metaslab_class_histogram_verify(mg->mg_class); 4015eda14cbcSMatt Macy metaslab_group_histogram_remove(mg, msp); 4016eda14cbcSMatt Macy 4017eda14cbcSMatt Macy if (spa->spa_sync_pass == 1 && msp->ms_loaded && 4018eda14cbcSMatt Macy metaslab_should_condense(msp)) 4019eda14cbcSMatt Macy metaslab_condense(msp, tx); 4020eda14cbcSMatt Macy 4021eda14cbcSMatt Macy /* 4022eda14cbcSMatt Macy * We'll be going to disk to sync our space accounting, thus we 4023eda14cbcSMatt Macy * drop the ms_lock during that time so allocations coming from 4024eda14cbcSMatt Macy * open-context (ZIL) for future TXGs do not block. 4025eda14cbcSMatt Macy */ 4026eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 4027eda14cbcSMatt Macy space_map_t *log_sm = spa_syncing_log_sm(spa); 4028eda14cbcSMatt Macy if (log_sm != NULL) { 4029eda14cbcSMatt Macy ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP)); 4030eda14cbcSMatt Macy 4031eda14cbcSMatt Macy space_map_write(log_sm, alloctree, SM_ALLOC, 4032eda14cbcSMatt Macy vd->vdev_id, tx); 4033eda14cbcSMatt Macy space_map_write(log_sm, msp->ms_freeing, SM_FREE, 4034eda14cbcSMatt Macy vd->vdev_id, tx); 4035eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 4036eda14cbcSMatt Macy 4037eda14cbcSMatt Macy ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=, 4038eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp)); 4039eda14cbcSMatt Macy spa->spa_unflushed_stats.sus_memused -= 4040eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp); 4041eda14cbcSMatt Macy range_tree_remove_xor_add(alloctree, 4042eda14cbcSMatt Macy msp->ms_unflushed_frees, msp->ms_unflushed_allocs); 4043eda14cbcSMatt Macy range_tree_remove_xor_add(msp->ms_freeing, 4044eda14cbcSMatt Macy msp->ms_unflushed_allocs, msp->ms_unflushed_frees); 4045eda14cbcSMatt Macy spa->spa_unflushed_stats.sus_memused += 4046eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp); 4047eda14cbcSMatt Macy } else { 4048eda14cbcSMatt Macy ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP)); 4049eda14cbcSMatt Macy 4050eda14cbcSMatt Macy space_map_write(msp->ms_sm, alloctree, SM_ALLOC, 4051eda14cbcSMatt Macy SM_NO_VDEVID, tx); 4052eda14cbcSMatt Macy space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE, 4053eda14cbcSMatt Macy SM_NO_VDEVID, tx); 4054eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 4055eda14cbcSMatt Macy } 4056eda14cbcSMatt Macy 4057eda14cbcSMatt Macy msp->ms_allocated_space += range_tree_space(alloctree); 4058eda14cbcSMatt Macy ASSERT3U(msp->ms_allocated_space, >=, 4059eda14cbcSMatt Macy range_tree_space(msp->ms_freeing)); 4060eda14cbcSMatt Macy msp->ms_allocated_space -= range_tree_space(msp->ms_freeing); 4061eda14cbcSMatt Macy 4062eda14cbcSMatt Macy if (!range_tree_is_empty(msp->ms_checkpointing)) { 4063eda14cbcSMatt Macy ASSERT(spa_has_checkpoint(spa)); 4064eda14cbcSMatt Macy ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL); 4065eda14cbcSMatt Macy 4066eda14cbcSMatt Macy /* 4067eda14cbcSMatt Macy * Since we are doing writes to disk and the ms_checkpointing 4068eda14cbcSMatt Macy * tree won't be changing during that time, we drop the 4069eda14cbcSMatt Macy * ms_lock while writing to the checkpoint space map, for the 4070eda14cbcSMatt Macy * same reason mentioned above. 
4071eda14cbcSMatt Macy */ 4072eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 4073eda14cbcSMatt Macy space_map_write(vd->vdev_checkpoint_sm, 4074eda14cbcSMatt Macy msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx); 4075eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 4076eda14cbcSMatt Macy 4077eda14cbcSMatt Macy spa->spa_checkpoint_info.sci_dspace += 4078eda14cbcSMatt Macy range_tree_space(msp->ms_checkpointing); 4079eda14cbcSMatt Macy vd->vdev_stat.vs_checkpoint_space += 4080eda14cbcSMatt Macy range_tree_space(msp->ms_checkpointing); 4081eda14cbcSMatt Macy ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==, 4082eda14cbcSMatt Macy -space_map_allocated(vd->vdev_checkpoint_sm)); 4083eda14cbcSMatt Macy 4084eda14cbcSMatt Macy range_tree_vacate(msp->ms_checkpointing, NULL, NULL); 4085eda14cbcSMatt Macy } 4086eda14cbcSMatt Macy 4087eda14cbcSMatt Macy if (msp->ms_loaded) { 4088eda14cbcSMatt Macy /* 4089eda14cbcSMatt Macy * When the space map is loaded, we have an accurate 4090eda14cbcSMatt Macy * histogram in the range tree. This gives us an opportunity 4091eda14cbcSMatt Macy * to bring the space map's histogram up-to-date so we clear 4092eda14cbcSMatt Macy * it first before updating it. 4093eda14cbcSMatt Macy */ 4094eda14cbcSMatt Macy space_map_histogram_clear(msp->ms_sm); 4095eda14cbcSMatt Macy space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx); 4096eda14cbcSMatt Macy 4097eda14cbcSMatt Macy /* 4098eda14cbcSMatt Macy * Since we've cleared the histogram we need to add back 4099eda14cbcSMatt Macy * any free space that has already been processed, plus 4100eda14cbcSMatt Macy * any deferred space. This allows the on-disk histogram 4101eda14cbcSMatt Macy * to accurately reflect all free space even if some space 4102eda14cbcSMatt Macy * is not yet available for allocation (i.e. deferred). 4103eda14cbcSMatt Macy */ 4104eda14cbcSMatt Macy space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx); 4105eda14cbcSMatt Macy 4106eda14cbcSMatt Macy /* 4107eda14cbcSMatt Macy * Add back any deferred free space that has not been 4108eda14cbcSMatt Macy * added back into the in-core free tree yet. This will 4109eda14cbcSMatt Macy * ensure that we don't end up with a space map histogram 4110eda14cbcSMatt Macy * that is completely empty unless the metaslab is fully 4111eda14cbcSMatt Macy * allocated. 4112eda14cbcSMatt Macy */ 4113eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 4114eda14cbcSMatt Macy space_map_histogram_add(msp->ms_sm, 4115eda14cbcSMatt Macy msp->ms_defer[t], tx); 4116eda14cbcSMatt Macy } 4117eda14cbcSMatt Macy } 4118eda14cbcSMatt Macy 4119eda14cbcSMatt Macy /* 4120eda14cbcSMatt Macy * Always add the free space from this sync pass to the space 4121eda14cbcSMatt Macy * map histogram. We want to make sure that the on-disk histogram 4122eda14cbcSMatt Macy * accounts for all free space. If the space map is not loaded, 4123eda14cbcSMatt Macy * then we will lose some accuracy but will correct it the next 4124eda14cbcSMatt Macy * time we load the space map. 
4125eda14cbcSMatt Macy */ 4126eda14cbcSMatt Macy space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx); 4127eda14cbcSMatt Macy metaslab_aux_histograms_update(msp); 4128eda14cbcSMatt Macy 4129eda14cbcSMatt Macy metaslab_group_histogram_add(mg, msp); 4130eda14cbcSMatt Macy metaslab_group_histogram_verify(mg); 4131eda14cbcSMatt Macy metaslab_class_histogram_verify(mg->mg_class); 4132eda14cbcSMatt Macy 4133eda14cbcSMatt Macy /* 4134eda14cbcSMatt Macy * For sync pass 1, we avoid traversing this txg's free range tree 4135eda14cbcSMatt Macy * and instead will just swap the pointers for freeing and freed. 4136eda14cbcSMatt Macy * We can safely do this since the freed_tree is guaranteed to be 4137eda14cbcSMatt Macy * empty on the initial pass. 4138eda14cbcSMatt Macy * 4139eda14cbcSMatt Macy * Keep in mind that even if we are currently using a log spacemap 4140eda14cbcSMatt Macy * we want current frees to end up in the ms_allocatable (but not 4141eda14cbcSMatt Macy * get appended to the ms_sm) so their ranges can be reused as usual. 4142eda14cbcSMatt Macy */ 4143eda14cbcSMatt Macy if (spa_sync_pass(spa) == 1) { 4144eda14cbcSMatt Macy range_tree_swap(&msp->ms_freeing, &msp->ms_freed); 4145eda14cbcSMatt Macy ASSERT0(msp->ms_allocated_this_txg); 4146eda14cbcSMatt Macy } else { 4147eda14cbcSMatt Macy range_tree_vacate(msp->ms_freeing, 4148eda14cbcSMatt Macy range_tree_add, msp->ms_freed); 4149eda14cbcSMatt Macy } 4150eda14cbcSMatt Macy msp->ms_allocated_this_txg += range_tree_space(alloctree); 4151eda14cbcSMatt Macy range_tree_vacate(alloctree, NULL, NULL); 4152eda14cbcSMatt Macy 4153eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK])); 4154eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg) 4155eda14cbcSMatt Macy & TXG_MASK])); 4156eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_freeing)); 4157eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_checkpointing)); 4158eda14cbcSMatt Macy 4159eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 4160eda14cbcSMatt Macy 4161eda14cbcSMatt Macy /* 4162eda14cbcSMatt Macy * Verify that the space map object ID has been recorded in the 4163eda14cbcSMatt Macy * vdev_ms_array. 
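 * In other words, read back the uint64_t at offset ms_id *
 * sizeof (uint64_t) from the vdev_ms_array and verify that it matches
 * space_map_object(msp->ms_sm).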
4164eda14cbcSMatt Macy */ 4165eda14cbcSMatt Macy uint64_t object; 4166eda14cbcSMatt Macy VERIFY0(dmu_read(mos, vd->vdev_ms_array, 4167eda14cbcSMatt Macy msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0)); 4168eda14cbcSMatt Macy VERIFY3U(object, ==, space_map_object(msp->ms_sm)); 4169eda14cbcSMatt Macy 4170eda14cbcSMatt Macy mutex_exit(&msp->ms_sync_lock); 4171eda14cbcSMatt Macy dmu_tx_commit(tx); 4172eda14cbcSMatt Macy } 4173eda14cbcSMatt Macy 4174eda14cbcSMatt Macy static void 4175eda14cbcSMatt Macy metaslab_evict(metaslab_t *msp, uint64_t txg) 4176eda14cbcSMatt Macy { 4177eda14cbcSMatt Macy if (!msp->ms_loaded || msp->ms_disabled != 0) 4178eda14cbcSMatt Macy return; 4179eda14cbcSMatt Macy 4180eda14cbcSMatt Macy for (int t = 1; t < TXG_CONCURRENT_STATES; t++) { 4181eda14cbcSMatt Macy VERIFY0(range_tree_space( 4182eda14cbcSMatt Macy msp->ms_allocating[(txg + t) & TXG_MASK])); 4183eda14cbcSMatt Macy } 4184eda14cbcSMatt Macy if (msp->ms_allocator != -1) 4185eda14cbcSMatt Macy metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK); 4186eda14cbcSMatt Macy 4187eda14cbcSMatt Macy if (!metaslab_debug_unload) 4188eda14cbcSMatt Macy metaslab_unload(msp); 4189eda14cbcSMatt Macy } 4190eda14cbcSMatt Macy 4191eda14cbcSMatt Macy /* 4192eda14cbcSMatt Macy * Called after a transaction group has completely synced to mark 4193eda14cbcSMatt Macy * all of the metaslab's free space as usable. 4194eda14cbcSMatt Macy */ 4195eda14cbcSMatt Macy void 4196eda14cbcSMatt Macy metaslab_sync_done(metaslab_t *msp, uint64_t txg) 4197eda14cbcSMatt Macy { 4198eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 4199eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 4200eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 4201eda14cbcSMatt Macy range_tree_t **defer_tree; 4202eda14cbcSMatt Macy int64_t alloc_delta, defer_delta; 4203eda14cbcSMatt Macy boolean_t defer_allowed = B_TRUE; 4204eda14cbcSMatt Macy 4205eda14cbcSMatt Macy ASSERT(!vd->vdev_ishole); 4206eda14cbcSMatt Macy 4207eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 4208eda14cbcSMatt Macy 4209eda14cbcSMatt Macy /* 4210eda14cbcSMatt Macy * If this metaslab is just becoming available, initialize its 4211eda14cbcSMatt Macy * range trees and add its capacity to the vdev. 
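 * This creates the per-txg ms_allocating trees, ms_freeing, ms_freed,
 * the ms_defer trees, ms_checkpointing and the unflushed allocs/frees
 * trees, and credits ms_size to the vdev's metaslab class via
 * metaslab_space_update().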
4212eda14cbcSMatt Macy */ 4213eda14cbcSMatt Macy if (msp->ms_freed == NULL) { 4214eda14cbcSMatt Macy range_seg_type_t type; 4215eda14cbcSMatt Macy uint64_t shift, start; 4216eda14cbcSMatt Macy type = metaslab_calculate_range_tree_type(vd, msp, &start, 4217eda14cbcSMatt Macy &shift); 4218eda14cbcSMatt Macy 4219eda14cbcSMatt Macy for (int t = 0; t < TXG_SIZE; t++) { 4220eda14cbcSMatt Macy ASSERT(msp->ms_allocating[t] == NULL); 4221eda14cbcSMatt Macy 4222eda14cbcSMatt Macy msp->ms_allocating[t] = range_tree_create(NULL, type, 4223eda14cbcSMatt Macy NULL, start, shift); 4224eda14cbcSMatt Macy } 4225eda14cbcSMatt Macy 4226eda14cbcSMatt Macy ASSERT3P(msp->ms_freeing, ==, NULL); 4227eda14cbcSMatt Macy msp->ms_freeing = range_tree_create(NULL, type, NULL, start, 4228eda14cbcSMatt Macy shift); 4229eda14cbcSMatt Macy 4230eda14cbcSMatt Macy ASSERT3P(msp->ms_freed, ==, NULL); 4231eda14cbcSMatt Macy msp->ms_freed = range_tree_create(NULL, type, NULL, start, 4232eda14cbcSMatt Macy shift); 4233eda14cbcSMatt Macy 4234eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 4235eda14cbcSMatt Macy ASSERT3P(msp->ms_defer[t], ==, NULL); 4236eda14cbcSMatt Macy msp->ms_defer[t] = range_tree_create(NULL, type, NULL, 4237eda14cbcSMatt Macy start, shift); 4238eda14cbcSMatt Macy } 4239eda14cbcSMatt Macy 4240eda14cbcSMatt Macy ASSERT3P(msp->ms_checkpointing, ==, NULL); 4241eda14cbcSMatt Macy msp->ms_checkpointing = range_tree_create(NULL, type, NULL, 4242eda14cbcSMatt Macy start, shift); 4243eda14cbcSMatt Macy 4244eda14cbcSMatt Macy ASSERT3P(msp->ms_unflushed_allocs, ==, NULL); 4245eda14cbcSMatt Macy msp->ms_unflushed_allocs = range_tree_create(NULL, type, NULL, 4246eda14cbcSMatt Macy start, shift); 4247eda14cbcSMatt Macy 4248eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP); 4249eda14cbcSMatt Macy mrap->mra_bt = &msp->ms_unflushed_frees_by_size; 4250eda14cbcSMatt Macy mrap->mra_floor_shift = metaslab_by_size_min_shift; 4251eda14cbcSMatt Macy ASSERT3P(msp->ms_unflushed_frees, ==, NULL); 4252eda14cbcSMatt Macy msp->ms_unflushed_frees = range_tree_create(&metaslab_rt_ops, 4253eda14cbcSMatt Macy type, mrap, start, shift); 4254eda14cbcSMatt Macy 4255eda14cbcSMatt Macy metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size); 4256eda14cbcSMatt Macy } 4257eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_freeing)); 4258eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_checkpointing)); 4259eda14cbcSMatt Macy 4260eda14cbcSMatt Macy defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE]; 4261eda14cbcSMatt Macy 4262eda14cbcSMatt Macy uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) - 4263eda14cbcSMatt Macy metaslab_class_get_alloc(spa_normal_class(spa)); 4264eda14cbcSMatt Macy if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) { 4265eda14cbcSMatt Macy defer_allowed = B_FALSE; 4266eda14cbcSMatt Macy } 4267eda14cbcSMatt Macy 4268eda14cbcSMatt Macy defer_delta = 0; 4269eda14cbcSMatt Macy alloc_delta = msp->ms_allocated_this_txg - 4270eda14cbcSMatt Macy range_tree_space(msp->ms_freed); 4271eda14cbcSMatt Macy 4272eda14cbcSMatt Macy if (defer_allowed) { 4273eda14cbcSMatt Macy defer_delta = range_tree_space(msp->ms_freed) - 4274eda14cbcSMatt Macy range_tree_space(*defer_tree); 4275eda14cbcSMatt Macy } else { 4276eda14cbcSMatt Macy defer_delta -= range_tree_space(*defer_tree); 4277eda14cbcSMatt Macy } 4278eda14cbcSMatt Macy metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta, 4279eda14cbcSMatt Macy defer_delta, 0); 4280eda14cbcSMatt Macy 
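/*
 * To illustrate the accounting above with made-up numbers: if 10M were
 * allocated in this txg, 4M were freed, and 1M is coming back out of the
 * defer tree (with deferral allowed), then alloc_delta = 6M and
 * defer_delta = 3M, so the vdev's allocated space changes by
 * alloc_delta + defer_delta = 9M (the 10M of new allocations minus the
 * 1M of deferred frees that just became usable again), while deferred
 * space grows by 3M.
 */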
4281eda14cbcSMatt Macy if (spa_syncing_log_sm(spa) == NULL) {
4282eda14cbcSMatt Macy /*
4283eda14cbcSMatt Macy * If there's a metaslab_load() in progress and we don't have
4284eda14cbcSMatt Macy * a log space map, it means that we probably wrote to the
4285eda14cbcSMatt Macy * metaslab's space map. If this is the case, we need to
4286eda14cbcSMatt Macy * make sure that we wait for the load to complete so that we
4287eda14cbcSMatt Macy * have a consistent view at the in-core side of the metaslab.
4288eda14cbcSMatt Macy */
4289eda14cbcSMatt Macy metaslab_load_wait(msp);
4290eda14cbcSMatt Macy } else {
4291eda14cbcSMatt Macy ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
4292eda14cbcSMatt Macy }
4293eda14cbcSMatt Macy
4294eda14cbcSMatt Macy /*
4295eda14cbcSMatt Macy * When auto-trimming is enabled, free ranges which are added to
4296eda14cbcSMatt Macy * ms_allocatable are also added to ms_trim. The ms_trim tree is
4297eda14cbcSMatt Macy * periodically consumed by the vdev_autotrim_thread() which issues
4298eda14cbcSMatt Macy * trims for all ranges and then vacates the tree. The ms_trim tree
4299eda14cbcSMatt Macy * can be discarded at any time with the sole consequence of recent
4300eda14cbcSMatt Macy * frees not being trimmed.
4301eda14cbcSMatt Macy */
4302eda14cbcSMatt Macy if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
4303eda14cbcSMatt Macy range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
4304eda14cbcSMatt Macy if (!defer_allowed) {
4305eda14cbcSMatt Macy range_tree_walk(msp->ms_freed, range_tree_add,
4306eda14cbcSMatt Macy msp->ms_trim);
4307eda14cbcSMatt Macy }
4308eda14cbcSMatt Macy } else {
4309eda14cbcSMatt Macy range_tree_vacate(msp->ms_trim, NULL, NULL);
4310eda14cbcSMatt Macy }
4311eda14cbcSMatt Macy
4312eda14cbcSMatt Macy /*
4313eda14cbcSMatt Macy * Move the frees from the defer_tree back to the free
4314eda14cbcSMatt Macy * range tree (if it's loaded). Swap the freed_tree and
4315eda14cbcSMatt Macy * the defer_tree -- this is safe to do because we've
4316eda14cbcSMatt Macy * just emptied out the defer_tree.
4317eda14cbcSMatt Macy */
4318eda14cbcSMatt Macy range_tree_vacate(*defer_tree,
4319eda14cbcSMatt Macy msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
4320eda14cbcSMatt Macy if (defer_allowed) {
4321eda14cbcSMatt Macy range_tree_swap(&msp->ms_freed, defer_tree);
4322eda14cbcSMatt Macy } else {
4323eda14cbcSMatt Macy range_tree_vacate(msp->ms_freed,
4324eda14cbcSMatt Macy msp->ms_loaded ? range_tree_add : NULL,
4325eda14cbcSMatt Macy msp->ms_allocatable);
4326eda14cbcSMatt Macy }
4327eda14cbcSMatt Macy
4328eda14cbcSMatt Macy msp->ms_synced_length = space_map_length(msp->ms_sm);
4329eda14cbcSMatt Macy
4330eda14cbcSMatt Macy msp->ms_deferspace += defer_delta;
4331eda14cbcSMatt Macy ASSERT3S(msp->ms_deferspace, >=, 0);
4332eda14cbcSMatt Macy ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
4333eda14cbcSMatt Macy if (msp->ms_deferspace != 0) {
4334eda14cbcSMatt Macy /*
4335eda14cbcSMatt Macy * Keep syncing this metaslab until all deferred frees
4336eda14cbcSMatt Macy * are back in circulation.
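 * Dirtying the metaslab for txg + 1 below guarantees another pass
 * through metaslab_sync() and metaslab_sync_done(), so the remaining
 * defer trees keep draining over the following txgs.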
4337eda14cbcSMatt Macy */
4338eda14cbcSMatt Macy vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
4339eda14cbcSMatt Macy }
4340eda14cbcSMatt Macy metaslab_aux_histograms_update_done(msp, defer_allowed);
4341eda14cbcSMatt Macy
4342eda14cbcSMatt Macy if (msp->ms_new) {
4343eda14cbcSMatt Macy msp->ms_new = B_FALSE;
4344eda14cbcSMatt Macy mutex_enter(&mg->mg_lock);
4345eda14cbcSMatt Macy mg->mg_ms_ready++;
4346eda14cbcSMatt Macy mutex_exit(&mg->mg_lock);
4347eda14cbcSMatt Macy }
4348eda14cbcSMatt Macy
4349eda14cbcSMatt Macy /*
4350eda14cbcSMatt Macy * Re-sort metaslab within its group now that we've adjusted
4351eda14cbcSMatt Macy * its allocatable space.
4352eda14cbcSMatt Macy */
4353eda14cbcSMatt Macy metaslab_recalculate_weight_and_sort(msp);
4354eda14cbcSMatt Macy
4355eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4356eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_freeing));
4357eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_freed));
4358eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_checkpointing));
4359eda14cbcSMatt Macy msp->ms_allocating_total -= msp->ms_allocated_this_txg;
4360eda14cbcSMatt Macy msp->ms_allocated_this_txg = 0;
4361eda14cbcSMatt Macy mutex_exit(&msp->ms_lock);
4362eda14cbcSMatt Macy }
4363eda14cbcSMatt Macy
4364eda14cbcSMatt Macy void
4365eda14cbcSMatt Macy metaslab_sync_reassess(metaslab_group_t *mg)
4366eda14cbcSMatt Macy {
4367eda14cbcSMatt Macy spa_t *spa = mg->mg_class->mc_spa;
4368eda14cbcSMatt Macy
4369eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
4370eda14cbcSMatt Macy metaslab_group_alloc_update(mg);
4371eda14cbcSMatt Macy mg->mg_fragmentation = metaslab_group_fragmentation(mg);
4372eda14cbcSMatt Macy
4373eda14cbcSMatt Macy /*
4374eda14cbcSMatt Macy * Preload the next potential metaslabs but only on active
4375eda14cbcSMatt Macy * metaslab groups. We can get into a state where the metaslab
4376eda14cbcSMatt Macy * is no longer active since we dirty metaslabs as we remove
4377eda14cbcSMatt Macy * a device, thus potentially making the metaslab group eligible
4378eda14cbcSMatt Macy * for preloading.
4379eda14cbcSMatt Macy */
4380eda14cbcSMatt Macy if (mg->mg_activation_count > 0) {
4381eda14cbcSMatt Macy metaslab_group_preload(mg);
4382eda14cbcSMatt Macy }
4383eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALLOC, FTAG);
4384eda14cbcSMatt Macy }
4385eda14cbcSMatt Macy
4386eda14cbcSMatt Macy /*
4387eda14cbcSMatt Macy * When writing a ditto block (i.e. more than one DVA for a given BP) on
4388eda14cbcSMatt Macy * the same vdev as an existing DVA of this BP, try to allocate it
4389eda14cbcSMatt Macy * on a different metaslab than existing DVAs (i.e. a unique metaslab).
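 * The check below derives the metaslab id of the existing DVA from its
 * offset (DVA_GET_OFFSET() >> vdev_ms_shift) and compares it against
 * ms_id; a zero-sized DVA or one on a different vdev is trivially
 * unique.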
4390eda14cbcSMatt Macy */ 4391eda14cbcSMatt Macy static boolean_t 4392eda14cbcSMatt Macy metaslab_is_unique(metaslab_t *msp, dva_t *dva) 4393eda14cbcSMatt Macy { 4394eda14cbcSMatt Macy uint64_t dva_ms_id; 4395eda14cbcSMatt Macy 4396eda14cbcSMatt Macy if (DVA_GET_ASIZE(dva) == 0) 4397eda14cbcSMatt Macy return (B_TRUE); 4398eda14cbcSMatt Macy 4399eda14cbcSMatt Macy if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva)) 4400eda14cbcSMatt Macy return (B_TRUE); 4401eda14cbcSMatt Macy 4402eda14cbcSMatt Macy dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift; 4403eda14cbcSMatt Macy 4404eda14cbcSMatt Macy return (msp->ms_id != dva_ms_id); 4405eda14cbcSMatt Macy } 4406eda14cbcSMatt Macy 4407eda14cbcSMatt Macy /* 4408eda14cbcSMatt Macy * ========================================================================== 4409eda14cbcSMatt Macy * Metaslab allocation tracing facility 4410eda14cbcSMatt Macy * ========================================================================== 4411eda14cbcSMatt Macy */ 4412eda14cbcSMatt Macy 4413eda14cbcSMatt Macy /* 4414eda14cbcSMatt Macy * Add an allocation trace element to the allocation tracing list. 4415eda14cbcSMatt Macy */ 4416eda14cbcSMatt Macy static void 4417eda14cbcSMatt Macy metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg, 4418eda14cbcSMatt Macy metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset, 4419eda14cbcSMatt Macy int allocator) 4420eda14cbcSMatt Macy { 4421eda14cbcSMatt Macy metaslab_alloc_trace_t *mat; 4422eda14cbcSMatt Macy 4423eda14cbcSMatt Macy if (!metaslab_trace_enabled) 4424eda14cbcSMatt Macy return; 4425eda14cbcSMatt Macy 4426eda14cbcSMatt Macy /* 4427eda14cbcSMatt Macy * When the tracing list reaches its maximum we remove 4428eda14cbcSMatt Macy * the second element in the list before adding a new one. 4429eda14cbcSMatt Macy * By removing the second element we preserve the original 4430eda14cbcSMatt Macy * entry as a clue to what allocations steps have already been 4431eda14cbcSMatt Macy * performed. 4432eda14cbcSMatt Macy */ 4433eda14cbcSMatt Macy if (zal->zal_size == metaslab_trace_max_entries) { 4434eda14cbcSMatt Macy metaslab_alloc_trace_t *mat_next; 4435eda14cbcSMatt Macy #ifdef ZFS_DEBUG 4436eda14cbcSMatt Macy panic("too many entries in allocation list"); 4437eda14cbcSMatt Macy #endif 4438eda14cbcSMatt Macy METASLABSTAT_BUMP(metaslabstat_trace_over_limit); 4439eda14cbcSMatt Macy zal->zal_size--; 4440eda14cbcSMatt Macy mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list)); 4441eda14cbcSMatt Macy list_remove(&zal->zal_list, mat_next); 4442eda14cbcSMatt Macy kmem_cache_free(metaslab_alloc_trace_cache, mat_next); 4443eda14cbcSMatt Macy } 4444eda14cbcSMatt Macy 4445eda14cbcSMatt Macy mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP); 4446eda14cbcSMatt Macy list_link_init(&mat->mat_list_node); 4447eda14cbcSMatt Macy mat->mat_mg = mg; 4448eda14cbcSMatt Macy mat->mat_msp = msp; 4449eda14cbcSMatt Macy mat->mat_size = psize; 4450eda14cbcSMatt Macy mat->mat_dva_id = dva_id; 4451eda14cbcSMatt Macy mat->mat_offset = offset; 4452eda14cbcSMatt Macy mat->mat_weight = 0; 4453eda14cbcSMatt Macy mat->mat_allocator = allocator; 4454eda14cbcSMatt Macy 4455eda14cbcSMatt Macy if (msp != NULL) 4456eda14cbcSMatt Macy mat->mat_weight = msp->ms_weight; 4457eda14cbcSMatt Macy 4458eda14cbcSMatt Macy /* 4459eda14cbcSMatt Macy * The list is part of the zio so locking is not required. Only 4460eda14cbcSMatt Macy * a single thread will perform allocations for a given zio. 
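 *
 * The eviction policy described above (keep the oldest entry as a clue,
 * drop the second-oldest once the list is full) can be pictured with
 * the small array-based sketch below; trace_log_t and TRACE_MAX are
 * simplified stand-ins for the real kmem-cache backed list.
 *
 *      #include <stdint.h>
 *      #include <string.h>
 *
 *      #define TRACE_MAX 4     // hypothetical cap on trace entries
 *
 *      typedef struct trace_log {
 *              uint64_t tl_entries[TRACE_MAX];
 *              int tl_count;
 *      } trace_log_t;
 *
 *      static void
 *      trace_log_add(trace_log_t *tl, uint64_t entry)
 *      {
 *              if (tl->tl_count == TRACE_MAX) {
 *                      // Keep entry 0 as the original clue, evict
 *                      // entry 1 and shift the rest down.
 *                      memmove(&tl->tl_entries[1], &tl->tl_entries[2],
 *                          (TRACE_MAX - 2) * sizeof (uint64_t));
 *                      tl->tl_count--;
 *              }
 *              tl->tl_entries[tl->tl_count++] = entry;
 *      }
 *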
4461eda14cbcSMatt Macy */ 4462eda14cbcSMatt Macy list_insert_tail(&zal->zal_list, mat); 4463eda14cbcSMatt Macy zal->zal_size++; 4464eda14cbcSMatt Macy 4465eda14cbcSMatt Macy ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries); 4466eda14cbcSMatt Macy } 4467eda14cbcSMatt Macy 4468eda14cbcSMatt Macy void 4469eda14cbcSMatt Macy metaslab_trace_init(zio_alloc_list_t *zal) 4470eda14cbcSMatt Macy { 4471eda14cbcSMatt Macy list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t), 4472eda14cbcSMatt Macy offsetof(metaslab_alloc_trace_t, mat_list_node)); 4473eda14cbcSMatt Macy zal->zal_size = 0; 4474eda14cbcSMatt Macy } 4475eda14cbcSMatt Macy 4476eda14cbcSMatt Macy void 4477eda14cbcSMatt Macy metaslab_trace_fini(zio_alloc_list_t *zal) 4478eda14cbcSMatt Macy { 4479eda14cbcSMatt Macy metaslab_alloc_trace_t *mat; 4480eda14cbcSMatt Macy 4481eda14cbcSMatt Macy while ((mat = list_remove_head(&zal->zal_list)) != NULL) 4482eda14cbcSMatt Macy kmem_cache_free(metaslab_alloc_trace_cache, mat); 4483eda14cbcSMatt Macy list_destroy(&zal->zal_list); 4484eda14cbcSMatt Macy zal->zal_size = 0; 4485eda14cbcSMatt Macy } 4486eda14cbcSMatt Macy 4487eda14cbcSMatt Macy /* 4488eda14cbcSMatt Macy * ========================================================================== 4489eda14cbcSMatt Macy * Metaslab block operations 4490eda14cbcSMatt Macy * ========================================================================== 4491eda14cbcSMatt Macy */ 4492eda14cbcSMatt Macy 4493eda14cbcSMatt Macy static void 4494eda14cbcSMatt Macy metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags, 4495eda14cbcSMatt Macy int allocator) 4496eda14cbcSMatt Macy { 4497eda14cbcSMatt Macy if (!(flags & METASLAB_ASYNC_ALLOC) || 4498eda14cbcSMatt Macy (flags & METASLAB_DONT_THROTTLE)) 4499eda14cbcSMatt Macy return; 4500eda14cbcSMatt Macy 4501eda14cbcSMatt Macy metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; 4502eda14cbcSMatt Macy if (!mg->mg_class->mc_alloc_throttle_enabled) 4503eda14cbcSMatt Macy return; 4504eda14cbcSMatt Macy 4505eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 4506eda14cbcSMatt Macy (void) zfs_refcount_add(&mga->mga_alloc_queue_depth, tag); 4507eda14cbcSMatt Macy } 4508eda14cbcSMatt Macy 4509eda14cbcSMatt Macy static void 4510eda14cbcSMatt Macy metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator) 4511eda14cbcSMatt Macy { 4512eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 45137877fdebSMatt Macy metaslab_class_allocator_t *mca = 45147877fdebSMatt Macy &mg->mg_class->mc_allocator[allocator]; 4515eda14cbcSMatt Macy uint64_t max = mg->mg_max_alloc_queue_depth; 4516eda14cbcSMatt Macy uint64_t cur = mga->mga_cur_max_alloc_queue_depth; 4517eda14cbcSMatt Macy while (cur < max) { 4518eda14cbcSMatt Macy if (atomic_cas_64(&mga->mga_cur_max_alloc_queue_depth, 4519eda14cbcSMatt Macy cur, cur + 1) == cur) { 45207877fdebSMatt Macy atomic_inc_64(&mca->mca_alloc_max_slots); 4521eda14cbcSMatt Macy return; 4522eda14cbcSMatt Macy } 4523eda14cbcSMatt Macy cur = mga->mga_cur_max_alloc_queue_depth; 4524eda14cbcSMatt Macy } 4525eda14cbcSMatt Macy } 4526eda14cbcSMatt Macy 4527eda14cbcSMatt Macy void 4528eda14cbcSMatt Macy metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags, 4529eda14cbcSMatt Macy int allocator, boolean_t io_complete) 4530eda14cbcSMatt Macy { 4531eda14cbcSMatt Macy if (!(flags & METASLAB_ASYNC_ALLOC) || 4532eda14cbcSMatt Macy (flags & METASLAB_DONT_THROTTLE)) 4533eda14cbcSMatt Macy return; 
4534eda14cbcSMatt Macy 4535eda14cbcSMatt Macy metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; 4536eda14cbcSMatt Macy if (!mg->mg_class->mc_alloc_throttle_enabled) 4537eda14cbcSMatt Macy return; 4538eda14cbcSMatt Macy 4539eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 4540eda14cbcSMatt Macy (void) zfs_refcount_remove(&mga->mga_alloc_queue_depth, tag); 4541eda14cbcSMatt Macy if (io_complete) 4542eda14cbcSMatt Macy metaslab_group_increment_qdepth(mg, allocator); 4543eda14cbcSMatt Macy } 4544eda14cbcSMatt Macy 4545eda14cbcSMatt Macy void 4546eda14cbcSMatt Macy metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag, 4547eda14cbcSMatt Macy int allocator) 4548eda14cbcSMatt Macy { 4549eda14cbcSMatt Macy #ifdef ZFS_DEBUG 4550eda14cbcSMatt Macy const dva_t *dva = bp->blk_dva; 4551eda14cbcSMatt Macy int ndvas = BP_GET_NDVAS(bp); 4552eda14cbcSMatt Macy 4553eda14cbcSMatt Macy for (int d = 0; d < ndvas; d++) { 4554eda14cbcSMatt Macy uint64_t vdev = DVA_GET_VDEV(&dva[d]); 4555eda14cbcSMatt Macy metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; 4556eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 4557eda14cbcSMatt Macy VERIFY(zfs_refcount_not_held(&mga->mga_alloc_queue_depth, tag)); 4558eda14cbcSMatt Macy } 4559eda14cbcSMatt Macy #endif 4560eda14cbcSMatt Macy } 4561eda14cbcSMatt Macy 4562eda14cbcSMatt Macy static uint64_t 4563eda14cbcSMatt Macy metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg) 4564eda14cbcSMatt Macy { 4565eda14cbcSMatt Macy uint64_t start; 4566eda14cbcSMatt Macy range_tree_t *rt = msp->ms_allocatable; 4567eda14cbcSMatt Macy metaslab_class_t *mc = msp->ms_group->mg_class; 4568eda14cbcSMatt Macy 4569eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 4570eda14cbcSMatt Macy VERIFY(!msp->ms_condensing); 4571eda14cbcSMatt Macy VERIFY0(msp->ms_disabled); 4572eda14cbcSMatt Macy 4573eda14cbcSMatt Macy start = mc->mc_ops->msop_alloc(msp, size); 4574eda14cbcSMatt Macy if (start != -1ULL) { 4575eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 4576eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 4577eda14cbcSMatt Macy 4578eda14cbcSMatt Macy VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift)); 4579eda14cbcSMatt Macy VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 4580eda14cbcSMatt Macy VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size); 4581eda14cbcSMatt Macy range_tree_remove(rt, start, size); 4582eda14cbcSMatt Macy range_tree_clear(msp->ms_trim, start, size); 4583eda14cbcSMatt Macy 4584eda14cbcSMatt Macy if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK])) 4585eda14cbcSMatt Macy vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg); 4586eda14cbcSMatt Macy 4587eda14cbcSMatt Macy range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size); 4588eda14cbcSMatt Macy msp->ms_allocating_total += size; 4589eda14cbcSMatt Macy 4590eda14cbcSMatt Macy /* Track the last successful allocation */ 4591eda14cbcSMatt Macy msp->ms_alloc_txg = txg; 4592eda14cbcSMatt Macy metaslab_verify_space(msp, txg); 4593eda14cbcSMatt Macy } 4594eda14cbcSMatt Macy 4595eda14cbcSMatt Macy /* 4596eda14cbcSMatt Macy * Now that we've attempted the allocation we need to update the 4597eda14cbcSMatt Macy * metaslab's maximum block size since it may have changed. 
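 *
 * The cached "largest allocatable" value refreshed below is, in spirit,
 * just the biggest free segment currently in the metaslab. The sketch
 * that follows computes that over a plain array; seg_t and
 * largest_free() are simplified stand-ins for the size-sorted tree the
 * real code consults.
 *
 *      #include <stddef.h>
 *      #include <stdint.h>
 *
 *      typedef struct seg {
 *              uint64_t s_start;
 *              uint64_t s_len;
 *      } seg_t;
 *
 *      // Largest single allocation this (hypothetical) metaslab could
 *      // satisfy right now.
 *      static uint64_t
 *      largest_free(const seg_t *segs, size_t nsegs)
 *      {
 *              uint64_t max = 0;
 *              for (size_t i = 0; i < nsegs; i++) {
 *                      if (segs[i].s_len > max)
 *                              max = segs[i].s_len;
 *              }
 *              return (max);
 *      }
 *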
4598eda14cbcSMatt Macy */ 4599eda14cbcSMatt Macy msp->ms_max_size = metaslab_largest_allocatable(msp); 4600eda14cbcSMatt Macy return (start); 4601eda14cbcSMatt Macy } 4602eda14cbcSMatt Macy 4603eda14cbcSMatt Macy /* 4604eda14cbcSMatt Macy * Find the metaslab with the highest weight that is less than what we've 4605eda14cbcSMatt Macy * already tried. In the common case, this means that we will examine each 4606eda14cbcSMatt Macy * metaslab at most once. Note that concurrent callers could reorder metaslabs 4607eda14cbcSMatt Macy * by activation/passivation once we have dropped the mg_lock. If a metaslab is 4608eda14cbcSMatt Macy * activated by another thread, and we fail to allocate from the metaslab we 4609eda14cbcSMatt Macy * have selected, we may not try the newly-activated metaslab, and instead 4610eda14cbcSMatt Macy * activate another metaslab. This is not optimal, but generally does not cause 4611eda14cbcSMatt Macy * any problems (a possible exception being if every metaslab is completely full 4612eda14cbcSMatt Macy * except for the newly-activated metaslab which we fail to examine). 4613eda14cbcSMatt Macy */ 4614eda14cbcSMatt Macy static metaslab_t * 4615eda14cbcSMatt Macy find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight, 4616eda14cbcSMatt Macy dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator, 4617eda14cbcSMatt Macy boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search, 4618eda14cbcSMatt Macy boolean_t *was_active) 4619eda14cbcSMatt Macy { 4620eda14cbcSMatt Macy avl_index_t idx; 4621eda14cbcSMatt Macy avl_tree_t *t = &mg->mg_metaslab_tree; 4622eda14cbcSMatt Macy metaslab_t *msp = avl_find(t, search, &idx); 4623eda14cbcSMatt Macy if (msp == NULL) 4624eda14cbcSMatt Macy msp = avl_nearest(t, idx, AVL_AFTER); 4625eda14cbcSMatt Macy 46267877fdebSMatt Macy int tries = 0; 4627eda14cbcSMatt Macy for (; msp != NULL; msp = AVL_NEXT(t, msp)) { 4628eda14cbcSMatt Macy int i; 46297877fdebSMatt Macy 46307877fdebSMatt Macy if (!try_hard && tries > zfs_metaslab_find_max_tries) { 46317877fdebSMatt Macy METASLABSTAT_BUMP(metaslabstat_too_many_tries); 46327877fdebSMatt Macy return (NULL); 46337877fdebSMatt Macy } 46347877fdebSMatt Macy tries++; 46357877fdebSMatt Macy 4636eda14cbcSMatt Macy if (!metaslab_should_allocate(msp, asize, try_hard)) { 4637eda14cbcSMatt Macy metaslab_trace_add(zal, mg, msp, asize, d, 4638eda14cbcSMatt Macy TRACE_TOO_SMALL, allocator); 4639eda14cbcSMatt Macy continue; 4640eda14cbcSMatt Macy } 4641eda14cbcSMatt Macy 4642eda14cbcSMatt Macy /* 4643eda14cbcSMatt Macy * If the selected metaslab is condensing or disabled, 4644eda14cbcSMatt Macy * skip it. 4645eda14cbcSMatt Macy */ 4646eda14cbcSMatt Macy if (msp->ms_condensing || msp->ms_disabled > 0) 4647eda14cbcSMatt Macy continue; 4648eda14cbcSMatt Macy 4649eda14cbcSMatt Macy *was_active = msp->ms_allocator != -1; 4650eda14cbcSMatt Macy /* 4651eda14cbcSMatt Macy * If we're activating as primary, this is our first allocation 4652eda14cbcSMatt Macy * from this disk, so we don't need to check how close we are. 4653eda14cbcSMatt Macy * If the metaslab under consideration was already active, 4654eda14cbcSMatt Macy * we're getting desperate enough to steal another allocator's 4655eda14cbcSMatt Macy * metaslab, so we still don't care about distances. 
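 *
 * Stripped of the locking and activation details, the surrounding loop
 * is a capped scan over weight-sorted candidates that skips entries
 * which are busy or too small. The sketch below shows that shape with
 * hypothetical cand_t records and a max_tries cut-off; it is not the
 * real AVL walk.
 *
 *      #include <stdbool.h>
 *      #include <stddef.h>
 *      #include <stdint.h>
 *
 *      typedef struct cand {
 *              uint64_t c_max_free;    // largest free chunk available
 *              bool c_busy;            // condensing/disabled, skip it
 *      } cand_t;
 *
 *      // Candidates are assumed sorted by weight, best first. Return
 *      // the first usable index, or -1 once max_tries is exceeded.
 *      static int
 *      pick_candidate(const cand_t *c, size_t n, uint64_t want,
 *          int max_tries)
 *      {
 *              int tries = 0;
 *              for (size_t i = 0; i < n; i++) {
 *                      if (tries++ > max_tries)
 *                              return (-1);
 *                      if (c[i].c_busy || c[i].c_max_free < want)
 *                              continue;
 *                      return ((int)i);
 *              }
 *              return (-1);
 *      }
 *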
4656eda14cbcSMatt Macy */ 4657eda14cbcSMatt Macy if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active) 4658eda14cbcSMatt Macy break; 4659eda14cbcSMatt Macy 4660eda14cbcSMatt Macy for (i = 0; i < d; i++) { 4661eda14cbcSMatt Macy if (want_unique && 4662eda14cbcSMatt Macy !metaslab_is_unique(msp, &dva[i])) 4663eda14cbcSMatt Macy break; /* try another metaslab */ 4664eda14cbcSMatt Macy } 4665eda14cbcSMatt Macy if (i == d) 4666eda14cbcSMatt Macy break; 4667eda14cbcSMatt Macy } 4668eda14cbcSMatt Macy 4669eda14cbcSMatt Macy if (msp != NULL) { 4670eda14cbcSMatt Macy search->ms_weight = msp->ms_weight; 4671eda14cbcSMatt Macy search->ms_start = msp->ms_start + 1; 4672eda14cbcSMatt Macy search->ms_allocator = msp->ms_allocator; 4673eda14cbcSMatt Macy search->ms_primary = msp->ms_primary; 4674eda14cbcSMatt Macy } 4675eda14cbcSMatt Macy return (msp); 4676eda14cbcSMatt Macy } 4677eda14cbcSMatt Macy 4678eda14cbcSMatt Macy static void 4679eda14cbcSMatt Macy metaslab_active_mask_verify(metaslab_t *msp) 4680eda14cbcSMatt Macy { 4681eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 4682eda14cbcSMatt Macy 4683eda14cbcSMatt Macy if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0) 4684eda14cbcSMatt Macy return; 4685eda14cbcSMatt Macy 4686eda14cbcSMatt Macy if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) 4687eda14cbcSMatt Macy return; 4688eda14cbcSMatt Macy 4689eda14cbcSMatt Macy if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) { 4690eda14cbcSMatt Macy VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY); 4691eda14cbcSMatt Macy VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM); 4692eda14cbcSMatt Macy VERIFY3S(msp->ms_allocator, !=, -1); 4693eda14cbcSMatt Macy VERIFY(msp->ms_primary); 4694eda14cbcSMatt Macy return; 4695eda14cbcSMatt Macy } 4696eda14cbcSMatt Macy 4697eda14cbcSMatt Macy if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) { 4698eda14cbcSMatt Macy VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY); 4699eda14cbcSMatt Macy VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM); 4700eda14cbcSMatt Macy VERIFY3S(msp->ms_allocator, !=, -1); 4701eda14cbcSMatt Macy VERIFY(!msp->ms_primary); 4702eda14cbcSMatt Macy return; 4703eda14cbcSMatt Macy } 4704eda14cbcSMatt Macy 4705eda14cbcSMatt Macy if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) { 4706eda14cbcSMatt Macy VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY); 4707eda14cbcSMatt Macy VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY); 4708eda14cbcSMatt Macy VERIFY3S(msp->ms_allocator, ==, -1); 4709eda14cbcSMatt Macy return; 4710eda14cbcSMatt Macy } 4711eda14cbcSMatt Macy } 4712eda14cbcSMatt Macy 4713eda14cbcSMatt Macy /* ARGSUSED */ 4714eda14cbcSMatt Macy static uint64_t 4715eda14cbcSMatt Macy metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal, 4716eda14cbcSMatt Macy uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d, 4717eda14cbcSMatt Macy int allocator, boolean_t try_hard) 4718eda14cbcSMatt Macy { 4719eda14cbcSMatt Macy metaslab_t *msp = NULL; 4720eda14cbcSMatt Macy uint64_t offset = -1ULL; 4721eda14cbcSMatt Macy 4722eda14cbcSMatt Macy uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY; 4723eda14cbcSMatt Macy for (int i = 0; i < d; i++) { 4724eda14cbcSMatt Macy if (activation_weight == METASLAB_WEIGHT_PRIMARY && 4725eda14cbcSMatt Macy DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) { 4726eda14cbcSMatt Macy activation_weight = METASLAB_WEIGHT_SECONDARY; 4727eda14cbcSMatt Macy } else if (activation_weight == METASLAB_WEIGHT_SECONDARY && 4728eda14cbcSMatt Macy DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) { 4729eda14cbcSMatt 
Macy activation_weight = METASLAB_WEIGHT_CLAIM; 4730eda14cbcSMatt Macy break; 4731eda14cbcSMatt Macy } 4732eda14cbcSMatt Macy } 4733eda14cbcSMatt Macy 4734eda14cbcSMatt Macy /* 4735eda14cbcSMatt Macy * If we don't have enough metaslabs active to fill the entire array, we 4736eda14cbcSMatt Macy * just use the 0th slot. 4737eda14cbcSMatt Macy */ 4738eda14cbcSMatt Macy if (mg->mg_ms_ready < mg->mg_allocators * 3) 4739eda14cbcSMatt Macy allocator = 0; 4740eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 4741eda14cbcSMatt Macy 4742eda14cbcSMatt Macy ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2); 4743eda14cbcSMatt Macy 4744eda14cbcSMatt Macy metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP); 4745eda14cbcSMatt Macy search->ms_weight = UINT64_MAX; 4746eda14cbcSMatt Macy search->ms_start = 0; 4747eda14cbcSMatt Macy /* 4748eda14cbcSMatt Macy * At the end of the metaslab tree are the already-active metaslabs, 4749eda14cbcSMatt Macy * first the primaries, then the secondaries. When we resume searching 4750eda14cbcSMatt Macy * through the tree, we need to consider ms_allocator and ms_primary so 4751eda14cbcSMatt Macy * we start in the location right after where we left off, and don't 4752eda14cbcSMatt Macy * accidentally loop forever considering the same metaslabs. 4753eda14cbcSMatt Macy */ 4754eda14cbcSMatt Macy search->ms_allocator = -1; 4755eda14cbcSMatt Macy search->ms_primary = B_TRUE; 4756eda14cbcSMatt Macy for (;;) { 4757eda14cbcSMatt Macy boolean_t was_active = B_FALSE; 4758eda14cbcSMatt Macy 4759eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 4760eda14cbcSMatt Macy 4761eda14cbcSMatt Macy if (activation_weight == METASLAB_WEIGHT_PRIMARY && 4762eda14cbcSMatt Macy mga->mga_primary != NULL) { 4763eda14cbcSMatt Macy msp = mga->mga_primary; 4764eda14cbcSMatt Macy 4765eda14cbcSMatt Macy /* 4766eda14cbcSMatt Macy * Even though we don't hold the ms_lock for the 4767eda14cbcSMatt Macy * primary metaslab, those fields should not 4768eda14cbcSMatt Macy * change while we hold the mg_lock. Thus it is 4769eda14cbcSMatt Macy * safe to make assertions on them. 4770eda14cbcSMatt Macy */ 4771eda14cbcSMatt Macy ASSERT(msp->ms_primary); 4772eda14cbcSMatt Macy ASSERT3S(msp->ms_allocator, ==, allocator); 4773eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 4774eda14cbcSMatt Macy 4775eda14cbcSMatt Macy was_active = B_TRUE; 4776eda14cbcSMatt Macy ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); 4777eda14cbcSMatt Macy } else if (activation_weight == METASLAB_WEIGHT_SECONDARY && 4778eda14cbcSMatt Macy mga->mga_secondary != NULL) { 4779eda14cbcSMatt Macy msp = mga->mga_secondary; 4780eda14cbcSMatt Macy 4781eda14cbcSMatt Macy /* 4782eda14cbcSMatt Macy * See comment above about the similar assertions 4783eda14cbcSMatt Macy * for the primary metaslab. 
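 *
 * The hand-off between the branches above can be pictured with the
 * small stand-in structure below: each allocator keeps one primary and
 * one secondary slot and simply reuses whichever slot matches the
 * weight being requested. alloc_slot_t and pick_active() are
 * illustrative names only, not the real per-allocator state.
 *
 *      #include <stddef.h>
 *
 *      typedef enum { WANT_PRIMARY, WANT_SECONDARY } want_t;
 *
 *      typedef struct alloc_slot {
 *              void *as_primary;       // metaslab active as primary
 *              void *as_secondary;     // metaslab active as secondary
 *      } alloc_slot_t;
 *
 *      // Reuse an already-active metaslab when one matches the
 *      // requested weight; otherwise the caller searches the group.
 *      static void *
 *      pick_active(const alloc_slot_t *as, want_t want)
 *      {
 *              if (want == WANT_PRIMARY && as->as_primary != NULL)
 *                      return (as->as_primary);
 *              if (want == WANT_SECONDARY && as->as_secondary != NULL)
 *                      return (as->as_secondary);
 *              return (NULL);
 *      }
 *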
4784eda14cbcSMatt Macy */ 4785eda14cbcSMatt Macy ASSERT(!msp->ms_primary); 4786eda14cbcSMatt Macy ASSERT3S(msp->ms_allocator, ==, allocator); 4787eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 4788eda14cbcSMatt Macy 4789eda14cbcSMatt Macy was_active = B_TRUE; 4790eda14cbcSMatt Macy ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); 4791eda14cbcSMatt Macy } else { 4792eda14cbcSMatt Macy msp = find_valid_metaslab(mg, activation_weight, dva, d, 4793eda14cbcSMatt Macy want_unique, asize, allocator, try_hard, zal, 4794eda14cbcSMatt Macy search, &was_active); 4795eda14cbcSMatt Macy } 4796eda14cbcSMatt Macy 4797eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 4798eda14cbcSMatt Macy if (msp == NULL) { 4799eda14cbcSMatt Macy kmem_free(search, sizeof (*search)); 4800eda14cbcSMatt Macy return (-1ULL); 4801eda14cbcSMatt Macy } 4802eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 4803eda14cbcSMatt Macy 4804eda14cbcSMatt Macy metaslab_active_mask_verify(msp); 4805eda14cbcSMatt Macy 4806eda14cbcSMatt Macy /* 4807eda14cbcSMatt Macy * This code is disabled out because of issues with 4808eda14cbcSMatt Macy * tracepoints in non-gpl kernel modules. 4809eda14cbcSMatt Macy */ 4810eda14cbcSMatt Macy #if 0 4811eda14cbcSMatt Macy DTRACE_PROBE3(ms__activation__attempt, 4812eda14cbcSMatt Macy metaslab_t *, msp, uint64_t, activation_weight, 4813eda14cbcSMatt Macy boolean_t, was_active); 4814eda14cbcSMatt Macy #endif 4815eda14cbcSMatt Macy 4816eda14cbcSMatt Macy /* 4817eda14cbcSMatt Macy * Ensure that the metaslab we have selected is still 4818eda14cbcSMatt Macy * capable of handling our request. It's possible that 4819eda14cbcSMatt Macy * another thread may have changed the weight while we 4820eda14cbcSMatt Macy * were blocked on the metaslab lock. We check the 4821eda14cbcSMatt Macy * active status first to see if we need to set_selected_txg 4822eda14cbcSMatt Macy * a new metaslab. 4823eda14cbcSMatt Macy */ 4824eda14cbcSMatt Macy if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) { 4825eda14cbcSMatt Macy ASSERT3S(msp->ms_allocator, ==, -1); 4826eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 4827eda14cbcSMatt Macy continue; 4828eda14cbcSMatt Macy } 4829eda14cbcSMatt Macy 4830eda14cbcSMatt Macy /* 4831eda14cbcSMatt Macy * If the metaslab was activated for another allocator 4832eda14cbcSMatt Macy * while we were waiting in the ms_lock above, or it's 4833eda14cbcSMatt Macy * a primary and we're seeking a secondary (or vice versa), 4834eda14cbcSMatt Macy * we go back and select a new metaslab. 4835eda14cbcSMatt Macy */ 4836eda14cbcSMatt Macy if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) && 4837eda14cbcSMatt Macy (msp->ms_allocator != -1) && 4838eda14cbcSMatt Macy (msp->ms_allocator != allocator || ((activation_weight == 4839eda14cbcSMatt Macy METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) { 4840eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 4841eda14cbcSMatt Macy ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) || 4842eda14cbcSMatt Macy msp->ms_allocator != -1); 4843eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 4844eda14cbcSMatt Macy continue; 4845eda14cbcSMatt Macy } 4846eda14cbcSMatt Macy 4847eda14cbcSMatt Macy /* 4848eda14cbcSMatt Macy * This metaslab was used for claiming regions allocated 4849eda14cbcSMatt Macy * by the ZIL during pool import. Once these regions are 4850eda14cbcSMatt Macy * claimed we don't need to keep the CLAIM bit set 4851eda14cbcSMatt Macy * anymore. Passivate this metaslab to zero its activation 4852eda14cbcSMatt Macy * mask. 
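 *
 * The weight word being manipulated here packs activation flags into
 * its high bits, and "passivating" is just clearing some of those bits.
 * The sketch below uses made-up flag values (W_PRIMARY, W_SECONDARY,
 * W_CLAIM), not the actual METASLAB_WEIGHT_* encoding.
 *
 *      #include <stdint.h>
 *
 *      #define W_PRIMARY       (1ULL << 63)    // hypothetical bits
 *      #define W_SECONDARY     (1ULL << 62)
 *      #define W_CLAIM         (1ULL << 61)
 *      #define W_ACTIVE        (W_PRIMARY | W_SECONDARY | W_CLAIM)
 *
 *      // Fully passivate: clear every activation bit, keep the weight.
 *      static uint64_t
 *      passivate_weight(uint64_t weight)
 *      {
 *              return (weight & ~W_ACTIVE);
 *      }
 *
 *      // Drop only the claim bit, as in the narrower case handled here.
 *      static uint64_t
 *      drop_claim(uint64_t weight)
 *      {
 *              return (weight & ~W_CLAIM);
 *      }
 *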
4853eda14cbcSMatt Macy */ 4854eda14cbcSMatt Macy if (msp->ms_weight & METASLAB_WEIGHT_CLAIM && 4855eda14cbcSMatt Macy activation_weight != METASLAB_WEIGHT_CLAIM) { 4856eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 4857eda14cbcSMatt Macy ASSERT3S(msp->ms_allocator, ==, -1); 4858eda14cbcSMatt Macy metaslab_passivate(msp, msp->ms_weight & 4859eda14cbcSMatt Macy ~METASLAB_WEIGHT_CLAIM); 4860eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 4861eda14cbcSMatt Macy continue; 4862eda14cbcSMatt Macy } 4863eda14cbcSMatt Macy 4864eda14cbcSMatt Macy metaslab_set_selected_txg(msp, txg); 4865eda14cbcSMatt Macy 4866eda14cbcSMatt Macy int activation_error = 4867eda14cbcSMatt Macy metaslab_activate(msp, allocator, activation_weight); 4868eda14cbcSMatt Macy metaslab_active_mask_verify(msp); 4869eda14cbcSMatt Macy 4870eda14cbcSMatt Macy /* 4871eda14cbcSMatt Macy * If the metaslab was activated by another thread for 4872eda14cbcSMatt Macy * another allocator or activation_weight (EBUSY), or it 4873eda14cbcSMatt Macy * failed because another metaslab was assigned as primary 4874eda14cbcSMatt Macy * for this allocator (EEXIST), we continue using this 4875eda14cbcSMatt Macy * metaslab for our allocation, rather than going on to a 4876eda14cbcSMatt Macy * worse metaslab (we waited for that metaslab to be loaded 4877eda14cbcSMatt Macy * after all). 4878eda14cbcSMatt Macy * 4879eda14cbcSMatt Macy * If the activation failed due to an I/O error or ENOSPC we 4880eda14cbcSMatt Macy * skip to the next metaslab. 4881eda14cbcSMatt Macy */ 4882eda14cbcSMatt Macy boolean_t activated; 4883eda14cbcSMatt Macy if (activation_error == 0) { 4884eda14cbcSMatt Macy activated = B_TRUE; 4885eda14cbcSMatt Macy } else if (activation_error == EBUSY || 4886eda14cbcSMatt Macy activation_error == EEXIST) { 4887eda14cbcSMatt Macy activated = B_FALSE; 4888eda14cbcSMatt Macy } else { 4889eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 4890eda14cbcSMatt Macy continue; 4891eda14cbcSMatt Macy } 4892eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 4893eda14cbcSMatt Macy 4894eda14cbcSMatt Macy /* 4895eda14cbcSMatt Macy * Now that we have the lock, recheck to see if we should 4896eda14cbcSMatt Macy * continue to use this metaslab for this allocation. The 4897eda14cbcSMatt Macy * metaslab is now loaded so metaslab_should_allocate() 4898eda14cbcSMatt Macy * can accurately determine if the allocation attempt should 4899eda14cbcSMatt Macy * proceed. 4900eda14cbcSMatt Macy */ 4901eda14cbcSMatt Macy if (!metaslab_should_allocate(msp, asize, try_hard)) { 4902eda14cbcSMatt Macy /* Passivate this metaslab and select a new one. */ 4903eda14cbcSMatt Macy metaslab_trace_add(zal, mg, msp, asize, d, 4904eda14cbcSMatt Macy TRACE_TOO_SMALL, allocator); 4905eda14cbcSMatt Macy goto next; 4906eda14cbcSMatt Macy } 4907eda14cbcSMatt Macy 4908eda14cbcSMatt Macy /* 4909eda14cbcSMatt Macy * If this metaslab is currently condensing then pick again 4910eda14cbcSMatt Macy * as we can't manipulate this metaslab until it's committed 4911eda14cbcSMatt Macy * to disk. If this metaslab is being initialized, we shouldn't 4912eda14cbcSMatt Macy * allocate from it since the allocated region might be 4913eda14cbcSMatt Macy * overwritten after allocation.
4914eda14cbcSMatt Macy */ 4915eda14cbcSMatt Macy if (msp->ms_condensing) { 4916eda14cbcSMatt Macy metaslab_trace_add(zal, mg, msp, asize, d, 4917eda14cbcSMatt Macy TRACE_CONDENSING, allocator); 4918eda14cbcSMatt Macy if (activated) { 4919eda14cbcSMatt Macy metaslab_passivate(msp, msp->ms_weight & 4920eda14cbcSMatt Macy ~METASLAB_ACTIVE_MASK); 4921eda14cbcSMatt Macy } 4922eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 4923eda14cbcSMatt Macy continue; 4924eda14cbcSMatt Macy } else if (msp->ms_disabled > 0) { 4925eda14cbcSMatt Macy metaslab_trace_add(zal, mg, msp, asize, d, 4926eda14cbcSMatt Macy TRACE_DISABLED, allocator); 4927eda14cbcSMatt Macy if (activated) { 4928eda14cbcSMatt Macy metaslab_passivate(msp, msp->ms_weight & 4929eda14cbcSMatt Macy ~METASLAB_ACTIVE_MASK); 4930eda14cbcSMatt Macy } 4931eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 4932eda14cbcSMatt Macy continue; 4933eda14cbcSMatt Macy } 4934eda14cbcSMatt Macy 4935eda14cbcSMatt Macy offset = metaslab_block_alloc(msp, asize, txg); 4936eda14cbcSMatt Macy metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator); 4937eda14cbcSMatt Macy 4938eda14cbcSMatt Macy if (offset != -1ULL) { 4939eda14cbcSMatt Macy /* Proactively passivate the metaslab, if needed */ 4940eda14cbcSMatt Macy if (activated) 4941eda14cbcSMatt Macy metaslab_segment_may_passivate(msp); 4942eda14cbcSMatt Macy break; 4943eda14cbcSMatt Macy } 4944eda14cbcSMatt Macy next: 4945eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 4946eda14cbcSMatt Macy 4947eda14cbcSMatt Macy /* 4948eda14cbcSMatt Macy * This code is disabled out because of issues with 4949eda14cbcSMatt Macy * tracepoints in non-gpl kernel modules. 4950eda14cbcSMatt Macy */ 4951eda14cbcSMatt Macy #if 0 4952eda14cbcSMatt Macy DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp, 4953eda14cbcSMatt Macy uint64_t, asize); 4954eda14cbcSMatt Macy #endif 4955eda14cbcSMatt Macy 4956eda14cbcSMatt Macy /* 4957eda14cbcSMatt Macy * We were unable to allocate from this metaslab so determine 4958eda14cbcSMatt Macy * a new weight for this metaslab. Now that we have loaded 4959eda14cbcSMatt Macy * the metaslab we can provide a better hint to the metaslab 4960eda14cbcSMatt Macy * selector. 4961eda14cbcSMatt Macy * 4962eda14cbcSMatt Macy * For space-based metaslabs, we use the maximum block size. 4963eda14cbcSMatt Macy * This information is only available when the metaslab 4964eda14cbcSMatt Macy * is loaded and is more accurate than the generic free 4965eda14cbcSMatt Macy * space weight that was calculated by metaslab_weight(). 4966eda14cbcSMatt Macy * This information allows us to quickly compare the maximum 4967eda14cbcSMatt Macy * available allocation in the metaslab to the allocation 4968eda14cbcSMatt Macy * size being requested. 4969eda14cbcSMatt Macy * 4970eda14cbcSMatt Macy * For segment-based metaslabs, determine the new weight 4971eda14cbcSMatt Macy * based on the highest bucket in the range tree. We 4972eda14cbcSMatt Macy * explicitly use the loaded segment weight (i.e. the range 4973eda14cbcSMatt Macy * tree histogram) since it contains the space that is 4974eda14cbcSMatt Macy * currently available for allocation and is accurate 4975eda14cbcSMatt Macy * even within a sync pass. 
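 *
 * For the segment-based branch, the new weight reduces to "the highest
 * power-of-two bucket that still has free segments". The sketch below
 * derives such a weight from a plain histogram array; NBUCKETS and the
 * encoding are simplified assumptions, not the real WEIGHT_* macros.
 *
 *      #include <stdint.h>
 *
 *      #define NBUCKETS 64     // one bucket per power-of-two size
 *
 *      // Weight = index of the largest non-empty bucket, with the
 *      // count in that bucket as a tie-breaker.
 *      static uint64_t
 *      segment_weight(const uint32_t hist[NBUCKETS])
 *      {
 *              for (int b = NBUCKETS - 1; b >= 0; b--) {
 *                      if (hist[b] != 0)
 *                              return (((uint64_t)b << 32) | hist[b]);
 *              }
 *              return (0);
 *      }
 *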
4976eda14cbcSMatt Macy */ 4977eda14cbcSMatt Macy uint64_t weight; 4978eda14cbcSMatt Macy if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) { 4979eda14cbcSMatt Macy weight = metaslab_largest_allocatable(msp); 4980eda14cbcSMatt Macy WEIGHT_SET_SPACEBASED(weight); 4981eda14cbcSMatt Macy } else { 4982eda14cbcSMatt Macy weight = metaslab_weight_from_range_tree(msp); 4983eda14cbcSMatt Macy } 4984eda14cbcSMatt Macy 4985eda14cbcSMatt Macy if (activated) { 4986eda14cbcSMatt Macy metaslab_passivate(msp, weight); 4987eda14cbcSMatt Macy } else { 4988eda14cbcSMatt Macy /* 4989eda14cbcSMatt Macy * For the case where we use the metaslab that is 4990eda14cbcSMatt Macy * active for another allocator we want to make 4991eda14cbcSMatt Macy * sure that we retain the activation mask. 4992eda14cbcSMatt Macy * 4993eda14cbcSMatt Macy * Note that we could attempt to use something like 4994eda14cbcSMatt Macy * metaslab_recalculate_weight_and_sort() that 4995eda14cbcSMatt Macy * retains the activation mask here. That function 4996eda14cbcSMatt Macy * uses metaslab_weight() to set the weight though 4997eda14cbcSMatt Macy * which is not as accurate as the calculations 4998eda14cbcSMatt Macy * above. 4999eda14cbcSMatt Macy */ 5000eda14cbcSMatt Macy weight |= msp->ms_weight & METASLAB_ACTIVE_MASK; 5001eda14cbcSMatt Macy metaslab_group_sort(mg, msp, weight); 5002eda14cbcSMatt Macy } 5003eda14cbcSMatt Macy metaslab_active_mask_verify(msp); 5004eda14cbcSMatt Macy 5005eda14cbcSMatt Macy /* 5006eda14cbcSMatt Macy * We have just failed an allocation attempt, check 5007eda14cbcSMatt Macy * that metaslab_should_allocate() agrees. Otherwise, 5008eda14cbcSMatt Macy * we may end up in an infinite loop retrying the same 5009eda14cbcSMatt Macy * metaslab. 5010eda14cbcSMatt Macy */ 5011eda14cbcSMatt Macy ASSERT(!metaslab_should_allocate(msp, asize, try_hard)); 5012eda14cbcSMatt Macy 5013eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 5014eda14cbcSMatt Macy } 5015eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 5016eda14cbcSMatt Macy kmem_free(search, sizeof (*search)); 5017eda14cbcSMatt Macy return (offset); 5018eda14cbcSMatt Macy } 5019eda14cbcSMatt Macy 5020eda14cbcSMatt Macy static uint64_t 5021eda14cbcSMatt Macy metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal, 5022eda14cbcSMatt Macy uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d, 5023eda14cbcSMatt Macy int allocator, boolean_t try_hard) 5024eda14cbcSMatt Macy { 5025eda14cbcSMatt Macy uint64_t offset; 5026eda14cbcSMatt Macy ASSERT(mg->mg_initialized); 5027eda14cbcSMatt Macy 5028eda14cbcSMatt Macy offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique, 5029eda14cbcSMatt Macy dva, d, allocator, try_hard); 5030eda14cbcSMatt Macy 5031eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 5032eda14cbcSMatt Macy if (offset == -1ULL) { 5033eda14cbcSMatt Macy mg->mg_failed_allocations++; 5034eda14cbcSMatt Macy metaslab_trace_add(zal, mg, NULL, asize, d, 5035eda14cbcSMatt Macy TRACE_GROUP_FAILURE, allocator); 5036eda14cbcSMatt Macy if (asize == SPA_GANGBLOCKSIZE) { 5037eda14cbcSMatt Macy /* 5038eda14cbcSMatt Macy * This metaslab group was unable to allocate 5039eda14cbcSMatt Macy * the minimum gang block size so it must be out of 5040eda14cbcSMatt Macy * space. We must notify the allocation throttle 5041eda14cbcSMatt Macy * to start skipping allocation attempts to this 5042eda14cbcSMatt Macy * metaslab group until more space becomes available. 
5043eda14cbcSMatt Macy * Note: this failure cannot be caused by the 5044eda14cbcSMatt Macy * allocation throttle since the allocation throttle 5045eda14cbcSMatt Macy * is only responsible for skipping devices and 5046eda14cbcSMatt Macy * not failing block allocations. 5047eda14cbcSMatt Macy */ 5048eda14cbcSMatt Macy mg->mg_no_free_space = B_TRUE; 5049eda14cbcSMatt Macy } 5050eda14cbcSMatt Macy } 5051eda14cbcSMatt Macy mg->mg_allocations++; 5052eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 5053eda14cbcSMatt Macy return (offset); 5054eda14cbcSMatt Macy } 5055eda14cbcSMatt Macy 5056eda14cbcSMatt Macy /* 5057eda14cbcSMatt Macy * Allocate a block for the specified i/o. 5058eda14cbcSMatt Macy */ 5059eda14cbcSMatt Macy int 5060eda14cbcSMatt Macy metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, 5061eda14cbcSMatt Macy dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags, 5062eda14cbcSMatt Macy zio_alloc_list_t *zal, int allocator) 5063eda14cbcSMatt Macy { 50647877fdebSMatt Macy metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator]; 5065eda14cbcSMatt Macy metaslab_group_t *mg, *fast_mg, *rotor; 5066eda14cbcSMatt Macy vdev_t *vd; 5067eda14cbcSMatt Macy boolean_t try_hard = B_FALSE; 5068eda14cbcSMatt Macy 5069eda14cbcSMatt Macy ASSERT(!DVA_IS_VALID(&dva[d])); 5070eda14cbcSMatt Macy 5071eda14cbcSMatt Macy /* 5072eda14cbcSMatt Macy * For testing, make some blocks above a certain size be gang blocks. 5073eda14cbcSMatt Macy * This will result in more split blocks when using device removal, 5074eda14cbcSMatt Macy * and a large number of split blocks coupled with ztest-induced 5075eda14cbcSMatt Macy * damage can result in extremely long reconstruction times. This 5076eda14cbcSMatt Macy * will also test spilling from special to normal. 5077eda14cbcSMatt Macy */ 5078eda14cbcSMatt Macy if (psize >= metaslab_force_ganging && (spa_get_random(100) < 3)) { 5079eda14cbcSMatt Macy metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG, 5080eda14cbcSMatt Macy allocator); 5081eda14cbcSMatt Macy return (SET_ERROR(ENOSPC)); 5082eda14cbcSMatt Macy } 5083eda14cbcSMatt Macy 5084eda14cbcSMatt Macy /* 5085eda14cbcSMatt Macy * Start at the rotor and loop through all mgs until we find something. 50867877fdebSMatt Macy * Note that there's no locking on mca_rotor or mca_aliquot because 5087eda14cbcSMatt Macy * nothing actually breaks if we miss a few updates -- we just won't 5088eda14cbcSMatt Macy * allocate quite as evenly. It all balances out over time. 5089eda14cbcSMatt Macy * 5090eda14cbcSMatt Macy * If we are doing ditto or log blocks, try to spread them across 5091eda14cbcSMatt Macy * consecutive vdevs. If we're forced to reuse a vdev before we've 5092eda14cbcSMatt Macy * allocated all of our ditto blocks, then try and spread them out on 5093eda14cbcSMatt Macy * that vdev as much as possible. If it turns out to not be possible, 5094eda14cbcSMatt Macy * gradually lower our standards until anything becomes acceptable. 5095eda14cbcSMatt Macy * Also, allocating on consecutive vdevs (as opposed to random vdevs) 5096eda14cbcSMatt Macy * gives us hope of containing our fault domains to something we're 5097eda14cbcSMatt Macy * able to reason about. Otherwise, any two top-level vdev failures 5098eda14cbcSMatt Macy * will guarantee the loss of data. With consecutive allocation, 5099eda14cbcSMatt Macy * only two adjacent top-level vdev failures will result in data loss. 
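 *
 * The "consecutive vdevs" behaviour is essentially a rotor: each copy
 * starts on the slot after the previous one and wraps around. The
 * sketch below shows that shape over a flat array of nvdevs devices;
 * pick_vdev_for_copy() and pick_vdev_first_copy() are illustrative
 * helpers, not ZFS functions.
 *
 *      #include <stdint.h>
 *
 *      // Copy d goes on the vdev right after the one that got copy
 *      // d-1, wrapping at the end of the (hypothetical) array.
 *      static uint64_t
 *      pick_vdev_for_copy(uint64_t prev_vdev, uint64_t nvdevs)
 *      {
 *              return ((prev_vdev + 1) % nvdevs);
 *      }
 *
 *      // The first copy follows a shared rotor that advances per use.
 *      static uint64_t
 *      pick_vdev_first_copy(uint64_t *rotor, uint64_t nvdevs)
 *      {
 *              uint64_t v = *rotor;
 *              *rotor = (v + 1) % nvdevs;
 *              return (v);
 *      }
 *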
5100eda14cbcSMatt Macy * 5101eda14cbcSMatt Macy * If we are doing gang blocks (hintdva is non-NULL), try to keep 5102eda14cbcSMatt Macy * ourselves on the same vdev as our gang block header. That 5103eda14cbcSMatt Macy * way, we can hope for locality in vdev_cache, plus it makes our 5104eda14cbcSMatt Macy * fault domains something tractable. 5105eda14cbcSMatt Macy */ 5106eda14cbcSMatt Macy if (hintdva) { 5107eda14cbcSMatt Macy vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d])); 5108eda14cbcSMatt Macy 5109eda14cbcSMatt Macy /* 5110eda14cbcSMatt Macy * It's possible the vdev we're using as the hint no 5111eda14cbcSMatt Macy * longer exists or its mg has been closed (e.g. by 5112eda14cbcSMatt Macy * device removal). Consult the rotor when 5113eda14cbcSMatt Macy * all else fails. 5114eda14cbcSMatt Macy */ 5115eda14cbcSMatt Macy if (vd != NULL && vd->vdev_mg != NULL) { 5116eda14cbcSMatt Macy mg = vd->vdev_mg; 5117eda14cbcSMatt Macy 5118eda14cbcSMatt Macy if (flags & METASLAB_HINTBP_AVOID && 5119eda14cbcSMatt Macy mg->mg_next != NULL) 5120eda14cbcSMatt Macy mg = mg->mg_next; 5121eda14cbcSMatt Macy } else { 51227877fdebSMatt Macy mg = mca->mca_rotor; 5123eda14cbcSMatt Macy } 5124eda14cbcSMatt Macy } else if (d != 0) { 5125eda14cbcSMatt Macy vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1])); 5126eda14cbcSMatt Macy mg = vd->vdev_mg->mg_next; 5127eda14cbcSMatt Macy } else if (flags & METASLAB_FASTWRITE) { 51287877fdebSMatt Macy mg = fast_mg = mca->mca_rotor; 5129eda14cbcSMatt Macy 5130eda14cbcSMatt Macy do { 5131eda14cbcSMatt Macy if (fast_mg->mg_vd->vdev_pending_fastwrite < 5132eda14cbcSMatt Macy mg->mg_vd->vdev_pending_fastwrite) 5133eda14cbcSMatt Macy mg = fast_mg; 51347877fdebSMatt Macy } while ((fast_mg = fast_mg->mg_next) != mca->mca_rotor); 5135eda14cbcSMatt Macy 5136eda14cbcSMatt Macy } else { 51377877fdebSMatt Macy ASSERT(mca->mca_rotor != NULL); 51387877fdebSMatt Macy mg = mca->mca_rotor; 5139eda14cbcSMatt Macy } 5140eda14cbcSMatt Macy 5141eda14cbcSMatt Macy /* 5142eda14cbcSMatt Macy * If the hint put us into the wrong metaslab class, or into a 5143eda14cbcSMatt Macy * metaslab group that has been passivated, just follow the rotor. 5144eda14cbcSMatt Macy */ 5145eda14cbcSMatt Macy if (mg->mg_class != mc || mg->mg_activation_count <= 0) 51467877fdebSMatt Macy mg = mca->mca_rotor; 5147eda14cbcSMatt Macy 5148eda14cbcSMatt Macy rotor = mg; 5149eda14cbcSMatt Macy top: 5150eda14cbcSMatt Macy do { 5151eda14cbcSMatt Macy boolean_t allocatable; 5152eda14cbcSMatt Macy 5153eda14cbcSMatt Macy ASSERT(mg->mg_activation_count == 1); 5154eda14cbcSMatt Macy vd = mg->mg_vd; 5155eda14cbcSMatt Macy 5156eda14cbcSMatt Macy /* 5157eda14cbcSMatt Macy * Don't allocate from faulted devices. 5158eda14cbcSMatt Macy */ 5159eda14cbcSMatt Macy if (try_hard) { 5160eda14cbcSMatt Macy spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER); 5161eda14cbcSMatt Macy allocatable = vdev_allocatable(vd); 5162eda14cbcSMatt Macy spa_config_exit(spa, SCL_ZIO, FTAG); 5163eda14cbcSMatt Macy } else { 5164eda14cbcSMatt Macy allocatable = vdev_allocatable(vd); 5165eda14cbcSMatt Macy } 5166eda14cbcSMatt Macy 5167eda14cbcSMatt Macy /* 5168eda14cbcSMatt Macy * Determine if the selected metaslab group is eligible 5169eda14cbcSMatt Macy * for allocations. If we're ganging then don't allow 5170eda14cbcSMatt Macy * this metaslab group to skip allocations since that would 5171eda14cbcSMatt Macy * inadvertently return ENOSPC and suspend the pool 5172eda14cbcSMatt Macy * even though space is still available. 
5173eda14cbcSMatt Macy */ 5174eda14cbcSMatt Macy if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) { 5175eda14cbcSMatt Macy allocatable = metaslab_group_allocatable(mg, rotor, 5176eda14cbcSMatt Macy psize, allocator, d); 5177eda14cbcSMatt Macy } 5178eda14cbcSMatt Macy 5179eda14cbcSMatt Macy if (!allocatable) { 5180eda14cbcSMatt Macy metaslab_trace_add(zal, mg, NULL, psize, d, 5181eda14cbcSMatt Macy TRACE_NOT_ALLOCATABLE, allocator); 5182eda14cbcSMatt Macy goto next; 5183eda14cbcSMatt Macy } 5184eda14cbcSMatt Macy 5185eda14cbcSMatt Macy ASSERT(mg->mg_initialized); 5186eda14cbcSMatt Macy 5187eda14cbcSMatt Macy /* 5188eda14cbcSMatt Macy * Avoid writing single-copy data to a failing, 5189eda14cbcSMatt Macy * non-redundant vdev, unless we've already tried all 5190eda14cbcSMatt Macy * other vdevs. 5191eda14cbcSMatt Macy */ 5192eda14cbcSMatt Macy if ((vd->vdev_stat.vs_write_errors > 0 || 5193eda14cbcSMatt Macy vd->vdev_state < VDEV_STATE_HEALTHY) && 5194eda14cbcSMatt Macy d == 0 && !try_hard && vd->vdev_children == 0) { 5195eda14cbcSMatt Macy metaslab_trace_add(zal, mg, NULL, psize, d, 5196eda14cbcSMatt Macy TRACE_VDEV_ERROR, allocator); 5197eda14cbcSMatt Macy goto next; 5198eda14cbcSMatt Macy } 5199eda14cbcSMatt Macy 5200eda14cbcSMatt Macy ASSERT(mg->mg_class == mc); 5201eda14cbcSMatt Macy 5202eda14cbcSMatt Macy uint64_t asize = vdev_psize_to_asize(vd, psize); 5203eda14cbcSMatt Macy ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0); 5204eda14cbcSMatt Macy 5205eda14cbcSMatt Macy /* 5206eda14cbcSMatt Macy * If we don't need to try hard, then require that the 5207eda14cbcSMatt Macy * block be on a different metaslab from any other DVAs 5208eda14cbcSMatt Macy * in this BP (unique=true). If we are trying hard, then 5209eda14cbcSMatt Macy * allow any metaslab to be used (unique=false). 5210eda14cbcSMatt Macy */ 5211eda14cbcSMatt Macy uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg, 5212eda14cbcSMatt Macy !try_hard, dva, d, allocator, try_hard); 5213eda14cbcSMatt Macy 5214eda14cbcSMatt Macy if (offset != -1ULL) { 5215eda14cbcSMatt Macy /* 5216eda14cbcSMatt Macy * If we've just selected this metaslab group, 5217eda14cbcSMatt Macy * figure out whether the corresponding vdev is 5218eda14cbcSMatt Macy * over- or under-used relative to the pool, 5219eda14cbcSMatt Macy * and set an allocation bias to even it out. 5220eda14cbcSMatt Macy * 5221eda14cbcSMatt Macy * Bias is also used to compensate for unequally 5222eda14cbcSMatt Macy * sized vdevs so that space is allocated fairly. 5223eda14cbcSMatt Macy */ 52247877fdebSMatt Macy if (mca->mca_aliquot == 0 && metaslab_bias_enabled) { 5225eda14cbcSMatt Macy vdev_stat_t *vs = &vd->vdev_stat; 5226eda14cbcSMatt Macy int64_t vs_free = vs->vs_space - vs->vs_alloc; 5227eda14cbcSMatt Macy int64_t mc_free = mc->mc_space - mc->mc_alloc; 5228eda14cbcSMatt Macy int64_t ratio; 5229eda14cbcSMatt Macy 5230eda14cbcSMatt Macy /* 5231eda14cbcSMatt Macy * Calculate how much more or less we should 5232eda14cbcSMatt Macy * try to allocate from this device during 5233eda14cbcSMatt Macy * this iteration around the rotor. 5234eda14cbcSMatt Macy * 5235eda14cbcSMatt Macy * This basically introduces a zero-centered 5236eda14cbcSMatt Macy * bias towards the devices with the most 5237eda14cbcSMatt Macy * free space, while compensating for vdev 5238eda14cbcSMatt Macy * size differences. 
5239eda14cbcSMatt Macy * 5240eda14cbcSMatt Macy * Examples: 5241eda14cbcSMatt Macy * vdev V1 = 16M/128M 5242eda14cbcSMatt Macy * vdev V2 = 16M/128M 5243eda14cbcSMatt Macy * ratio(V1) = 100% ratio(V2) = 100% 5244eda14cbcSMatt Macy * 5245eda14cbcSMatt Macy * vdev V1 = 16M/128M 5246eda14cbcSMatt Macy * vdev V2 = 64M/128M 5247eda14cbcSMatt Macy * ratio(V1) = 127% ratio(V2) = 72% 5248eda14cbcSMatt Macy * 5249eda14cbcSMatt Macy * vdev V1 = 16M/128M 5250eda14cbcSMatt Macy * vdev V2 = 64M/512M 5251eda14cbcSMatt Macy * ratio(V1) = 40% ratio(V2) = 160% 5252eda14cbcSMatt Macy */ 5253eda14cbcSMatt Macy ratio = (vs_free * mc->mc_alloc_groups * 100) / 5254eda14cbcSMatt Macy (mc_free + 1); 5255eda14cbcSMatt Macy mg->mg_bias = ((ratio - 100) * 5256eda14cbcSMatt Macy (int64_t)mg->mg_aliquot) / 100; 5257eda14cbcSMatt Macy } else if (!metaslab_bias_enabled) { 5258eda14cbcSMatt Macy mg->mg_bias = 0; 5259eda14cbcSMatt Macy } 5260eda14cbcSMatt Macy 5261eda14cbcSMatt Macy if ((flags & METASLAB_FASTWRITE) || 52627877fdebSMatt Macy atomic_add_64_nv(&mca->mca_aliquot, asize) >= 5263eda14cbcSMatt Macy mg->mg_aliquot + mg->mg_bias) { 52647877fdebSMatt Macy mca->mca_rotor = mg->mg_next; 52657877fdebSMatt Macy mca->mca_aliquot = 0; 5266eda14cbcSMatt Macy } 5267eda14cbcSMatt Macy 5268eda14cbcSMatt Macy DVA_SET_VDEV(&dva[d], vd->vdev_id); 5269eda14cbcSMatt Macy DVA_SET_OFFSET(&dva[d], offset); 5270eda14cbcSMatt Macy DVA_SET_GANG(&dva[d], 5271eda14cbcSMatt Macy ((flags & METASLAB_GANG_HEADER) ? 1 : 0)); 5272eda14cbcSMatt Macy DVA_SET_ASIZE(&dva[d], asize); 5273eda14cbcSMatt Macy 5274eda14cbcSMatt Macy if (flags & METASLAB_FASTWRITE) { 5275eda14cbcSMatt Macy atomic_add_64(&vd->vdev_pending_fastwrite, 5276eda14cbcSMatt Macy psize); 5277eda14cbcSMatt Macy } 5278eda14cbcSMatt Macy 5279eda14cbcSMatt Macy return (0); 5280eda14cbcSMatt Macy } 5281eda14cbcSMatt Macy next: 52827877fdebSMatt Macy mca->mca_rotor = mg->mg_next; 52837877fdebSMatt Macy mca->mca_aliquot = 0; 5284eda14cbcSMatt Macy } while ((mg = mg->mg_next) != rotor); 5285eda14cbcSMatt Macy 5286eda14cbcSMatt Macy /* 52877877fdebSMatt Macy * If we haven't tried hard, perhaps do so now. 
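 *
 * The retry below amounts to a second pass over the same rotor with
 * relaxed constraints. A minimal sketch of that two-pass shape, with a
 * hypothetical try_alloc() callback standing in for the per-group
 * allocation:
 *
 *      #include <errno.h>
 *      #include <stdbool.h>
 *      #include <stdint.h>
 *
 *      typedef int (*try_alloc_fn)(uint64_t size, bool try_hard,
 *          uint64_t *off);
 *
 *      // First pass is picky (unique metaslabs, healthy vdevs); if it
 *      // fails, make one more pass with try_hard before giving up.
 *      static int
 *      alloc_with_retry(try_alloc_fn try_alloc, uint64_t size,
 *          uint64_t *off)
 *      {
 *              if (try_alloc(size, false, off) == 0)
 *                      return (0);
 *              if (try_alloc(size, true, off) == 0)
 *                      return (0);
 *              return (ENOSPC);
 *      }
 *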
5288eda14cbcSMatt Macy */ 52897877fdebSMatt Macy if (!try_hard && (zfs_metaslab_try_hard_before_gang || 52907877fdebSMatt Macy GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 || 52917877fdebSMatt Macy psize <= 1 << spa->spa_min_ashift)) { 52927877fdebSMatt Macy METASLABSTAT_BUMP(metaslabstat_try_hard); 5293eda14cbcSMatt Macy try_hard = B_TRUE; 5294eda14cbcSMatt Macy goto top; 5295eda14cbcSMatt Macy } 5296eda14cbcSMatt Macy 5297eda14cbcSMatt Macy bzero(&dva[d], sizeof (dva_t)); 5298eda14cbcSMatt Macy 5299eda14cbcSMatt Macy metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator); 5300eda14cbcSMatt Macy return (SET_ERROR(ENOSPC)); 5301eda14cbcSMatt Macy } 5302eda14cbcSMatt Macy 5303eda14cbcSMatt Macy void 5304eda14cbcSMatt Macy metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize, 5305eda14cbcSMatt Macy boolean_t checkpoint) 5306eda14cbcSMatt Macy { 5307eda14cbcSMatt Macy metaslab_t *msp; 5308eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 5309eda14cbcSMatt Macy 5310eda14cbcSMatt Macy ASSERT(vdev_is_concrete(vd)); 5311eda14cbcSMatt Macy ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 5312eda14cbcSMatt Macy ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count); 5313eda14cbcSMatt Macy 5314eda14cbcSMatt Macy msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 5315eda14cbcSMatt Macy 5316eda14cbcSMatt Macy VERIFY(!msp->ms_condensing); 5317eda14cbcSMatt Macy VERIFY3U(offset, >=, msp->ms_start); 5318eda14cbcSMatt Macy VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size); 5319eda14cbcSMatt Macy VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 5320eda14cbcSMatt Macy VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift)); 5321eda14cbcSMatt Macy 5322eda14cbcSMatt Macy metaslab_check_free_impl(vd, offset, asize); 5323eda14cbcSMatt Macy 5324eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 5325eda14cbcSMatt Macy if (range_tree_is_empty(msp->ms_freeing) && 5326eda14cbcSMatt Macy range_tree_is_empty(msp->ms_checkpointing)) { 5327eda14cbcSMatt Macy vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa)); 5328eda14cbcSMatt Macy } 5329eda14cbcSMatt Macy 5330eda14cbcSMatt Macy if (checkpoint) { 5331eda14cbcSMatt Macy ASSERT(spa_has_checkpoint(spa)); 5332eda14cbcSMatt Macy range_tree_add(msp->ms_checkpointing, offset, asize); 5333eda14cbcSMatt Macy } else { 5334eda14cbcSMatt Macy range_tree_add(msp->ms_freeing, offset, asize); 5335eda14cbcSMatt Macy } 5336eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 5337eda14cbcSMatt Macy } 5338eda14cbcSMatt Macy 5339eda14cbcSMatt Macy /* ARGSUSED */ 5340eda14cbcSMatt Macy void 5341eda14cbcSMatt Macy metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 5342eda14cbcSMatt Macy uint64_t size, void *arg) 5343eda14cbcSMatt Macy { 5344eda14cbcSMatt Macy boolean_t *checkpoint = arg; 5345eda14cbcSMatt Macy 5346eda14cbcSMatt Macy ASSERT3P(checkpoint, !=, NULL); 5347eda14cbcSMatt Macy 5348eda14cbcSMatt Macy if (vd->vdev_ops->vdev_op_remap != NULL) 5349eda14cbcSMatt Macy vdev_indirect_mark_obsolete(vd, offset, size); 5350eda14cbcSMatt Macy else 5351eda14cbcSMatt Macy metaslab_free_impl(vd, offset, size, *checkpoint); 5352eda14cbcSMatt Macy } 5353eda14cbcSMatt Macy 5354eda14cbcSMatt Macy static void 5355eda14cbcSMatt Macy metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size, 5356eda14cbcSMatt Macy boolean_t checkpoint) 5357eda14cbcSMatt Macy { 5358eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 5359eda14cbcSMatt Macy 5360eda14cbcSMatt Macy ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 5361eda14cbcSMatt 
Macy 5362eda14cbcSMatt Macy if (spa_syncing_txg(spa) > spa_freeze_txg(spa)) 5363eda14cbcSMatt Macy return; 5364eda14cbcSMatt Macy 5365eda14cbcSMatt Macy if (spa->spa_vdev_removal != NULL && 5366eda14cbcSMatt Macy spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id && 5367eda14cbcSMatt Macy vdev_is_concrete(vd)) { 5368eda14cbcSMatt Macy /* 5369eda14cbcSMatt Macy * Note: we check if the vdev is concrete because when 5370eda14cbcSMatt Macy * we complete the removal, we first change the vdev to be 5371eda14cbcSMatt Macy * an indirect vdev (in open context), and then (in syncing 5372eda14cbcSMatt Macy * context) clear spa_vdev_removal. 5373eda14cbcSMatt Macy */ 5374eda14cbcSMatt Macy free_from_removing_vdev(vd, offset, size); 5375eda14cbcSMatt Macy } else if (vd->vdev_ops->vdev_op_remap != NULL) { 5376eda14cbcSMatt Macy vdev_indirect_mark_obsolete(vd, offset, size); 5377eda14cbcSMatt Macy vd->vdev_ops->vdev_op_remap(vd, offset, size, 5378eda14cbcSMatt Macy metaslab_free_impl_cb, &checkpoint); 5379eda14cbcSMatt Macy } else { 5380eda14cbcSMatt Macy metaslab_free_concrete(vd, offset, size, checkpoint); 5381eda14cbcSMatt Macy } 5382eda14cbcSMatt Macy } 5383eda14cbcSMatt Macy 5384eda14cbcSMatt Macy typedef struct remap_blkptr_cb_arg { 5385eda14cbcSMatt Macy blkptr_t *rbca_bp; 5386eda14cbcSMatt Macy spa_remap_cb_t rbca_cb; 5387eda14cbcSMatt Macy vdev_t *rbca_remap_vd; 5388eda14cbcSMatt Macy uint64_t rbca_remap_offset; 5389eda14cbcSMatt Macy void *rbca_cb_arg; 5390eda14cbcSMatt Macy } remap_blkptr_cb_arg_t; 5391eda14cbcSMatt Macy 5392eda14cbcSMatt Macy static void 5393eda14cbcSMatt Macy remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 5394eda14cbcSMatt Macy uint64_t size, void *arg) 5395eda14cbcSMatt Macy { 5396eda14cbcSMatt Macy remap_blkptr_cb_arg_t *rbca = arg; 5397eda14cbcSMatt Macy blkptr_t *bp = rbca->rbca_bp; 5398eda14cbcSMatt Macy 5399eda14cbcSMatt Macy /* We can not remap split blocks. */ 5400eda14cbcSMatt Macy if (size != DVA_GET_ASIZE(&bp->blk_dva[0])) 5401eda14cbcSMatt Macy return; 5402eda14cbcSMatt Macy ASSERT0(inner_offset); 5403eda14cbcSMatt Macy 5404eda14cbcSMatt Macy if (rbca->rbca_cb != NULL) { 5405eda14cbcSMatt Macy /* 5406eda14cbcSMatt Macy * At this point we know that we are not handling split 5407eda14cbcSMatt Macy * blocks and we invoke the callback on the previous 5408eda14cbcSMatt Macy * vdev which must be indirect. 5409eda14cbcSMatt Macy */ 5410eda14cbcSMatt Macy ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops); 5411eda14cbcSMatt Macy 5412eda14cbcSMatt Macy rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id, 5413eda14cbcSMatt Macy rbca->rbca_remap_offset, size, rbca->rbca_cb_arg); 5414eda14cbcSMatt Macy 5415eda14cbcSMatt Macy /* set up remap_blkptr_cb_arg for the next call */ 5416eda14cbcSMatt Macy rbca->rbca_remap_vd = vd; 5417eda14cbcSMatt Macy rbca->rbca_remap_offset = offset; 5418eda14cbcSMatt Macy } 5419eda14cbcSMatt Macy 5420eda14cbcSMatt Macy /* 5421eda14cbcSMatt Macy * The phys birth time is that of dva[0]. This ensures that we know 5422eda14cbcSMatt Macy * when each dva was written, so that resilver can determine which 5423eda14cbcSMatt Macy * blocks need to be scrubbed (i.e. those written during the time 5424eda14cbcSMatt Macy * the vdev was offline). It also ensures that the key used in 5425eda14cbcSMatt Macy * the ARC hash table is unique (i.e. dva[0] + phys_birth). 
If 5426eda14cbcSMatt Macy * we didn't change the phys_birth, a lookup in the ARC for a 5427eda14cbcSMatt Macy * remapped BP could find the data that was previously stored at 5428eda14cbcSMatt Macy * this vdev + offset. 5429eda14cbcSMatt Macy */ 5430eda14cbcSMatt Macy vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa, 5431eda14cbcSMatt Macy DVA_GET_VDEV(&bp->blk_dva[0])); 5432eda14cbcSMatt Macy vdev_indirect_births_t *vib = oldvd->vdev_indirect_births; 5433eda14cbcSMatt Macy bp->blk_phys_birth = vdev_indirect_births_physbirth(vib, 5434eda14cbcSMatt Macy DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0])); 5435eda14cbcSMatt Macy 5436eda14cbcSMatt Macy DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id); 5437eda14cbcSMatt Macy DVA_SET_OFFSET(&bp->blk_dva[0], offset); 5438eda14cbcSMatt Macy } 5439eda14cbcSMatt Macy 5440eda14cbcSMatt Macy /* 5441eda14cbcSMatt Macy * If the block pointer contains any indirect DVAs, modify them to refer to 5442eda14cbcSMatt Macy * concrete DVAs. Note that this will sometimes not be possible, leaving 5443eda14cbcSMatt Macy * the indirect DVA in place. This happens if the indirect DVA spans multiple 5444eda14cbcSMatt Macy * segments in the mapping (i.e. it is a "split block"). 5445eda14cbcSMatt Macy * 5446eda14cbcSMatt Macy * If the BP was remapped, calls the callback on the original dva (note the 5447eda14cbcSMatt Macy * callback can be called multiple times if the original indirect DVA refers 5448eda14cbcSMatt Macy * to another indirect DVA, etc). 5449eda14cbcSMatt Macy * 5450eda14cbcSMatt Macy * Returns TRUE if the BP was remapped. 5451eda14cbcSMatt Macy */ 5452eda14cbcSMatt Macy boolean_t 5453eda14cbcSMatt Macy spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg) 5454eda14cbcSMatt Macy { 5455eda14cbcSMatt Macy remap_blkptr_cb_arg_t rbca; 5456eda14cbcSMatt Macy 5457eda14cbcSMatt Macy if (!zfs_remap_blkptr_enable) 5458eda14cbcSMatt Macy return (B_FALSE); 5459eda14cbcSMatt Macy 5460eda14cbcSMatt Macy if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) 5461eda14cbcSMatt Macy return (B_FALSE); 5462eda14cbcSMatt Macy 5463eda14cbcSMatt Macy /* 5464eda14cbcSMatt Macy * Dedup BP's can not be remapped, because ddt_phys_select() depends 5465eda14cbcSMatt Macy * on DVA[0] being the same in the BP as in the DDT (dedup table). 5466eda14cbcSMatt Macy */ 5467eda14cbcSMatt Macy if (BP_GET_DEDUP(bp)) 5468eda14cbcSMatt Macy return (B_FALSE); 5469eda14cbcSMatt Macy 5470eda14cbcSMatt Macy /* 5471eda14cbcSMatt Macy * Gang blocks can not be remapped, because 5472eda14cbcSMatt Macy * zio_checksum_gang_verifier() depends on the DVA[0] that's in 5473eda14cbcSMatt Macy * the BP used to read the gang block header (GBH) being the same 5474eda14cbcSMatt Macy * as the DVA[0] that we allocated for the GBH. 5475eda14cbcSMatt Macy */ 5476eda14cbcSMatt Macy if (BP_IS_GANG(bp)) 5477eda14cbcSMatt Macy return (B_FALSE); 5478eda14cbcSMatt Macy 5479eda14cbcSMatt Macy /* 5480eda14cbcSMatt Macy * Embedded BP's have no DVA to remap. 5481eda14cbcSMatt Macy */ 5482eda14cbcSMatt Macy if (BP_GET_NDVAS(bp) < 1) 5483eda14cbcSMatt Macy return (B_FALSE); 5484eda14cbcSMatt Macy 5485eda14cbcSMatt Macy /* 5486eda14cbcSMatt Macy * Note: we only remap dva[0]. If we remapped other dvas, we 5487eda14cbcSMatt Macy * would no longer know what their phys birth txg is. 
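 *
 * Taken together, the eligibility tests above reduce to a simple
 * predicate. The sketch below mirrors their order using a simplified
 * bp_info_t with plain booleans instead of the real block-pointer
 * macros; it is illustrative only.
 *
 *      #include <stdbool.h>
 *
 *      typedef struct bp_info {
 *              bool bi_dedup;          // dedup needs dva[0] unchanged
 *              bool bi_gang;           // gang headers likewise
 *              int bi_ndvas;           // embedded BPs have no DVAs
 *              bool bi_on_indirect_vdev;
 *      } bp_info_t;
 *
 *      static bool
 *      can_remap(const bp_info_t *bi)
 *      {
 *              if (bi->bi_dedup || bi->bi_gang)
 *                      return (false);
 *              if (bi->bi_ndvas < 1)
 *                      return (false);
 *              return (bi->bi_on_indirect_vdev);
 *      }
 *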
5488eda14cbcSMatt Macy */ 5489eda14cbcSMatt Macy dva_t *dva = &bp->blk_dva[0]; 5490eda14cbcSMatt Macy 5491eda14cbcSMatt Macy uint64_t offset = DVA_GET_OFFSET(dva); 5492eda14cbcSMatt Macy uint64_t size = DVA_GET_ASIZE(dva); 5493eda14cbcSMatt Macy vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 5494eda14cbcSMatt Macy 5495eda14cbcSMatt Macy if (vd->vdev_ops->vdev_op_remap == NULL) 5496eda14cbcSMatt Macy return (B_FALSE); 5497eda14cbcSMatt Macy 5498eda14cbcSMatt Macy rbca.rbca_bp = bp; 5499eda14cbcSMatt Macy rbca.rbca_cb = callback; 5500eda14cbcSMatt Macy rbca.rbca_remap_vd = vd; 5501eda14cbcSMatt Macy rbca.rbca_remap_offset = offset; 5502eda14cbcSMatt Macy rbca.rbca_cb_arg = arg; 5503eda14cbcSMatt Macy 5504eda14cbcSMatt Macy /* 5505eda14cbcSMatt Macy * remap_blkptr_cb() will be called in order for each level of 5506eda14cbcSMatt Macy * indirection, until a concrete vdev is reached or a split block is 5507eda14cbcSMatt Macy * encountered. old_vd and old_offset are updated within the callback 5508eda14cbcSMatt Macy * as we go from the one indirect vdev to the next one (either concrete 5509eda14cbcSMatt Macy * or indirect again) in that order. 5510eda14cbcSMatt Macy */ 5511eda14cbcSMatt Macy vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca); 5512eda14cbcSMatt Macy 5513eda14cbcSMatt Macy /* Check if the DVA wasn't remapped because it is a split block */ 5514eda14cbcSMatt Macy if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id) 5515eda14cbcSMatt Macy return (B_FALSE); 5516eda14cbcSMatt Macy 5517eda14cbcSMatt Macy return (B_TRUE); 5518eda14cbcSMatt Macy } 5519eda14cbcSMatt Macy 5520eda14cbcSMatt Macy /* 5521eda14cbcSMatt Macy * Undo the allocation of a DVA which happened in the given transaction group. 5522eda14cbcSMatt Macy */ 5523eda14cbcSMatt Macy void 5524eda14cbcSMatt Macy metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg) 5525eda14cbcSMatt Macy { 5526eda14cbcSMatt Macy metaslab_t *msp; 5527eda14cbcSMatt Macy vdev_t *vd; 5528eda14cbcSMatt Macy uint64_t vdev = DVA_GET_VDEV(dva); 5529eda14cbcSMatt Macy uint64_t offset = DVA_GET_OFFSET(dva); 5530eda14cbcSMatt Macy uint64_t size = DVA_GET_ASIZE(dva); 5531eda14cbcSMatt Macy 5532eda14cbcSMatt Macy ASSERT(DVA_IS_VALID(dva)); 5533eda14cbcSMatt Macy ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 5534eda14cbcSMatt Macy 5535eda14cbcSMatt Macy if (txg > spa_freeze_txg(spa)) 5536eda14cbcSMatt Macy return; 5537eda14cbcSMatt Macy 5538eda14cbcSMatt Macy if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) || 5539eda14cbcSMatt Macy (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) { 5540eda14cbcSMatt Macy zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu", 5541eda14cbcSMatt Macy (u_longlong_t)vdev, (u_longlong_t)offset, 5542eda14cbcSMatt Macy (u_longlong_t)size); 5543eda14cbcSMatt Macy return; 5544eda14cbcSMatt Macy } 5545eda14cbcSMatt Macy 5546eda14cbcSMatt Macy ASSERT(!vd->vdev_removing); 5547eda14cbcSMatt Macy ASSERT(vdev_is_concrete(vd)); 5548eda14cbcSMatt Macy ASSERT0(vd->vdev_indirect_config.vic_mapping_object); 5549eda14cbcSMatt Macy ASSERT3P(vd->vdev_indirect_mapping, ==, NULL); 5550eda14cbcSMatt Macy 5551eda14cbcSMatt Macy if (DVA_GET_GANG(dva)) 5552eda14cbcSMatt Macy size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); 5553eda14cbcSMatt Macy 5554eda14cbcSMatt Macy msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 5555eda14cbcSMatt Macy 5556eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 5557eda14cbcSMatt Macy range_tree_remove(msp->ms_allocating[txg & TXG_MASK], 
5558eda14cbcSMatt Macy offset, size); 5559eda14cbcSMatt Macy msp->ms_allocating_total -= size; 5560eda14cbcSMatt Macy 5561eda14cbcSMatt Macy VERIFY(!msp->ms_condensing); 5562eda14cbcSMatt Macy VERIFY3U(offset, >=, msp->ms_start); 5563eda14cbcSMatt Macy VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size); 5564eda14cbcSMatt Macy VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=, 5565eda14cbcSMatt Macy msp->ms_size); 5566eda14cbcSMatt Macy VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 5567eda14cbcSMatt Macy VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 5568eda14cbcSMatt Macy range_tree_add(msp->ms_allocatable, offset, size); 5569eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 5570eda14cbcSMatt Macy } 5571eda14cbcSMatt Macy 5572eda14cbcSMatt Macy /* 5573eda14cbcSMatt Macy * Free the block represented by the given DVA. 5574eda14cbcSMatt Macy */ 5575eda14cbcSMatt Macy void 5576eda14cbcSMatt Macy metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint) 5577eda14cbcSMatt Macy { 5578eda14cbcSMatt Macy uint64_t vdev = DVA_GET_VDEV(dva); 5579eda14cbcSMatt Macy uint64_t offset = DVA_GET_OFFSET(dva); 5580eda14cbcSMatt Macy uint64_t size = DVA_GET_ASIZE(dva); 5581eda14cbcSMatt Macy vdev_t *vd = vdev_lookup_top(spa, vdev); 5582eda14cbcSMatt Macy 5583eda14cbcSMatt Macy ASSERT(DVA_IS_VALID(dva)); 5584eda14cbcSMatt Macy ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 5585eda14cbcSMatt Macy 5586eda14cbcSMatt Macy if (DVA_GET_GANG(dva)) { 5587eda14cbcSMatt Macy size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); 5588eda14cbcSMatt Macy } 5589eda14cbcSMatt Macy 5590eda14cbcSMatt Macy metaslab_free_impl(vd, offset, size, checkpoint); 5591eda14cbcSMatt Macy } 5592eda14cbcSMatt Macy 5593eda14cbcSMatt Macy /* 5594eda14cbcSMatt Macy * Reserve some allocation slots. The reservation system must be called 5595eda14cbcSMatt Macy * before we call into the allocator. If there aren't any available slots 5596eda14cbcSMatt Macy * then the I/O will be throttled until an I/O completes and its slots are 5597eda14cbcSMatt Macy * freed up. The function returns true if it was successful in placing 5598eda14cbcSMatt Macy * the reservation. 5599eda14cbcSMatt Macy */ 5600eda14cbcSMatt Macy boolean_t 5601eda14cbcSMatt Macy metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator, 5602eda14cbcSMatt Macy zio_t *zio, int flags) 5603eda14cbcSMatt Macy { 56047877fdebSMatt Macy metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator]; 5605eda14cbcSMatt Macy uint64_t available_slots = 0; 5606eda14cbcSMatt Macy boolean_t slot_reserved = B_FALSE; 56077877fdebSMatt Macy uint64_t max = mca->mca_alloc_max_slots; 5608eda14cbcSMatt Macy 5609eda14cbcSMatt Macy ASSERT(mc->mc_alloc_throttle_enabled); 5610eda14cbcSMatt Macy mutex_enter(&mc->mc_lock); 5611eda14cbcSMatt Macy 56127877fdebSMatt Macy uint64_t reserved_slots = zfs_refcount_count(&mca->mca_alloc_slots); 5613eda14cbcSMatt Macy if (reserved_slots < max) 5614eda14cbcSMatt Macy available_slots = max - reserved_slots; 5615eda14cbcSMatt Macy 5616eda14cbcSMatt Macy if (slots <= available_slots || GANG_ALLOCATION(flags) || 5617eda14cbcSMatt Macy flags & METASLAB_MUST_RESERVE) { 5618eda14cbcSMatt Macy /* 5619eda14cbcSMatt Macy * We reserve the slots individually so that we can unreserve 5620eda14cbcSMatt Macy * them individually when an I/O completes. 
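		 *
		 * Roughly the pairing a caller such as the ZIO pipeline
		 * performs (a simplified sketch, not the literal zio code;
		 * error handling and throttling retries omitted):
		 *
		 *	if (metaslab_class_throttle_reserve(mc, ndvas,
		 *	    allocator, zio, 0)) {
		 *		error = metaslab_alloc(spa, mc, psize, bp,
		 *		    ndvas, txg, NULL, 0, &zio->io_alloc_list,
		 *		    zio, allocator);
		 *		...
		 *		metaslab_class_throttle_unreserve(mc, ndvas,
		 *		    allocator, zio);
		 *	}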
5621eda14cbcSMatt Macy */ 56227877fdebSMatt Macy for (int d = 0; d < slots; d++) 56237877fdebSMatt Macy zfs_refcount_add(&mca->mca_alloc_slots, zio); 5624eda14cbcSMatt Macy zio->io_flags |= ZIO_FLAG_IO_ALLOCATING; 5625eda14cbcSMatt Macy slot_reserved = B_TRUE; 5626eda14cbcSMatt Macy } 5627eda14cbcSMatt Macy 5628eda14cbcSMatt Macy mutex_exit(&mc->mc_lock); 5629eda14cbcSMatt Macy return (slot_reserved); 5630eda14cbcSMatt Macy } 5631eda14cbcSMatt Macy 5632eda14cbcSMatt Macy void 5633eda14cbcSMatt Macy metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, 5634eda14cbcSMatt Macy int allocator, zio_t *zio) 5635eda14cbcSMatt Macy { 56367877fdebSMatt Macy metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator]; 56377877fdebSMatt Macy 5638eda14cbcSMatt Macy ASSERT(mc->mc_alloc_throttle_enabled); 5639eda14cbcSMatt Macy mutex_enter(&mc->mc_lock); 56407877fdebSMatt Macy for (int d = 0; d < slots; d++) 56417877fdebSMatt Macy zfs_refcount_remove(&mca->mca_alloc_slots, zio); 5642eda14cbcSMatt Macy mutex_exit(&mc->mc_lock); 5643eda14cbcSMatt Macy } 5644eda14cbcSMatt Macy 5645eda14cbcSMatt Macy static int 5646eda14cbcSMatt Macy metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size, 5647eda14cbcSMatt Macy uint64_t txg) 5648eda14cbcSMatt Macy { 5649eda14cbcSMatt Macy metaslab_t *msp; 5650eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 5651eda14cbcSMatt Macy int error = 0; 5652eda14cbcSMatt Macy 5653eda14cbcSMatt Macy if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count) 5654eda14cbcSMatt Macy return (SET_ERROR(ENXIO)); 5655eda14cbcSMatt Macy 5656eda14cbcSMatt Macy ASSERT3P(vd->vdev_ms, !=, NULL); 5657eda14cbcSMatt Macy msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 5658eda14cbcSMatt Macy 5659eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 5660eda14cbcSMatt Macy 5661eda14cbcSMatt Macy if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) { 5662eda14cbcSMatt Macy error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM); 5663eda14cbcSMatt Macy if (error == EBUSY) { 5664eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 5665eda14cbcSMatt Macy ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); 5666eda14cbcSMatt Macy error = 0; 5667eda14cbcSMatt Macy } 5668eda14cbcSMatt Macy } 5669eda14cbcSMatt Macy 5670eda14cbcSMatt Macy if (error == 0 && 5671eda14cbcSMatt Macy !range_tree_contains(msp->ms_allocatable, offset, size)) 5672eda14cbcSMatt Macy error = SET_ERROR(ENOENT); 5673eda14cbcSMatt Macy 5674eda14cbcSMatt Macy if (error || txg == 0) { /* txg == 0 indicates dry run */ 5675eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 5676eda14cbcSMatt Macy return (error); 5677eda14cbcSMatt Macy } 5678eda14cbcSMatt Macy 5679eda14cbcSMatt Macy VERIFY(!msp->ms_condensing); 5680eda14cbcSMatt Macy VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 5681eda14cbcSMatt Macy VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 5682eda14cbcSMatt Macy VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=, 5683eda14cbcSMatt Macy msp->ms_size); 5684eda14cbcSMatt Macy range_tree_remove(msp->ms_allocatable, offset, size); 5685eda14cbcSMatt Macy range_tree_clear(msp->ms_trim, offset, size); 5686eda14cbcSMatt Macy 56877877fdebSMatt Macy if (spa_writeable(spa)) { /* don't dirty if we're zdb(8) */ 5688eda14cbcSMatt Macy metaslab_class_t *mc = msp->ms_group->mg_class; 5689eda14cbcSMatt Macy multilist_sublist_t *mls = 5690eda14cbcSMatt Macy multilist_sublist_lock_obj(mc->mc_metaslab_txg_list, msp); 5691eda14cbcSMatt Macy if (!multilist_link_active(&msp->ms_class_txg_node)) { 5692eda14cbcSMatt Macy msp->ms_selected_txg = txg; 
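			/*
			 * Keep the claimed metaslab on the class's
			 * recently-used list, as a normal allocation would,
			 * so the unload heuristics account for it.
			 */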
5693eda14cbcSMatt Macy multilist_sublist_insert_head(mls, msp); 5694eda14cbcSMatt Macy } 5695eda14cbcSMatt Macy multilist_sublist_unlock(mls); 5696eda14cbcSMatt Macy 5697eda14cbcSMatt Macy if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK])) 5698eda14cbcSMatt Macy vdev_dirty(vd, VDD_METASLAB, msp, txg); 5699eda14cbcSMatt Macy range_tree_add(msp->ms_allocating[txg & TXG_MASK], 5700eda14cbcSMatt Macy offset, size); 5701eda14cbcSMatt Macy msp->ms_allocating_total += size; 5702eda14cbcSMatt Macy } 5703eda14cbcSMatt Macy 5704eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 5705eda14cbcSMatt Macy 5706eda14cbcSMatt Macy return (0); 5707eda14cbcSMatt Macy } 5708eda14cbcSMatt Macy 5709eda14cbcSMatt Macy typedef struct metaslab_claim_cb_arg_t { 5710eda14cbcSMatt Macy uint64_t mcca_txg; 5711eda14cbcSMatt Macy int mcca_error; 5712eda14cbcSMatt Macy } metaslab_claim_cb_arg_t; 5713eda14cbcSMatt Macy 5714eda14cbcSMatt Macy /* ARGSUSED */ 5715eda14cbcSMatt Macy static void 5716eda14cbcSMatt Macy metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 5717eda14cbcSMatt Macy uint64_t size, void *arg) 5718eda14cbcSMatt Macy { 5719eda14cbcSMatt Macy metaslab_claim_cb_arg_t *mcca_arg = arg; 5720eda14cbcSMatt Macy 5721eda14cbcSMatt Macy if (mcca_arg->mcca_error == 0) { 5722eda14cbcSMatt Macy mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset, 5723eda14cbcSMatt Macy size, mcca_arg->mcca_txg); 5724eda14cbcSMatt Macy } 5725eda14cbcSMatt Macy } 5726eda14cbcSMatt Macy 5727eda14cbcSMatt Macy int 5728eda14cbcSMatt Macy metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg) 5729eda14cbcSMatt Macy { 5730eda14cbcSMatt Macy if (vd->vdev_ops->vdev_op_remap != NULL) { 5731eda14cbcSMatt Macy metaslab_claim_cb_arg_t arg; 5732eda14cbcSMatt Macy 5733eda14cbcSMatt Macy /* 57347877fdebSMatt Macy * Only zdb(8) can claim on indirect vdevs. This is used 5735eda14cbcSMatt Macy * to detect leaks of mapped space (that are not accounted 5736eda14cbcSMatt Macy * for in the obsolete counts, spacemap, or bpobj). 5737eda14cbcSMatt Macy */ 5738eda14cbcSMatt Macy ASSERT(!spa_writeable(vd->vdev_spa)); 5739eda14cbcSMatt Macy arg.mcca_error = 0; 5740eda14cbcSMatt Macy arg.mcca_txg = txg; 5741eda14cbcSMatt Macy 5742eda14cbcSMatt Macy vd->vdev_ops->vdev_op_remap(vd, offset, size, 5743eda14cbcSMatt Macy metaslab_claim_impl_cb, &arg); 5744eda14cbcSMatt Macy 5745eda14cbcSMatt Macy if (arg.mcca_error == 0) { 5746eda14cbcSMatt Macy arg.mcca_error = metaslab_claim_concrete(vd, 5747eda14cbcSMatt Macy offset, size, txg); 5748eda14cbcSMatt Macy } 5749eda14cbcSMatt Macy return (arg.mcca_error); 5750eda14cbcSMatt Macy } else { 5751eda14cbcSMatt Macy return (metaslab_claim_concrete(vd, offset, size, txg)); 5752eda14cbcSMatt Macy } 5753eda14cbcSMatt Macy } 5754eda14cbcSMatt Macy 5755eda14cbcSMatt Macy /* 5756eda14cbcSMatt Macy * Intent log support: upon opening the pool after a crash, notify the SPA 5757eda14cbcSMatt Macy * of blocks that the intent log has allocated for immediate write, but 5758eda14cbcSMatt Macy * which are still considered free by the SPA because the last transaction 5759eda14cbcSMatt Macy * group didn't commit yet. 
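 *
 * These claims are issued while the intent logs are claimed at pool open
 * (via zio_claim()); metaslab_claim() further below first performs a dry
 * run with txg == 0 across all DVAs so that a partial failure never has
 * to be unwound.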
5760eda14cbcSMatt Macy */ 5761eda14cbcSMatt Macy static int 5762eda14cbcSMatt Macy metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg) 5763eda14cbcSMatt Macy { 5764eda14cbcSMatt Macy uint64_t vdev = DVA_GET_VDEV(dva); 5765eda14cbcSMatt Macy uint64_t offset = DVA_GET_OFFSET(dva); 5766eda14cbcSMatt Macy uint64_t size = DVA_GET_ASIZE(dva); 5767eda14cbcSMatt Macy vdev_t *vd; 5768eda14cbcSMatt Macy 5769eda14cbcSMatt Macy if ((vd = vdev_lookup_top(spa, vdev)) == NULL) { 5770eda14cbcSMatt Macy return (SET_ERROR(ENXIO)); 5771eda14cbcSMatt Macy } 5772eda14cbcSMatt Macy 5773eda14cbcSMatt Macy ASSERT(DVA_IS_VALID(dva)); 5774eda14cbcSMatt Macy 5775eda14cbcSMatt Macy if (DVA_GET_GANG(dva)) 5776eda14cbcSMatt Macy size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); 5777eda14cbcSMatt Macy 5778eda14cbcSMatt Macy return (metaslab_claim_impl(vd, offset, size, txg)); 5779eda14cbcSMatt Macy } 5780eda14cbcSMatt Macy 5781eda14cbcSMatt Macy int 5782eda14cbcSMatt Macy metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, 5783eda14cbcSMatt Macy int ndvas, uint64_t txg, blkptr_t *hintbp, int flags, 5784eda14cbcSMatt Macy zio_alloc_list_t *zal, zio_t *zio, int allocator) 5785eda14cbcSMatt Macy { 5786eda14cbcSMatt Macy dva_t *dva = bp->blk_dva; 5787eda14cbcSMatt Macy dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL; 5788eda14cbcSMatt Macy int error = 0; 5789eda14cbcSMatt Macy 5790eda14cbcSMatt Macy ASSERT(bp->blk_birth == 0); 5791eda14cbcSMatt Macy ASSERT(BP_PHYSICAL_BIRTH(bp) == 0); 5792eda14cbcSMatt Macy 5793eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); 5794eda14cbcSMatt Macy 57957877fdebSMatt Macy if (mc->mc_allocator[allocator].mca_rotor == NULL) { 57967877fdebSMatt Macy /* no vdevs in this class */ 5797eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALLOC, FTAG); 5798eda14cbcSMatt Macy return (SET_ERROR(ENOSPC)); 5799eda14cbcSMatt Macy } 5800eda14cbcSMatt Macy 5801eda14cbcSMatt Macy ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa)); 5802eda14cbcSMatt Macy ASSERT(BP_GET_NDVAS(bp) == 0); 5803eda14cbcSMatt Macy ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp)); 5804eda14cbcSMatt Macy ASSERT3P(zal, !=, NULL); 5805eda14cbcSMatt Macy 5806eda14cbcSMatt Macy for (int d = 0; d < ndvas; d++) { 5807eda14cbcSMatt Macy error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva, 5808eda14cbcSMatt Macy txg, flags, zal, allocator); 5809eda14cbcSMatt Macy if (error != 0) { 5810eda14cbcSMatt Macy for (d--; d >= 0; d--) { 5811eda14cbcSMatt Macy metaslab_unalloc_dva(spa, &dva[d], txg); 5812eda14cbcSMatt Macy metaslab_group_alloc_decrement(spa, 5813eda14cbcSMatt Macy DVA_GET_VDEV(&dva[d]), zio, flags, 5814eda14cbcSMatt Macy allocator, B_FALSE); 5815eda14cbcSMatt Macy bzero(&dva[d], sizeof (dva_t)); 5816eda14cbcSMatt Macy } 5817eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALLOC, FTAG); 5818eda14cbcSMatt Macy return (error); 5819eda14cbcSMatt Macy } else { 5820eda14cbcSMatt Macy /* 5821eda14cbcSMatt Macy * Update the metaslab group's queue depth 5822eda14cbcSMatt Macy * based on the newly allocated dva. 
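			 * The matching metaslab_group_alloc_decrement() is
			 * done in the error path above; for successful
			 * throttled allocations it happens later, when the
			 * allocating I/O completes.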
5823eda14cbcSMatt Macy */ 5824eda14cbcSMatt Macy metaslab_group_alloc_increment(spa, 5825eda14cbcSMatt Macy DVA_GET_VDEV(&dva[d]), zio, flags, allocator); 5826eda14cbcSMatt Macy } 5827eda14cbcSMatt Macy } 5828eda14cbcSMatt Macy ASSERT(error == 0); 5829eda14cbcSMatt Macy ASSERT(BP_GET_NDVAS(bp) == ndvas); 5830eda14cbcSMatt Macy 5831eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALLOC, FTAG); 5832eda14cbcSMatt Macy 5833eda14cbcSMatt Macy BP_SET_BIRTH(bp, txg, 0); 5834eda14cbcSMatt Macy 5835eda14cbcSMatt Macy return (0); 5836eda14cbcSMatt Macy } 5837eda14cbcSMatt Macy 5838eda14cbcSMatt Macy void 5839eda14cbcSMatt Macy metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now) 5840eda14cbcSMatt Macy { 5841eda14cbcSMatt Macy const dva_t *dva = bp->blk_dva; 5842eda14cbcSMatt Macy int ndvas = BP_GET_NDVAS(bp); 5843eda14cbcSMatt Macy 5844eda14cbcSMatt Macy ASSERT(!BP_IS_HOLE(bp)); 5845eda14cbcSMatt Macy ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa)); 5846eda14cbcSMatt Macy 5847eda14cbcSMatt Macy /* 5848eda14cbcSMatt Macy * If we have a checkpoint for the pool we need to make sure that 5849eda14cbcSMatt Macy * the blocks that we free that are part of the checkpoint won't be 5850eda14cbcSMatt Macy * reused until the checkpoint is discarded or we revert to it. 5851eda14cbcSMatt Macy * 5852eda14cbcSMatt Macy * The checkpoint flag is passed down the metaslab_free code path 5853eda14cbcSMatt Macy * and is set whenever we want to add a block to the checkpoint's 5854eda14cbcSMatt Macy * accounting. That is, we "checkpoint" blocks that existed at the 5855eda14cbcSMatt Macy * time the checkpoint was created and are therefore referenced by 5856eda14cbcSMatt Macy * the checkpointed uberblock. 5857eda14cbcSMatt Macy * 5858eda14cbcSMatt Macy * Note that, we don't checkpoint any blocks if the current 5859eda14cbcSMatt Macy * syncing txg <= spa_checkpoint_txg. We want these frees to sync 5860eda14cbcSMatt Macy * normally as they will be referenced by the checkpointed uberblock. 5861eda14cbcSMatt Macy */ 5862eda14cbcSMatt Macy boolean_t checkpoint = B_FALSE; 5863eda14cbcSMatt Macy if (bp->blk_birth <= spa->spa_checkpoint_txg && 5864eda14cbcSMatt Macy spa_syncing_txg(spa) > spa->spa_checkpoint_txg) { 5865eda14cbcSMatt Macy /* 5866eda14cbcSMatt Macy * At this point, if the block is part of the checkpoint 5867eda14cbcSMatt Macy * there is no way it was created in the current txg. 
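		 * Its birth txg is at most spa_checkpoint_txg, and the
		 * enclosing check requires the currently syncing txg to be
		 * strictly greater than that.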
5868eda14cbcSMatt Macy */ 5869eda14cbcSMatt Macy ASSERT(!now); 5870eda14cbcSMatt Macy ASSERT3U(spa_syncing_txg(spa), ==, txg); 5871eda14cbcSMatt Macy checkpoint = B_TRUE; 5872eda14cbcSMatt Macy } 5873eda14cbcSMatt Macy 5874eda14cbcSMatt Macy spa_config_enter(spa, SCL_FREE, FTAG, RW_READER); 5875eda14cbcSMatt Macy 5876eda14cbcSMatt Macy for (int d = 0; d < ndvas; d++) { 5877eda14cbcSMatt Macy if (now) { 5878eda14cbcSMatt Macy metaslab_unalloc_dva(spa, &dva[d], txg); 5879eda14cbcSMatt Macy } else { 5880eda14cbcSMatt Macy ASSERT3U(txg, ==, spa_syncing_txg(spa)); 5881eda14cbcSMatt Macy metaslab_free_dva(spa, &dva[d], checkpoint); 5882eda14cbcSMatt Macy } 5883eda14cbcSMatt Macy } 5884eda14cbcSMatt Macy 5885eda14cbcSMatt Macy spa_config_exit(spa, SCL_FREE, FTAG); 5886eda14cbcSMatt Macy } 5887eda14cbcSMatt Macy 5888eda14cbcSMatt Macy int 5889eda14cbcSMatt Macy metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) 5890eda14cbcSMatt Macy { 5891eda14cbcSMatt Macy const dva_t *dva = bp->blk_dva; 5892eda14cbcSMatt Macy int ndvas = BP_GET_NDVAS(bp); 5893eda14cbcSMatt Macy int error = 0; 5894eda14cbcSMatt Macy 5895eda14cbcSMatt Macy ASSERT(!BP_IS_HOLE(bp)); 5896eda14cbcSMatt Macy 5897eda14cbcSMatt Macy if (txg != 0) { 5898eda14cbcSMatt Macy /* 5899eda14cbcSMatt Macy * First do a dry run to make sure all DVAs are claimable, 5900eda14cbcSMatt Macy * so we don't have to unwind from partial failures below. 5901eda14cbcSMatt Macy */ 5902eda14cbcSMatt Macy if ((error = metaslab_claim(spa, bp, 0)) != 0) 5903eda14cbcSMatt Macy return (error); 5904eda14cbcSMatt Macy } 5905eda14cbcSMatt Macy 5906eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); 5907eda14cbcSMatt Macy 5908eda14cbcSMatt Macy for (int d = 0; d < ndvas; d++) { 5909eda14cbcSMatt Macy error = metaslab_claim_dva(spa, &dva[d], txg); 5910eda14cbcSMatt Macy if (error != 0) 5911eda14cbcSMatt Macy break; 5912eda14cbcSMatt Macy } 5913eda14cbcSMatt Macy 5914eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALLOC, FTAG); 5915eda14cbcSMatt Macy 5916eda14cbcSMatt Macy ASSERT(error == 0 || txg == 0); 5917eda14cbcSMatt Macy 5918eda14cbcSMatt Macy return (error); 5919eda14cbcSMatt Macy } 5920eda14cbcSMatt Macy 5921eda14cbcSMatt Macy void 5922eda14cbcSMatt Macy metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp) 5923eda14cbcSMatt Macy { 5924eda14cbcSMatt Macy const dva_t *dva = bp->blk_dva; 5925eda14cbcSMatt Macy int ndvas = BP_GET_NDVAS(bp); 5926eda14cbcSMatt Macy uint64_t psize = BP_GET_PSIZE(bp); 5927eda14cbcSMatt Macy int d; 5928eda14cbcSMatt Macy vdev_t *vd; 5929eda14cbcSMatt Macy 5930eda14cbcSMatt Macy ASSERT(!BP_IS_HOLE(bp)); 5931eda14cbcSMatt Macy ASSERT(!BP_IS_EMBEDDED(bp)); 5932eda14cbcSMatt Macy ASSERT(psize > 0); 5933eda14cbcSMatt Macy 5934eda14cbcSMatt Macy spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 5935eda14cbcSMatt Macy 5936eda14cbcSMatt Macy for (d = 0; d < ndvas; d++) { 5937eda14cbcSMatt Macy if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL) 5938eda14cbcSMatt Macy continue; 5939eda14cbcSMatt Macy atomic_add_64(&vd->vdev_pending_fastwrite, psize); 5940eda14cbcSMatt Macy } 5941eda14cbcSMatt Macy 5942eda14cbcSMatt Macy spa_config_exit(spa, SCL_VDEV, FTAG); 5943eda14cbcSMatt Macy } 5944eda14cbcSMatt Macy 5945eda14cbcSMatt Macy void 5946eda14cbcSMatt Macy metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp) 5947eda14cbcSMatt Macy { 5948eda14cbcSMatt Macy const dva_t *dva = bp->blk_dva; 5949eda14cbcSMatt Macy int ndvas = BP_GET_NDVAS(bp); 5950eda14cbcSMatt Macy uint64_t psize = BP_GET_PSIZE(bp); 
5951eda14cbcSMatt Macy int d; 5952eda14cbcSMatt Macy vdev_t *vd; 5953eda14cbcSMatt Macy 5954eda14cbcSMatt Macy ASSERT(!BP_IS_HOLE(bp)); 5955eda14cbcSMatt Macy ASSERT(!BP_IS_EMBEDDED(bp)); 5956eda14cbcSMatt Macy ASSERT(psize > 0); 5957eda14cbcSMatt Macy 5958eda14cbcSMatt Macy spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 5959eda14cbcSMatt Macy 5960eda14cbcSMatt Macy for (d = 0; d < ndvas; d++) { 5961eda14cbcSMatt Macy if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL) 5962eda14cbcSMatt Macy continue; 5963eda14cbcSMatt Macy ASSERT3U(vd->vdev_pending_fastwrite, >=, psize); 5964eda14cbcSMatt Macy atomic_sub_64(&vd->vdev_pending_fastwrite, psize); 5965eda14cbcSMatt Macy } 5966eda14cbcSMatt Macy 5967eda14cbcSMatt Macy spa_config_exit(spa, SCL_VDEV, FTAG); 5968eda14cbcSMatt Macy } 5969eda14cbcSMatt Macy 5970eda14cbcSMatt Macy /* ARGSUSED */ 5971eda14cbcSMatt Macy static void 5972eda14cbcSMatt Macy metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset, 5973eda14cbcSMatt Macy uint64_t size, void *arg) 5974eda14cbcSMatt Macy { 5975eda14cbcSMatt Macy if (vd->vdev_ops == &vdev_indirect_ops) 5976eda14cbcSMatt Macy return; 5977eda14cbcSMatt Macy 5978eda14cbcSMatt Macy metaslab_check_free_impl(vd, offset, size); 5979eda14cbcSMatt Macy } 5980eda14cbcSMatt Macy 5981eda14cbcSMatt Macy static void 5982eda14cbcSMatt Macy metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size) 5983eda14cbcSMatt Macy { 5984eda14cbcSMatt Macy metaslab_t *msp; 5985eda14cbcSMatt Macy spa_t *spa __maybe_unused = vd->vdev_spa; 5986eda14cbcSMatt Macy 5987eda14cbcSMatt Macy if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0) 5988eda14cbcSMatt Macy return; 5989eda14cbcSMatt Macy 5990eda14cbcSMatt Macy if (vd->vdev_ops->vdev_op_remap != NULL) { 5991eda14cbcSMatt Macy vd->vdev_ops->vdev_op_remap(vd, offset, size, 5992eda14cbcSMatt Macy metaslab_check_free_impl_cb, NULL); 5993eda14cbcSMatt Macy return; 5994eda14cbcSMatt Macy } 5995eda14cbcSMatt Macy 5996eda14cbcSMatt Macy ASSERT(vdev_is_concrete(vd)); 5997eda14cbcSMatt Macy ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count); 5998eda14cbcSMatt Macy ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 5999eda14cbcSMatt Macy 6000eda14cbcSMatt Macy msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 6001eda14cbcSMatt Macy 6002eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 6003eda14cbcSMatt Macy if (msp->ms_loaded) { 6004eda14cbcSMatt Macy range_tree_verify_not_present(msp->ms_allocatable, 6005eda14cbcSMatt Macy offset, size); 6006eda14cbcSMatt Macy } 6007eda14cbcSMatt Macy 6008eda14cbcSMatt Macy /* 6009eda14cbcSMatt Macy * Check all segments that currently exist in the freeing pipeline. 6010eda14cbcSMatt Macy * 6011eda14cbcSMatt Macy * It would intuitively make sense to also check the current allocating 6012eda14cbcSMatt Macy * tree since metaslab_unalloc_dva() exists for extents that are 6013eda14cbcSMatt Macy * allocated and freed in the same sync pass within the same txg. 6014eda14cbcSMatt Macy * Unfortunately there are places (e.g. the ZIL) where we allocate a 6015eda14cbcSMatt Macy * segment but then we free part of it within the same txg 6016eda14cbcSMatt Macy * [see zil_sync()]. Thus, we don't call range_tree_verify() in the 6017eda14cbcSMatt Macy * current allocating tree. 
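	 * The verification below therefore covers only the freeing,
	 * checkpointing, freed, defer, and trim trees, via
	 * range_tree_verify_not_present().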
6018eda14cbcSMatt Macy */ 6019eda14cbcSMatt Macy range_tree_verify_not_present(msp->ms_freeing, offset, size); 6020eda14cbcSMatt Macy range_tree_verify_not_present(msp->ms_checkpointing, offset, size); 6021eda14cbcSMatt Macy range_tree_verify_not_present(msp->ms_freed, offset, size); 6022eda14cbcSMatt Macy for (int j = 0; j < TXG_DEFER_SIZE; j++) 6023eda14cbcSMatt Macy range_tree_verify_not_present(msp->ms_defer[j], offset, size); 6024eda14cbcSMatt Macy range_tree_verify_not_present(msp->ms_trim, offset, size); 6025eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 6026eda14cbcSMatt Macy } 6027eda14cbcSMatt Macy 6028eda14cbcSMatt Macy void 6029eda14cbcSMatt Macy metaslab_check_free(spa_t *spa, const blkptr_t *bp) 6030eda14cbcSMatt Macy { 6031eda14cbcSMatt Macy if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0) 6032eda14cbcSMatt Macy return; 6033eda14cbcSMatt Macy 6034eda14cbcSMatt Macy spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 6035eda14cbcSMatt Macy for (int i = 0; i < BP_GET_NDVAS(bp); i++) { 6036eda14cbcSMatt Macy uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]); 6037eda14cbcSMatt Macy vdev_t *vd = vdev_lookup_top(spa, vdev); 6038eda14cbcSMatt Macy uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]); 6039eda14cbcSMatt Macy uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]); 6040eda14cbcSMatt Macy 6041eda14cbcSMatt Macy if (DVA_GET_GANG(&bp->blk_dva[i])) 6042eda14cbcSMatt Macy size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); 6043eda14cbcSMatt Macy 6044eda14cbcSMatt Macy ASSERT3P(vd, !=, NULL); 6045eda14cbcSMatt Macy 6046eda14cbcSMatt Macy metaslab_check_free_impl(vd, offset, size); 6047eda14cbcSMatt Macy } 6048eda14cbcSMatt Macy spa_config_exit(spa, SCL_VDEV, FTAG); 6049eda14cbcSMatt Macy } 6050eda14cbcSMatt Macy 6051eda14cbcSMatt Macy static void 6052eda14cbcSMatt Macy metaslab_group_disable_wait(metaslab_group_t *mg) 6053eda14cbcSMatt Macy { 6054eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock)); 6055eda14cbcSMatt Macy while (mg->mg_disabled_updating) { 6056eda14cbcSMatt Macy cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock); 6057eda14cbcSMatt Macy } 6058eda14cbcSMatt Macy } 6059eda14cbcSMatt Macy 6060eda14cbcSMatt Macy static void 6061eda14cbcSMatt Macy metaslab_group_disabled_increment(metaslab_group_t *mg) 6062eda14cbcSMatt Macy { 6063eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock)); 6064eda14cbcSMatt Macy ASSERT(mg->mg_disabled_updating); 6065eda14cbcSMatt Macy 6066eda14cbcSMatt Macy while (mg->mg_ms_disabled >= max_disabled_ms) { 6067eda14cbcSMatt Macy cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock); 6068eda14cbcSMatt Macy } 6069eda14cbcSMatt Macy mg->mg_ms_disabled++; 6070eda14cbcSMatt Macy ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms); 6071eda14cbcSMatt Macy } 6072eda14cbcSMatt Macy 6073eda14cbcSMatt Macy /* 6074eda14cbcSMatt Macy * Mark the metaslab as disabled to prevent any allocations on this metaslab. 6075eda14cbcSMatt Macy * We must also track how many metaslabs are currently disabled within a 6076eda14cbcSMatt Macy * metaslab group and limit them to prevent allocation failures from 6077eda14cbcSMatt Macy * occurring because all metaslabs are disabled. 
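 *
 * A simplified sketch of the expected calling pattern (roughly what the
 * initialize and TRIM code does; the metaslab_enable() arguments shown
 * here are illustrative):
 *
 *	metaslab_disable(msp);
 *	(operate on ranges that are not currently allocated)
 *	metaslab_enable(msp, B_TRUE, B_FALSE);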
6078eda14cbcSMatt Macy  */
6079eda14cbcSMatt Macy void
6080eda14cbcSMatt Macy metaslab_disable(metaslab_t *msp)
6081eda14cbcSMatt Macy {
6082eda14cbcSMatt Macy 	ASSERT(!MUTEX_HELD(&msp->ms_lock));
6083eda14cbcSMatt Macy 	metaslab_group_t *mg = msp->ms_group;
6084eda14cbcSMatt Macy 
6085eda14cbcSMatt Macy 	mutex_enter(&mg->mg_ms_disabled_lock);
6086eda14cbcSMatt Macy 
6087eda14cbcSMatt Macy 	/*
6088eda14cbcSMatt Macy 	 * To keep an accurate count of how many threads have disabled
6089eda14cbcSMatt Macy 	 * a specific metaslab group, we only allow one thread to mark
6090eda14cbcSMatt Macy 	 * the metaslab group at a time. This ensures that the value of
6091eda14cbcSMatt Macy 	 * ms_disabled will be accurate when we decide to mark a metaslab
6092eda14cbcSMatt Macy 	 * group as disabled. To do this we force all other threads
6093eda14cbcSMatt Macy 	 * to wait until the metaslab group's mg_disabled_updating flag
6094eda14cbcSMatt Macy 	 * is no longer set.
6095eda14cbcSMatt Macy 	 */
6096eda14cbcSMatt Macy 	metaslab_group_disable_wait(mg);
6097eda14cbcSMatt Macy 	mg->mg_disabled_updating = B_TRUE;
6098eda14cbcSMatt Macy 	if (msp->ms_disabled == 0) {
6099eda14cbcSMatt Macy 		metaslab_group_disabled_increment(mg);
6100eda14cbcSMatt Macy 	}
6101eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
6102eda14cbcSMatt Macy 	msp->ms_disabled++;
6103eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
6104eda14cbcSMatt Macy 
6105eda14cbcSMatt Macy 	mg->mg_disabled_updating = B_FALSE;
6106eda14cbcSMatt Macy 	cv_broadcast(&mg->mg_ms_disabled_cv);
6107eda14cbcSMatt Macy 	mutex_exit(&mg->mg_ms_disabled_lock);
6108eda14cbcSMatt Macy }
6109eda14cbcSMatt Macy 
6110eda14cbcSMatt Macy void
6111eda14cbcSMatt Macy metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload)
6112eda14cbcSMatt Macy {
6113eda14cbcSMatt Macy 	metaslab_group_t *mg = msp->ms_group;
6114eda14cbcSMatt Macy 	spa_t *spa = mg->mg_vd->vdev_spa;
6115eda14cbcSMatt Macy 
6116eda14cbcSMatt Macy 	/*
6117eda14cbcSMatt Macy 	 * Wait for the outstanding IO to be synced to prevent newly
6118eda14cbcSMatt Macy 	 * allocated blocks from being overwritten. This is used by
6119eda14cbcSMatt Macy 	 * initialize and TRIM, which modify unallocated space.
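	 * The unload argument additionally unloads the metaslab once the
	 * last disable is lifted (see the metaslab_unload() call below).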
6120eda14cbcSMatt Macy */ 6121eda14cbcSMatt Macy if (sync) 6122eda14cbcSMatt Macy txg_wait_synced(spa_get_dsl(spa), 0); 6123eda14cbcSMatt Macy 6124eda14cbcSMatt Macy mutex_enter(&mg->mg_ms_disabled_lock); 6125eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 6126eda14cbcSMatt Macy if (--msp->ms_disabled == 0) { 6127eda14cbcSMatt Macy mg->mg_ms_disabled--; 6128eda14cbcSMatt Macy cv_broadcast(&mg->mg_ms_disabled_cv); 6129eda14cbcSMatt Macy if (unload) 6130eda14cbcSMatt Macy metaslab_unload(msp); 6131eda14cbcSMatt Macy } 6132eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 6133eda14cbcSMatt Macy mutex_exit(&mg->mg_ms_disabled_lock); 6134eda14cbcSMatt Macy } 6135eda14cbcSMatt Macy 6136eda14cbcSMatt Macy static void 6137eda14cbcSMatt Macy metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx) 6138eda14cbcSMatt Macy { 6139eda14cbcSMatt Macy vdev_t *vd = ms->ms_group->mg_vd; 6140eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 6141eda14cbcSMatt Macy objset_t *mos = spa_meta_objset(spa); 6142eda14cbcSMatt Macy 6143eda14cbcSMatt Macy ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)); 6144eda14cbcSMatt Macy 6145eda14cbcSMatt Macy metaslab_unflushed_phys_t entry = { 6146eda14cbcSMatt Macy .msp_unflushed_txg = metaslab_unflushed_txg(ms), 6147eda14cbcSMatt Macy }; 6148eda14cbcSMatt Macy uint64_t entry_size = sizeof (entry); 6149eda14cbcSMatt Macy uint64_t entry_offset = ms->ms_id * entry_size; 6150eda14cbcSMatt Macy 6151eda14cbcSMatt Macy uint64_t object = 0; 6152eda14cbcSMatt Macy int err = zap_lookup(mos, vd->vdev_top_zap, 6153eda14cbcSMatt Macy VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, 6154eda14cbcSMatt Macy &object); 6155eda14cbcSMatt Macy if (err == ENOENT) { 6156eda14cbcSMatt Macy object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA, 6157eda14cbcSMatt Macy SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx); 6158eda14cbcSMatt Macy VERIFY0(zap_add(mos, vd->vdev_top_zap, 6159eda14cbcSMatt Macy VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, 6160eda14cbcSMatt Macy &object, tx)); 6161eda14cbcSMatt Macy } else { 6162eda14cbcSMatt Macy VERIFY0(err); 6163eda14cbcSMatt Macy } 6164eda14cbcSMatt Macy 6165eda14cbcSMatt Macy dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size, 6166eda14cbcSMatt Macy &entry, tx); 6167eda14cbcSMatt Macy } 6168eda14cbcSMatt Macy 6169eda14cbcSMatt Macy void 6170eda14cbcSMatt Macy metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx) 6171eda14cbcSMatt Macy { 6172eda14cbcSMatt Macy spa_t *spa = ms->ms_group->mg_vd->vdev_spa; 6173eda14cbcSMatt Macy 6174eda14cbcSMatt Macy if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) 6175eda14cbcSMatt Macy return; 6176eda14cbcSMatt Macy 6177eda14cbcSMatt Macy ms->ms_unflushed_txg = txg; 6178eda14cbcSMatt Macy metaslab_update_ondisk_flush_data(ms, tx); 6179eda14cbcSMatt Macy } 6180eda14cbcSMatt Macy 6181eda14cbcSMatt Macy uint64_t 6182eda14cbcSMatt Macy metaslab_unflushed_txg(metaslab_t *ms) 6183eda14cbcSMatt Macy { 6184eda14cbcSMatt Macy return (ms->ms_unflushed_txg); 6185eda14cbcSMatt Macy } 6186eda14cbcSMatt Macy 6187eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, ULONG, ZMOD_RW, 6188eda14cbcSMatt Macy "Allocation granularity (a.k.a. 
stripe size)"); 6189eda14cbcSMatt Macy 6190eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW, 6191eda14cbcSMatt Macy "Load all metaslabs when pool is first opened"); 6192eda14cbcSMatt Macy 6193eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW, 6194eda14cbcSMatt Macy "Prevent metaslabs from being unloaded"); 6195eda14cbcSMatt Macy 6196eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW, 6197eda14cbcSMatt Macy "Preload potential metaslabs during reassessment"); 6198eda14cbcSMatt Macy 6199eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, INT, ZMOD_RW, 6200eda14cbcSMatt Macy "Delay in txgs after metaslab was last used before unloading"); 6201eda14cbcSMatt Macy 6202eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, INT, ZMOD_RW, 6203eda14cbcSMatt Macy "Delay in milliseconds after metaslab was last used before unloading"); 6204eda14cbcSMatt Macy 6205eda14cbcSMatt Macy /* BEGIN CSTYLED */ 6206eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, INT, ZMOD_RW, 6207eda14cbcSMatt Macy "Percentage of metaslab group size that should be free to make it " 6208eda14cbcSMatt Macy "eligible for allocation"); 6209eda14cbcSMatt Macy 6210eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, INT, ZMOD_RW, 6211eda14cbcSMatt Macy "Percentage of metaslab group size that should be considered eligible " 6212eda14cbcSMatt Macy "for allocations unless all metaslab groups within the metaslab class " 6213eda14cbcSMatt Macy "have also crossed this threshold"); 6214eda14cbcSMatt Macy 6215eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, INT, 6216eda14cbcSMatt Macy ZMOD_RW, "Fragmentation for metaslab to allow allocation"); 6217eda14cbcSMatt Macy 6218eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT, ZMOD_RW, 6219eda14cbcSMatt Macy "Use the fragmentation metric to prefer less fragmented metaslabs"); 6220eda14cbcSMatt Macy /* END CSTYLED */ 6221eda14cbcSMatt Macy 6222eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW, 6223eda14cbcSMatt Macy "Prefer metaslabs with lower LBAs"); 6224eda14cbcSMatt Macy 6225eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW, 6226eda14cbcSMatt Macy "Enable metaslab group biasing"); 6227eda14cbcSMatt Macy 6228eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT, 6229eda14cbcSMatt Macy ZMOD_RW, "Enable segment-based metaslab selection"); 6230eda14cbcSMatt Macy 6231eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW, 6232eda14cbcSMatt Macy "Segment-based metaslab selection maximum buckets before switching"); 6233eda14cbcSMatt Macy 6234eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, ULONG, ZMOD_RW, 6235eda14cbcSMatt Macy "Blocks larger than this size are forced to be gang blocks"); 6236eda14cbcSMatt Macy 6237eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, INT, ZMOD_RW, 6238eda14cbcSMatt Macy "Max distance (bytes) to search forward before using size tree"); 6239eda14cbcSMatt Macy 6240eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW, 6241eda14cbcSMatt Macy "When looking in size tree, use largest segment instead of exact fit"); 6242eda14cbcSMatt 
Macy 
6243eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, ULONG,
6244eda14cbcSMatt Macy 	ZMOD_RW, "How long to trust the cached max chunk size of a metaslab");
6245eda14cbcSMatt Macy 
6246eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, INT, ZMOD_RW,
6247eda14cbcSMatt Macy 	"Percentage of memory that can be used to store metaslab range trees");
62487877fdebSMatt Macy 
62497877fdebSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT,
62507877fdebSMatt Macy 	ZMOD_RW, "Try hard to allocate before ganging");
62517877fdebSMatt Macy 
62527877fdebSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, INT, ZMOD_RW,
62537877fdebSMatt Macy 	"Normally only consider this many of the best metaslabs in each vdev");
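
/*
 * All of the ZFS_MODULE_PARAM() tunables above are surfaced through the
 * usual platform mechanisms, e.g. /sys/module/zfs/parameters/ on Linux or
 * the corresponding vfs.zfs.* sysctls on FreeBSD; the exact names follow
 * the prefixes given in each declaration (for example,
 * metaslab_force_ganging and vfs.zfs.metaslab.force_ganging).
 */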