1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or https://opensource.org/licenses/CDDL-1.0.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
24  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25  * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
26  * Copyright (c) 2017, Intel Corporation.
27  */
28 
29 #include <sys/zfs_context.h>
30 #include <sys/dmu.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/space_map.h>
33 #include <sys/metaslab_impl.h>
34 #include <sys/vdev_impl.h>
35 #include <sys/vdev_draid.h>
36 #include <sys/zio.h>
37 #include <sys/spa_impl.h>
38 #include <sys/zfeature.h>
39 #include <sys/vdev_indirect_mapping.h>
40 #include <sys/zap.h>
41 #include <sys/btree.h>
42 
43 #define	GANG_ALLOCATION(flags) \
44 	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
45 
46 /*
47  * Metaslab granularity, in bytes. This is roughly similar to what would be
48  * referred to as the "stripe size" in traditional RAID arrays. In normal
49  * operation, we will try to write this amount of data to each disk before
50  * moving on to the next top-level vdev.
51  */
52 static uint64_t metaslab_aliquot = 1024 * 1024;
53 
54 /*
55  * For testing, make some blocks above a certain size be gang blocks.
56  */
57 uint64_t metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
58 
59 /*
60  * Of blocks of size >= metaslab_force_ganging, actually gang them this often.
61  */
62 uint_t metaslab_force_ganging_pct = 3;
63 
64 /*
65  * In pools where the log space map feature is not enabled we touch
66  * multiple metaslabs (and their respective space maps) with each
67  * transaction group. Thus, we benefit from having a small space map
68  * block size since it allows us to issue more I/O operations scattered
69  * around the disk. So a sane default for the space map block size
70  * is 8~16K.
71  */
72 int zfs_metaslab_sm_blksz_no_log = (1 << 14);
73 
74 /*
75  * When the log space map feature is enabled, we accumulate a lot of
76  * changes per metaslab that are flushed once in a while so we benefit
77  * from a bigger block size like 128K for the metaslab space maps.
78  */
79 int zfs_metaslab_sm_blksz_with_log = (1 << 17);
80 
81 /*
82  * The in-core space map representation is more compact than its on-disk form.
83  * The zfs_condense_pct determines how much more compact the in-core
84  * space map representation must be before we compact it on-disk.
85  * Values should be greater than or equal to 100.
86  */
87 uint_t zfs_condense_pct = 200;
88 
89 /*
90  * Condensing a metaslab is not guaranteed to actually reduce the amount of
91  * space used on disk. In particular, a space map uses data in increments of
92  * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
93  * same number of blocks after condensing. Since the goal of condensing is to
94  * reduce the number of IOPs required to read the space map, we only want to
95  * condense when we can be sure we will reduce the number of blocks used by the
96  * space map. Unfortunately, we cannot precisely compute whether or not this is
97  * the case in metaslab_should_condense since we are holding ms_lock. Instead,
98  * we apply the following heuristic: do not condense a spacemap unless the
99  * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
100  * blocks.
101  */
102 static const int zfs_metaslab_condense_block_threshold = 4;
103 
104 /*
105  * The zfs_mg_noalloc_threshold defines which metaslab groups should
106  * be eligible for allocation. The value is defined as a percentage of
107  * free space. Metaslab groups that have more free space than
108  * zfs_mg_noalloc_threshold are always eligible for allocations. Once
109  * a metaslab group's free space is less than or equal to the
110  * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
111  * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
112  * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
113  * groups are allowed to accept allocations. Gang blocks are always
114  * eligible to allocate on any metaslab group. The default value of 0 means
115  * no metaslab group will be excluded based on this criterion.
116  */
117 static uint_t zfs_mg_noalloc_threshold = 0;
118 
119 /*
120  * Metaslab groups are considered eligible for allocations if their
121  * fragmentation metric (measured as a percentage) is less than or
122  * equal to zfs_mg_fragmentation_threshold. If a metaslab group
123  * exceeds this threshold then it will be skipped unless all metaslab
124  * groups within the metaslab class have also crossed this threshold.
125  *
126  * This tunable was introduced to avoid edge cases where we continue
127  * allocating from very fragmented disks in our pool while other, less
128  * fragmented disks, exists. On the other hand, if all disks in the
129  * pool are uniformly approaching the threshold, the threshold can
130  * be a speed bump in performance, where we keep switching the disks
131  * that we allocate from (e.g. we allocate some segments from disk A
132  * making it bypassing the threshold while freeing segments from disk
133  * B getting its fragmentation below the threshold).
134  *
135  * Empirically, we've seen that our vdev selection for allocations is
136  * good enough that fragmentation increases uniformly across all vdevs
137  * the majority of the time. Thus we set the threshold percentage high
138  * enough to avoid hitting the speed bump on pools that are being pushed
139  * to the edge.
140  */
141 static uint_t zfs_mg_fragmentation_threshold = 95;
142 
143 /*
144  * Allow metaslabs to keep their active state as long as their fragmentation
145  * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
146  * active metaslab that exceeds this threshold will no longer keep its active
147  * status allowing better metaslabs to be selected.
148  */
149 static uint_t zfs_metaslab_fragmentation_threshold = 70;
150 
151 /*
152  * When set will load all metaslabs when pool is first opened.
153  */
154 int metaslab_debug_load = B_FALSE;
155 
156 /*
157  * When set will prevent metaslabs from being unloaded.
158  */
159 static int metaslab_debug_unload = B_FALSE;
160 
161 /*
162  * Minimum size which forces the dynamic allocator to change
163  * it's allocation strategy.  Once the space map cannot satisfy
164  * an allocation of this size then it switches to using more
165  * aggressive strategy (i.e search by size rather than offset).
166  */
167 uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
168 
169 /*
170  * The minimum free space, in percent, which must be available
171  * in a space map to continue allocations in a first-fit fashion.
172  * Once the space map's free space drops below this level we dynamically
173  * switch to using best-fit allocations.
174  */
175 uint_t metaslab_df_free_pct = 4;
176 
177 /*
178  * Maximum distance to search forward from the last offset. Without this
179  * limit, fragmented pools can see >100,000 iterations and
180  * metaslab_block_picker() becomes the performance limiting factor on
181  * high-performance storage.
182  *
183  * With the default setting of 16MB, we typically see less than 500
184  * iterations, even with very fragmented, ashift=9 pools. The maximum number
185  * of iterations possible is:
186  *     metaslab_df_max_search / (2 * (1<<ashift))
187  * With the default setting of 16MB this is 16*1024 (with ashift=9) or
188  * 2048 (with ashift=12).
189  */
190 static uint_t metaslab_df_max_search = 16 * 1024 * 1024;
191 
192 /*
193  * Forces the metaslab_block_picker function to search for at least this many
194  * segments forwards until giving up on finding a segment that the allocation
195  * will fit into.
196  */
197 static const uint32_t metaslab_min_search_count = 100;
198 
199 /*
200  * If we are not searching forward (due to metaslab_df_max_search,
201  * metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
202  * controls what segment is used.  If it is set, we will use the largest free
203  * segment.  If it is not set, we will use a segment of exactly the requested
204  * size (or larger).
205  */
206 static int metaslab_df_use_largest_segment = B_FALSE;
207 
208 /*
209  * Percentage of all cpus that can be used by the metaslab taskq.
210  */
211 int metaslab_load_pct = 50;
212 
213 /*
214  * These tunables control how long a metaslab will remain loaded after the
215  * last allocation from it.  A metaslab can't be unloaded until at least
216  * metaslab_unload_delay TXG's and metaslab_unload_delay_ms milliseconds
217  * have elapsed.  However, zfs_metaslab_mem_limit may cause it to be
218  * unloaded sooner.  These settings are intended to be generous -- to keep
219  * metaslabs loaded for a long time, reducing the rate of metaslab loading.
220  */
221 static uint_t metaslab_unload_delay = 32;
222 static uint_t metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */
223 
224 /*
225  * Max number of metaslabs per group to preload.
226  */
227 uint_t metaslab_preload_limit = 10;
228 
229 /*
230  * Enable/disable preloading of metaslab.
231  */
232 static int metaslab_preload_enabled = B_TRUE;
233 
234 /*
235  * Enable/disable fragmentation weighting on metaslabs.
236  */
237 static int metaslab_fragmentation_factor_enabled = B_TRUE;
238 
239 /*
240  * Enable/disable lba weighting (i.e. outer tracks are given preference).
241  */
242 static int metaslab_lba_weighting_enabled = B_TRUE;
243 
244 /*
245  * Enable/disable metaslab group biasing.
246  */
247 static int metaslab_bias_enabled = B_TRUE;
248 
249 /*
250  * Enable/disable remapping of indirect DVAs to their concrete vdevs.
251  */
252 static const boolean_t zfs_remap_blkptr_enable = B_TRUE;
253 
254 /*
255  * Enable/disable segment-based metaslab selection.
256  */
257 static int zfs_metaslab_segment_weight_enabled = B_TRUE;
258 
259 /*
260  * When using segment-based metaslab selection, we will continue
261  * allocating from the active metaslab until we have exhausted
262  * zfs_metaslab_switch_threshold of its buckets.
263  */
264 static int zfs_metaslab_switch_threshold = 2;
265 
266 /*
267  * Internal switch to enable/disable the metaslab allocation tracing
268  * facility.
269  */
270 static const boolean_t metaslab_trace_enabled = B_FALSE;
271 
272 /*
273  * Maximum entries that the metaslab allocation tracing facility will keep
274  * in a given list when running in non-debug mode. We limit the number
275  * of entries in non-debug mode to prevent us from using up too much memory.
276  * The limit should be sufficiently large that we don't expect any allocation
277  * to every exceed this value. In debug mode, the system will panic if this
278  * limit is ever reached allowing for further investigation.
279  */
280 static const uint64_t metaslab_trace_max_entries = 5000;
281 
282 /*
283  * Maximum number of metaslabs per group that can be disabled
284  * simultaneously.
285  */
286 static const int max_disabled_ms = 3;
287 
288 /*
289  * Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
290  * To avoid 64-bit overflow, don't set above UINT32_MAX.
291  */
292 static uint64_t zfs_metaslab_max_size_cache_sec = 1 * 60 * 60; /* 1 hour */
293 
294 /*
295  * Maximum percentage of memory to use on storing loaded metaslabs. If loading
296  * a metaslab would take it over this percentage, the oldest selected metaslab
297  * is automatically unloaded.
298  */
299 static uint_t zfs_metaslab_mem_limit = 25;
300 
301 /*
302  * Force the per-metaslab range trees to use 64-bit integers to store
303  * segments. Used for debugging purposes.
304  */
305 static const boolean_t zfs_metaslab_force_large_segs = B_FALSE;
306 
307 /*
308  * By default we only store segments over a certain size in the size-sorted
309  * metaslab trees (ms_allocatable_by_size and
310  * ms_unflushed_frees_by_size). This dramatically reduces memory usage and
311  * improves load and unload times at the cost of causing us to use slightly
312  * larger segments than we would otherwise in some cases.
313  */
314 static const uint32_t metaslab_by_size_min_shift = 14;
315 
316 /*
317  * If not set, we will first try normal allocation.  If that fails then
318  * we will do a gang allocation.  If that fails then we will do a "try hard"
319  * gang allocation.  If that fails then we will have a multi-layer gang
320  * block.
321  *
322  * If set, we will first try normal allocation.  If that fails then
323  * we will do a "try hard" allocation.  If that fails we will do a gang
324  * allocation.  If that fails we will do a "try hard" gang allocation.  If
325  * that fails then we will have a multi-layer gang block.
326  */
327 static int zfs_metaslab_try_hard_before_gang = B_FALSE;
328 
329 /*
330  * When not trying hard, we only consider the best zfs_metaslab_find_max_tries
331  * metaslabs.  This improves performance, especially when there are many
332  * metaslabs per vdev and the allocation can't actually be satisfied (so we
333  * would otherwise iterate all the metaslabs).  If there is a metaslab with a
334  * worse weight but it can actually satisfy the allocation, we won't find it
335  * until trying hard.  This may happen if the worse metaslab is not loaded
336  * (and the true weight is better than we have calculated), or due to weight
337  * bucketization.  E.g. we are looking for a 60K segment, and the best
338  * metaslabs all have free segments in the 32-63K bucket, but the best
339  * zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a
340  * subsequent metaslab has ms_max_size >60KB (but fewer segments in this
341  * bucket, and therefore a lower weight).
342  */
343 static uint_t zfs_metaslab_find_max_tries = 100;
344 
345 static uint64_t metaslab_weight(metaslab_t *, boolean_t);
346 static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
347 static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
348 static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
349 
350 static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
351 static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
352 static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
353 static unsigned int metaslab_idx_func(multilist_t *, void *);
354 static void metaslab_evict(metaslab_t *, uint64_t);
355 static void metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg);
356 kmem_cache_t *metaslab_alloc_trace_cache;
357 
358 typedef struct metaslab_stats {
359 	kstat_named_t metaslabstat_trace_over_limit;
360 	kstat_named_t metaslabstat_reload_tree;
361 	kstat_named_t metaslabstat_too_many_tries;
362 	kstat_named_t metaslabstat_try_hard;
363 } metaslab_stats_t;
364 
365 static metaslab_stats_t metaslab_stats = {
366 	{ "trace_over_limit",		KSTAT_DATA_UINT64 },
367 	{ "reload_tree",		KSTAT_DATA_UINT64 },
368 	{ "too_many_tries",		KSTAT_DATA_UINT64 },
369 	{ "try_hard",			KSTAT_DATA_UINT64 },
370 };
371 
372 #define	METASLABSTAT_BUMP(stat) \
373 	atomic_inc_64(&metaslab_stats.stat.value.ui64);
374 
375 
376 static kstat_t *metaslab_ksp;
377 
378 void
379 metaslab_stat_init(void)
380 {
381 	ASSERT(metaslab_alloc_trace_cache == NULL);
382 	metaslab_alloc_trace_cache = kmem_cache_create(
383 	    "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
384 	    0, NULL, NULL, NULL, NULL, NULL, 0);
385 	metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
386 	    "misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
387 	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
388 	if (metaslab_ksp != NULL) {
389 		metaslab_ksp->ks_data = &metaslab_stats;
390 		kstat_install(metaslab_ksp);
391 	}
392 }
393 
394 void
395 metaslab_stat_fini(void)
396 {
397 	if (metaslab_ksp != NULL) {
398 		kstat_delete(metaslab_ksp);
399 		metaslab_ksp = NULL;
400 	}
401 
402 	kmem_cache_destroy(metaslab_alloc_trace_cache);
403 	metaslab_alloc_trace_cache = NULL;
404 }
405 
406 /*
407  * ==========================================================================
408  * Metaslab classes
409  * ==========================================================================
410  */
411 metaslab_class_t *
412 metaslab_class_create(spa_t *spa, const metaslab_ops_t *ops)
413 {
414 	metaslab_class_t *mc;
415 
416 	mc = kmem_zalloc(offsetof(metaslab_class_t,
417 	    mc_allocator[spa->spa_alloc_count]), KM_SLEEP);
418 
419 	mc->mc_spa = spa;
420 	mc->mc_ops = ops;
421 	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
422 	multilist_create(&mc->mc_metaslab_txg_list, sizeof (metaslab_t),
423 	    offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func);
424 	for (int i = 0; i < spa->spa_alloc_count; i++) {
425 		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
426 		mca->mca_rotor = NULL;
427 		zfs_refcount_create_tracked(&mca->mca_alloc_slots);
428 	}
429 
430 	return (mc);
431 }
432 
433 void
434 metaslab_class_destroy(metaslab_class_t *mc)
435 {
436 	spa_t *spa = mc->mc_spa;
437 
438 	ASSERT(mc->mc_alloc == 0);
439 	ASSERT(mc->mc_deferred == 0);
440 	ASSERT(mc->mc_space == 0);
441 	ASSERT(mc->mc_dspace == 0);
442 
443 	for (int i = 0; i < spa->spa_alloc_count; i++) {
444 		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
445 		ASSERT(mca->mca_rotor == NULL);
446 		zfs_refcount_destroy(&mca->mca_alloc_slots);
447 	}
448 	mutex_destroy(&mc->mc_lock);
449 	multilist_destroy(&mc->mc_metaslab_txg_list);
450 	kmem_free(mc, offsetof(metaslab_class_t,
451 	    mc_allocator[spa->spa_alloc_count]));
452 }
453 
454 int
455 metaslab_class_validate(metaslab_class_t *mc)
456 {
457 	metaslab_group_t *mg;
458 	vdev_t *vd;
459 
460 	/*
461 	 * Must hold one of the spa_config locks.
462 	 */
463 	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
464 	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
465 
466 	if ((mg = mc->mc_allocator[0].mca_rotor) == NULL)
467 		return (0);
468 
469 	do {
470 		vd = mg->mg_vd;
471 		ASSERT(vd->vdev_mg != NULL);
472 		ASSERT3P(vd->vdev_top, ==, vd);
473 		ASSERT3P(mg->mg_class, ==, mc);
474 		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
475 	} while ((mg = mg->mg_next) != mc->mc_allocator[0].mca_rotor);
476 
477 	return (0);
478 }
479 
480 static void
481 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
482     int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
483 {
484 	atomic_add_64(&mc->mc_alloc, alloc_delta);
485 	atomic_add_64(&mc->mc_deferred, defer_delta);
486 	atomic_add_64(&mc->mc_space, space_delta);
487 	atomic_add_64(&mc->mc_dspace, dspace_delta);
488 }
489 
490 uint64_t
491 metaslab_class_get_alloc(metaslab_class_t *mc)
492 {
493 	return (mc->mc_alloc);
494 }
495 
496 uint64_t
497 metaslab_class_get_deferred(metaslab_class_t *mc)
498 {
499 	return (mc->mc_deferred);
500 }
501 
502 uint64_t
503 metaslab_class_get_space(metaslab_class_t *mc)
504 {
505 	return (mc->mc_space);
506 }
507 
508 uint64_t
509 metaslab_class_get_dspace(metaslab_class_t *mc)
510 {
511 	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
512 }
513 
514 void
515 metaslab_class_histogram_verify(metaslab_class_t *mc)
516 {
517 	spa_t *spa = mc->mc_spa;
518 	vdev_t *rvd = spa->spa_root_vdev;
519 	uint64_t *mc_hist;
520 	int i;
521 
522 	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
523 		return;
524 
525 	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
526 	    KM_SLEEP);
527 
528 	mutex_enter(&mc->mc_lock);
529 	for (int c = 0; c < rvd->vdev_children; c++) {
530 		vdev_t *tvd = rvd->vdev_child[c];
531 		metaslab_group_t *mg = vdev_get_mg(tvd, mc);
532 
533 		/*
534 		 * Skip any holes, uninitialized top-levels, or
535 		 * vdevs that are not in this metalab class.
536 		 */
537 		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
538 		    mg->mg_class != mc) {
539 			continue;
540 		}
541 
542 		IMPLY(mg == mg->mg_vd->vdev_log_mg,
543 		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
544 
545 		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
546 			mc_hist[i] += mg->mg_histogram[i];
547 	}
548 
549 	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
550 		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
551 	}
552 
553 	mutex_exit(&mc->mc_lock);
554 	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
555 }
556 
557 /*
558  * Calculate the metaslab class's fragmentation metric. The metric
559  * is weighted based on the space contribution of each metaslab group.
560  * The return value will be a number between 0 and 100 (inclusive), or
561  * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
562  * zfs_frag_table for more information about the metric.
563  */
564 uint64_t
565 metaslab_class_fragmentation(metaslab_class_t *mc)
566 {
567 	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
568 	uint64_t fragmentation = 0;
569 
570 	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
571 
572 	for (int c = 0; c < rvd->vdev_children; c++) {
573 		vdev_t *tvd = rvd->vdev_child[c];
574 		metaslab_group_t *mg = tvd->vdev_mg;
575 
576 		/*
577 		 * Skip any holes, uninitialized top-levels,
578 		 * or vdevs that are not in this metalab class.
579 		 */
580 		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
581 		    mg->mg_class != mc) {
582 			continue;
583 		}
584 
585 		/*
586 		 * If a metaslab group does not contain a fragmentation
587 		 * metric then just bail out.
588 		 */
589 		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
590 			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
591 			return (ZFS_FRAG_INVALID);
592 		}
593 
594 		/*
595 		 * Determine how much this metaslab_group is contributing
596 		 * to the overall pool fragmentation metric.
597 		 */
598 		fragmentation += mg->mg_fragmentation *
599 		    metaslab_group_get_space(mg);
600 	}
601 	fragmentation /= metaslab_class_get_space(mc);
602 
603 	ASSERT3U(fragmentation, <=, 100);
604 	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
605 	return (fragmentation);
606 }
607 
608 /*
609  * Calculate the amount of expandable space that is available in
610  * this metaslab class. If a device is expanded then its expandable
611  * space will be the amount of allocatable space that is currently not
612  * part of this metaslab class.
613  */
614 uint64_t
615 metaslab_class_expandable_space(metaslab_class_t *mc)
616 {
617 	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
618 	uint64_t space = 0;
619 
620 	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
621 	for (int c = 0; c < rvd->vdev_children; c++) {
622 		vdev_t *tvd = rvd->vdev_child[c];
623 		metaslab_group_t *mg = tvd->vdev_mg;
624 
625 		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
626 		    mg->mg_class != mc) {
627 			continue;
628 		}
629 
630 		/*
631 		 * Calculate if we have enough space to add additional
632 		 * metaslabs. We report the expandable space in terms
633 		 * of the metaslab size since that's the unit of expansion.
634 		 */
635 		space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
636 		    1ULL << tvd->vdev_ms_shift);
637 	}
638 	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
639 	return (space);
640 }
641 
642 void
643 metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
644 {
645 	multilist_t *ml = &mc->mc_metaslab_txg_list;
646 	for (int i = 0; i < multilist_get_num_sublists(ml); i++) {
647 		multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
648 		metaslab_t *msp = multilist_sublist_head(mls);
649 		multilist_sublist_unlock(mls);
650 		while (msp != NULL) {
651 			mutex_enter(&msp->ms_lock);
652 
653 			/*
654 			 * If the metaslab has been removed from the list
655 			 * (which could happen if we were at the memory limit
656 			 * and it was evicted during this loop), then we can't
657 			 * proceed and we should restart the sublist.
658 			 */
659 			if (!multilist_link_active(&msp->ms_class_txg_node)) {
660 				mutex_exit(&msp->ms_lock);
661 				i--;
662 				break;
663 			}
664 			mls = multilist_sublist_lock(ml, i);
665 			metaslab_t *next_msp = multilist_sublist_next(mls, msp);
666 			multilist_sublist_unlock(mls);
667 			if (txg >
668 			    msp->ms_selected_txg + metaslab_unload_delay &&
669 			    gethrtime() > msp->ms_selected_time +
670 			    (uint64_t)MSEC2NSEC(metaslab_unload_delay_ms)) {
671 				metaslab_evict(msp, txg);
672 			} else {
673 				/*
674 				 * Once we've hit a metaslab selected too
675 				 * recently to evict, we're done evicting for
676 				 * now.
677 				 */
678 				mutex_exit(&msp->ms_lock);
679 				break;
680 			}
681 			mutex_exit(&msp->ms_lock);
682 			msp = next_msp;
683 		}
684 	}
685 }
686 
687 static int
688 metaslab_compare(const void *x1, const void *x2)
689 {
690 	const metaslab_t *m1 = (const metaslab_t *)x1;
691 	const metaslab_t *m2 = (const metaslab_t *)x2;
692 
693 	int sort1 = 0;
694 	int sort2 = 0;
695 	if (m1->ms_allocator != -1 && m1->ms_primary)
696 		sort1 = 1;
697 	else if (m1->ms_allocator != -1 && !m1->ms_primary)
698 		sort1 = 2;
699 	if (m2->ms_allocator != -1 && m2->ms_primary)
700 		sort2 = 1;
701 	else if (m2->ms_allocator != -1 && !m2->ms_primary)
702 		sort2 = 2;
703 
704 	/*
705 	 * Sort inactive metaslabs first, then primaries, then secondaries. When
706 	 * selecting a metaslab to allocate from, an allocator first tries its
707 	 * primary, then secondary active metaslab. If it doesn't have active
708 	 * metaslabs, or can't allocate from them, it searches for an inactive
709 	 * metaslab to activate. If it can't find a suitable one, it will steal
710 	 * a primary or secondary metaslab from another allocator.
711 	 */
712 	if (sort1 < sort2)
713 		return (-1);
714 	if (sort1 > sort2)
715 		return (1);
716 
717 	int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
718 	if (likely(cmp))
719 		return (cmp);
720 
721 	IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
722 
723 	return (TREE_CMP(m1->ms_start, m2->ms_start));
724 }
725 
726 /*
727  * ==========================================================================
728  * Metaslab groups
729  * ==========================================================================
730  */
731 /*
732  * Update the allocatable flag and the metaslab group's capacity.
733  * The allocatable flag is set to true if the capacity is below
734  * the zfs_mg_noalloc_threshold or has a fragmentation value that is
735  * greater than zfs_mg_fragmentation_threshold. If a metaslab group
736  * transitions from allocatable to non-allocatable or vice versa then the
737  * metaslab group's class is updated to reflect the transition.
738  */
739 static void
740 metaslab_group_alloc_update(metaslab_group_t *mg)
741 {
742 	vdev_t *vd = mg->mg_vd;
743 	metaslab_class_t *mc = mg->mg_class;
744 	vdev_stat_t *vs = &vd->vdev_stat;
745 	boolean_t was_allocatable;
746 	boolean_t was_initialized;
747 
748 	ASSERT(vd == vd->vdev_top);
749 	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
750 	    SCL_ALLOC);
751 
752 	mutex_enter(&mg->mg_lock);
753 	was_allocatable = mg->mg_allocatable;
754 	was_initialized = mg->mg_initialized;
755 
756 	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
757 	    (vs->vs_space + 1);
758 
759 	mutex_enter(&mc->mc_lock);
760 
761 	/*
762 	 * If the metaslab group was just added then it won't
763 	 * have any space until we finish syncing out this txg.
764 	 * At that point we will consider it initialized and available
765 	 * for allocations.  We also don't consider non-activated
766 	 * metaslab groups (e.g. vdevs that are in the middle of being removed)
767 	 * to be initialized, because they can't be used for allocation.
768 	 */
769 	mg->mg_initialized = metaslab_group_initialized(mg);
770 	if (!was_initialized && mg->mg_initialized) {
771 		mc->mc_groups++;
772 	} else if (was_initialized && !mg->mg_initialized) {
773 		ASSERT3U(mc->mc_groups, >, 0);
774 		mc->mc_groups--;
775 	}
776 	if (mg->mg_initialized)
777 		mg->mg_no_free_space = B_FALSE;
778 
779 	/*
780 	 * A metaslab group is considered allocatable if it has plenty
781 	 * of free space or is not heavily fragmented. We only take
782 	 * fragmentation into account if the metaslab group has a valid
783 	 * fragmentation metric (i.e. a value between 0 and 100).
784 	 */
785 	mg->mg_allocatable = (mg->mg_activation_count > 0 &&
786 	    mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
787 	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
788 	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
789 
790 	/*
791 	 * The mc_alloc_groups maintains a count of the number of
792 	 * groups in this metaslab class that are still above the
793 	 * zfs_mg_noalloc_threshold. This is used by the allocating
794 	 * threads to determine if they should avoid allocations to
795 	 * a given group. The allocator will avoid allocations to a group
796 	 * if that group has reached or is below the zfs_mg_noalloc_threshold
797 	 * and there are still other groups that are above the threshold.
798 	 * When a group transitions from allocatable to non-allocatable or
799 	 * vice versa we update the metaslab class to reflect that change.
800 	 * When the mc_alloc_groups value drops to 0 that means that all
801 	 * groups have reached the zfs_mg_noalloc_threshold making all groups
802 	 * eligible for allocations. This effectively means that all devices
803 	 * are balanced again.
804 	 */
805 	if (was_allocatable && !mg->mg_allocatable)
806 		mc->mc_alloc_groups--;
807 	else if (!was_allocatable && mg->mg_allocatable)
808 		mc->mc_alloc_groups++;
809 	mutex_exit(&mc->mc_lock);
810 
811 	mutex_exit(&mg->mg_lock);
812 }
813 
814 int
815 metaslab_sort_by_flushed(const void *va, const void *vb)
816 {
817 	const metaslab_t *a = va;
818 	const metaslab_t *b = vb;
819 
820 	int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
821 	if (likely(cmp))
822 		return (cmp);
823 
824 	uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
825 	uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
826 	cmp = TREE_CMP(a_vdev_id, b_vdev_id);
827 	if (cmp)
828 		return (cmp);
829 
830 	return (TREE_CMP(a->ms_id, b->ms_id));
831 }
832 
833 metaslab_group_t *
834 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
835 {
836 	metaslab_group_t *mg;
837 
838 	mg = kmem_zalloc(offsetof(metaslab_group_t,
839 	    mg_allocator[allocators]), KM_SLEEP);
840 	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
841 	mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
842 	cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
843 	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
844 	    sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
845 	mg->mg_vd = vd;
846 	mg->mg_class = mc;
847 	mg->mg_activation_count = 0;
848 	mg->mg_initialized = B_FALSE;
849 	mg->mg_no_free_space = B_TRUE;
850 	mg->mg_allocators = allocators;
851 
852 	for (int i = 0; i < allocators; i++) {
853 		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
854 		zfs_refcount_create_tracked(&mga->mga_alloc_queue_depth);
855 	}
856 
857 	mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
858 	    maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
859 
860 	return (mg);
861 }
862 
863 void
864 metaslab_group_destroy(metaslab_group_t *mg)
865 {
866 	ASSERT(mg->mg_prev == NULL);
867 	ASSERT(mg->mg_next == NULL);
868 	/*
869 	 * We may have gone below zero with the activation count
870 	 * either because we never activated in the first place or
871 	 * because we're done, and possibly removing the vdev.
872 	 */
873 	ASSERT(mg->mg_activation_count <= 0);
874 
875 	taskq_destroy(mg->mg_taskq);
876 	avl_destroy(&mg->mg_metaslab_tree);
877 	mutex_destroy(&mg->mg_lock);
878 	mutex_destroy(&mg->mg_ms_disabled_lock);
879 	cv_destroy(&mg->mg_ms_disabled_cv);
880 
881 	for (int i = 0; i < mg->mg_allocators; i++) {
882 		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
883 		zfs_refcount_destroy(&mga->mga_alloc_queue_depth);
884 	}
885 	kmem_free(mg, offsetof(metaslab_group_t,
886 	    mg_allocator[mg->mg_allocators]));
887 }
888 
889 void
890 metaslab_group_activate(metaslab_group_t *mg)
891 {
892 	metaslab_class_t *mc = mg->mg_class;
893 	spa_t *spa = mc->mc_spa;
894 	metaslab_group_t *mgprev, *mgnext;
895 
896 	ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);
897 
898 	ASSERT(mg->mg_prev == NULL);
899 	ASSERT(mg->mg_next == NULL);
900 	ASSERT(mg->mg_activation_count <= 0);
901 
902 	if (++mg->mg_activation_count <= 0)
903 		return;
904 
905 	mg->mg_aliquot = metaslab_aliquot * MAX(1,
906 	    vdev_get_ndisks(mg->mg_vd) - vdev_get_nparity(mg->mg_vd));
907 	metaslab_group_alloc_update(mg);
908 
909 	if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) {
910 		mg->mg_prev = mg;
911 		mg->mg_next = mg;
912 	} else {
913 		mgnext = mgprev->mg_next;
914 		mg->mg_prev = mgprev;
915 		mg->mg_next = mgnext;
916 		mgprev->mg_next = mg;
917 		mgnext->mg_prev = mg;
918 	}
919 	for (int i = 0; i < spa->spa_alloc_count; i++) {
920 		mc->mc_allocator[i].mca_rotor = mg;
921 		mg = mg->mg_next;
922 	}
923 }
924 
925 /*
926  * Passivate a metaslab group and remove it from the allocation rotor.
927  * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
928  * a metaslab group. This function will momentarily drop spa_config_locks
929  * that are lower than the SCL_ALLOC lock (see comment below).
930  */
931 void
932 metaslab_group_passivate(metaslab_group_t *mg)
933 {
934 	metaslab_class_t *mc = mg->mg_class;
935 	spa_t *spa = mc->mc_spa;
936 	metaslab_group_t *mgprev, *mgnext;
937 	int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
938 
939 	ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
940 	    (SCL_ALLOC | SCL_ZIO));
941 
942 	if (--mg->mg_activation_count != 0) {
943 		for (int i = 0; i < spa->spa_alloc_count; i++)
944 			ASSERT(mc->mc_allocator[i].mca_rotor != mg);
945 		ASSERT(mg->mg_prev == NULL);
946 		ASSERT(mg->mg_next == NULL);
947 		ASSERT(mg->mg_activation_count < 0);
948 		return;
949 	}
950 
951 	/*
952 	 * The spa_config_lock is an array of rwlocks, ordered as
953 	 * follows (from highest to lowest):
954 	 *	SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
955 	 *	SCL_ZIO > SCL_FREE > SCL_VDEV
956 	 * (For more information about the spa_config_lock see spa_misc.c)
957 	 * The higher the lock, the broader its coverage. When we passivate
958 	 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
959 	 * config locks. However, the metaslab group's taskq might be trying
960 	 * to preload metaslabs so we must drop the SCL_ZIO lock and any
961 	 * lower locks to allow the I/O to complete. At a minimum,
962 	 * we continue to hold the SCL_ALLOC lock, which prevents any future
963 	 * allocations from taking place and any changes to the vdev tree.
964 	 */
965 	spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
966 	taskq_wait_outstanding(mg->mg_taskq, 0);
967 	spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
968 	metaslab_group_alloc_update(mg);
969 	for (int i = 0; i < mg->mg_allocators; i++) {
970 		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
971 		metaslab_t *msp = mga->mga_primary;
972 		if (msp != NULL) {
973 			mutex_enter(&msp->ms_lock);
974 			metaslab_passivate(msp,
975 			    metaslab_weight_from_range_tree(msp));
976 			mutex_exit(&msp->ms_lock);
977 		}
978 		msp = mga->mga_secondary;
979 		if (msp != NULL) {
980 			mutex_enter(&msp->ms_lock);
981 			metaslab_passivate(msp,
982 			    metaslab_weight_from_range_tree(msp));
983 			mutex_exit(&msp->ms_lock);
984 		}
985 	}
986 
987 	mgprev = mg->mg_prev;
988 	mgnext = mg->mg_next;
989 
990 	if (mg == mgnext) {
991 		mgnext = NULL;
992 	} else {
993 		mgprev->mg_next = mgnext;
994 		mgnext->mg_prev = mgprev;
995 	}
996 	for (int i = 0; i < spa->spa_alloc_count; i++) {
997 		if (mc->mc_allocator[i].mca_rotor == mg)
998 			mc->mc_allocator[i].mca_rotor = mgnext;
999 	}
1000 
1001 	mg->mg_prev = NULL;
1002 	mg->mg_next = NULL;
1003 }
1004 
1005 boolean_t
1006 metaslab_group_initialized(metaslab_group_t *mg)
1007 {
1008 	vdev_t *vd = mg->mg_vd;
1009 	vdev_stat_t *vs = &vd->vdev_stat;
1010 
1011 	return (vs->vs_space != 0 && mg->mg_activation_count > 0);
1012 }
1013 
1014 uint64_t
1015 metaslab_group_get_space(metaslab_group_t *mg)
1016 {
1017 	/*
1018 	 * Note that the number of nodes in mg_metaslab_tree may be one less
1019 	 * than vdev_ms_count, due to the embedded log metaslab.
1020 	 */
1021 	mutex_enter(&mg->mg_lock);
1022 	uint64_t ms_count = avl_numnodes(&mg->mg_metaslab_tree);
1023 	mutex_exit(&mg->mg_lock);
1024 	return ((1ULL << mg->mg_vd->vdev_ms_shift) * ms_count);
1025 }
1026 
1027 void
1028 metaslab_group_histogram_verify(metaslab_group_t *mg)
1029 {
1030 	uint64_t *mg_hist;
1031 	avl_tree_t *t = &mg->mg_metaslab_tree;
1032 	uint64_t ashift = mg->mg_vd->vdev_ashift;
1033 
1034 	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
1035 		return;
1036 
1037 	mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
1038 	    KM_SLEEP);
1039 
1040 	ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
1041 	    SPACE_MAP_HISTOGRAM_SIZE + ashift);
1042 
1043 	mutex_enter(&mg->mg_lock);
1044 	for (metaslab_t *msp = avl_first(t);
1045 	    msp != NULL; msp = AVL_NEXT(t, msp)) {
1046 		VERIFY3P(msp->ms_group, ==, mg);
1047 		/* skip if not active */
1048 		if (msp->ms_sm == NULL)
1049 			continue;
1050 
1051 		for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1052 			mg_hist[i + ashift] +=
1053 			    msp->ms_sm->sm_phys->smp_histogram[i];
1054 		}
1055 	}
1056 
1057 	for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
1058 		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
1059 
1060 	mutex_exit(&mg->mg_lock);
1061 
1062 	kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
1063 }
1064 
1065 static void
1066 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
1067 {
1068 	metaslab_class_t *mc = mg->mg_class;
1069 	uint64_t ashift = mg->mg_vd->vdev_ashift;
1070 
1071 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1072 	if (msp->ms_sm == NULL)
1073 		return;
1074 
1075 	mutex_enter(&mg->mg_lock);
1076 	mutex_enter(&mc->mc_lock);
1077 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1078 		IMPLY(mg == mg->mg_vd->vdev_log_mg,
1079 		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
1080 		mg->mg_histogram[i + ashift] +=
1081 		    msp->ms_sm->sm_phys->smp_histogram[i];
1082 		mc->mc_histogram[i + ashift] +=
1083 		    msp->ms_sm->sm_phys->smp_histogram[i];
1084 	}
1085 	mutex_exit(&mc->mc_lock);
1086 	mutex_exit(&mg->mg_lock);
1087 }
1088 
1089 void
1090 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
1091 {
1092 	metaslab_class_t *mc = mg->mg_class;
1093 	uint64_t ashift = mg->mg_vd->vdev_ashift;
1094 
1095 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1096 	if (msp->ms_sm == NULL)
1097 		return;
1098 
1099 	mutex_enter(&mg->mg_lock);
1100 	mutex_enter(&mc->mc_lock);
1101 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1102 		ASSERT3U(mg->mg_histogram[i + ashift], >=,
1103 		    msp->ms_sm->sm_phys->smp_histogram[i]);
1104 		ASSERT3U(mc->mc_histogram[i + ashift], >=,
1105 		    msp->ms_sm->sm_phys->smp_histogram[i]);
1106 		IMPLY(mg == mg->mg_vd->vdev_log_mg,
1107 		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
1108 
1109 		mg->mg_histogram[i + ashift] -=
1110 		    msp->ms_sm->sm_phys->smp_histogram[i];
1111 		mc->mc_histogram[i + ashift] -=
1112 		    msp->ms_sm->sm_phys->smp_histogram[i];
1113 	}
1114 	mutex_exit(&mc->mc_lock);
1115 	mutex_exit(&mg->mg_lock);
1116 }
1117 
1118 static void
1119 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
1120 {
1121 	ASSERT(msp->ms_group == NULL);
1122 	mutex_enter(&mg->mg_lock);
1123 	msp->ms_group = mg;
1124 	msp->ms_weight = 0;
1125 	avl_add(&mg->mg_metaslab_tree, msp);
1126 	mutex_exit(&mg->mg_lock);
1127 
1128 	mutex_enter(&msp->ms_lock);
1129 	metaslab_group_histogram_add(mg, msp);
1130 	mutex_exit(&msp->ms_lock);
1131 }
1132 
1133 static void
1134 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
1135 {
1136 	mutex_enter(&msp->ms_lock);
1137 	metaslab_group_histogram_remove(mg, msp);
1138 	mutex_exit(&msp->ms_lock);
1139 
1140 	mutex_enter(&mg->mg_lock);
1141 	ASSERT(msp->ms_group == mg);
1142 	avl_remove(&mg->mg_metaslab_tree, msp);
1143 
1144 	metaslab_class_t *mc = msp->ms_group->mg_class;
1145 	multilist_sublist_t *mls =
1146 	    multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
1147 	if (multilist_link_active(&msp->ms_class_txg_node))
1148 		multilist_sublist_remove(mls, msp);
1149 	multilist_sublist_unlock(mls);
1150 
1151 	msp->ms_group = NULL;
1152 	mutex_exit(&mg->mg_lock);
1153 }
1154 
1155 static void
1156 metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
1157 {
1158 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1159 	ASSERT(MUTEX_HELD(&mg->mg_lock));
1160 	ASSERT(msp->ms_group == mg);
1161 
1162 	avl_remove(&mg->mg_metaslab_tree, msp);
1163 	msp->ms_weight = weight;
1164 	avl_add(&mg->mg_metaslab_tree, msp);
1165 
1166 }
1167 
1168 static void
1169 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
1170 {
1171 	/*
1172 	 * Although in principle the weight can be any value, in
1173 	 * practice we do not use values in the range [1, 511].
1174 	 */
1175 	ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
1176 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1177 
1178 	mutex_enter(&mg->mg_lock);
1179 	metaslab_group_sort_impl(mg, msp, weight);
1180 	mutex_exit(&mg->mg_lock);
1181 }
1182 
1183 /*
1184  * Calculate the fragmentation for a given metaslab group. We can use
1185  * a simple average here since all metaslabs within the group must have
1186  * the same size. The return value will be a value between 0 and 100
1187  * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslab in this
1188  * group have a fragmentation metric.
1189  */
1190 uint64_t
1191 metaslab_group_fragmentation(metaslab_group_t *mg)
1192 {
1193 	vdev_t *vd = mg->mg_vd;
1194 	uint64_t fragmentation = 0;
1195 	uint64_t valid_ms = 0;
1196 
1197 	for (int m = 0; m < vd->vdev_ms_count; m++) {
1198 		metaslab_t *msp = vd->vdev_ms[m];
1199 
1200 		if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
1201 			continue;
1202 		if (msp->ms_group != mg)
1203 			continue;
1204 
1205 		valid_ms++;
1206 		fragmentation += msp->ms_fragmentation;
1207 	}
1208 
1209 	if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
1210 		return (ZFS_FRAG_INVALID);
1211 
1212 	fragmentation /= valid_ms;
1213 	ASSERT3U(fragmentation, <=, 100);
1214 	return (fragmentation);
1215 }
1216 
1217 /*
1218  * Determine if a given metaslab group should skip allocations. A metaslab
1219  * group should avoid allocations if its free capacity is less than the
1220  * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
1221  * zfs_mg_fragmentation_threshold and there is at least one metaslab group
1222  * that can still handle allocations. If the allocation throttle is enabled
1223  * then we skip allocations to devices that have reached their maximum
1224  * allocation queue depth unless the selected metaslab group is the only
1225  * eligible group remaining.
1226  */
1227 static boolean_t
1228 metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
1229     int flags, uint64_t psize, int allocator, int d)
1230 {
1231 	spa_t *spa = mg->mg_vd->vdev_spa;
1232 	metaslab_class_t *mc = mg->mg_class;
1233 
1234 	/*
1235 	 * We can only consider skipping this metaslab group if it's
1236 	 * in the normal metaslab class and there are other metaslab
1237 	 * groups to select from. Otherwise, we always consider it eligible
1238 	 * for allocations.
1239 	 */
1240 	if ((mc != spa_normal_class(spa) &&
1241 	    mc != spa_special_class(spa) &&
1242 	    mc != spa_dedup_class(spa)) ||
1243 	    mc->mc_groups <= 1)
1244 		return (B_TRUE);
1245 
1246 	/*
1247 	 * If the metaslab group's mg_allocatable flag is set (see comments
1248 	 * in metaslab_group_alloc_update() for more information) and
1249 	 * the allocation throttle is disabled then allow allocations to this
1250 	 * device. However, if the allocation throttle is enabled then
1251 	 * check if we have reached our allocation limit (mga_alloc_queue_depth)
1252 	 * to determine if we should allow allocations to this metaslab group.
1253 	 * If all metaslab groups are no longer considered allocatable
1254 	 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
1255 	 * gang block size then we allow allocations on this metaslab group
1256 	 * regardless of the mg_allocatable or throttle settings.
1257 	 */
1258 	if (mg->mg_allocatable) {
1259 		metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
1260 		int64_t qdepth;
1261 		uint64_t qmax = mga->mga_cur_max_alloc_queue_depth;
1262 
1263 		if (!mc->mc_alloc_throttle_enabled)
1264 			return (B_TRUE);
1265 
1266 		/*
1267 		 * If this metaslab group does not have any free space, then
1268 		 * there is no point in looking further.
1269 		 */
1270 		if (mg->mg_no_free_space)
1271 			return (B_FALSE);
1272 
1273 		/*
1274 		 * Some allocations (e.g., those coming from device removal
1275 		 * where the * allocations are not even counted in the
1276 		 * metaslab * allocation queues) are allowed to bypass
1277 		 * the throttle.
1278 		 */
1279 		if (flags & METASLAB_DONT_THROTTLE)
1280 			return (B_TRUE);
1281 
1282 		/*
1283 		 * Relax allocation throttling for ditto blocks.  Due to
1284 		 * random imbalances in allocation it tends to push copies
1285 		 * to one vdev, that looks a bit better at the moment.
1286 		 */
1287 		qmax = qmax * (4 + d) / 4;
1288 
1289 		qdepth = zfs_refcount_count(&mga->mga_alloc_queue_depth);
1290 
1291 		/*
1292 		 * If this metaslab group is below its qmax or it's
1293 		 * the only allocatable metaslab group, then attempt
1294 		 * to allocate from it.
1295 		 */
1296 		if (qdepth < qmax || mc->mc_alloc_groups == 1)
1297 			return (B_TRUE);
1298 		ASSERT3U(mc->mc_alloc_groups, >, 1);
1299 
1300 		/*
1301 		 * Since this metaslab group is at or over its qmax, we
1302 		 * need to determine if there are metaslab groups after this
1303 		 * one that might be able to handle this allocation. This is
1304 		 * racy since we can't hold the locks for all metaslab
1305 		 * groups at the same time when we make this check.
1306 		 */
1307 		for (metaslab_group_t *mgp = mg->mg_next;
1308 		    mgp != rotor; mgp = mgp->mg_next) {
1309 			metaslab_group_allocator_t *mgap =
1310 			    &mgp->mg_allocator[allocator];
1311 			qmax = mgap->mga_cur_max_alloc_queue_depth;
1312 			qmax = qmax * (4 + d) / 4;
1313 			qdepth =
1314 			    zfs_refcount_count(&mgap->mga_alloc_queue_depth);
1315 
1316 			/*
1317 			 * If there is another metaslab group that
1318 			 * might be able to handle the allocation, then
1319 			 * we return false so that we skip this group.
1320 			 */
1321 			if (qdepth < qmax && !mgp->mg_no_free_space)
1322 				return (B_FALSE);
1323 		}
1324 
1325 		/*
1326 		 * We didn't find another group to handle the allocation
1327 		 * so we can't skip this metaslab group even though
1328 		 * we are at or over our qmax.
1329 		 */
1330 		return (B_TRUE);
1331 
1332 	} else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
1333 		return (B_TRUE);
1334 	}
1335 	return (B_FALSE);
1336 }
1337 
1338 /*
1339  * ==========================================================================
1340  * Range tree callbacks
1341  * ==========================================================================
1342  */
1343 
1344 /*
1345  * Comparison function for the private size-ordered tree using 32-bit
1346  * ranges. Tree is sorted by size, larger sizes at the end of the tree.
1347  */
1348 __attribute__((always_inline)) inline
1349 static int
1350 metaslab_rangesize32_compare(const void *x1, const void *x2)
1351 {
1352 	const range_seg32_t *r1 = x1;
1353 	const range_seg32_t *r2 = x2;
1354 
1355 	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1356 	uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1357 
1358 	int cmp = TREE_CMP(rs_size1, rs_size2);
1359 
1360 	return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
1361 }
1362 
1363 /*
1364  * Comparison function for the private size-ordered tree using 64-bit
1365  * ranges. Tree is sorted by size, larger sizes at the end of the tree.
1366  */
1367 __attribute__((always_inline)) inline
1368 static int
1369 metaslab_rangesize64_compare(const void *x1, const void *x2)
1370 {
1371 	const range_seg64_t *r1 = x1;
1372 	const range_seg64_t *r2 = x2;
1373 
1374 	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1375 	uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1376 
1377 	int cmp = TREE_CMP(rs_size1, rs_size2);
1378 
1379 	return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
1380 }
1381 
1382 typedef struct metaslab_rt_arg {
1383 	zfs_btree_t *mra_bt;
1384 	uint32_t mra_floor_shift;
1385 } metaslab_rt_arg_t;
1386 
1387 struct mssa_arg {
1388 	range_tree_t *rt;
1389 	metaslab_rt_arg_t *mra;
1390 };
1391 
1392 static void
1393 metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size)
1394 {
1395 	struct mssa_arg *mssap = arg;
1396 	range_tree_t *rt = mssap->rt;
1397 	metaslab_rt_arg_t *mrap = mssap->mra;
1398 	range_seg_max_t seg = {0};
1399 	rs_set_start(&seg, rt, start);
1400 	rs_set_end(&seg, rt, start + size);
1401 	metaslab_rt_add(rt, &seg, mrap);
1402 }
1403 
1404 static void
1405 metaslab_size_tree_full_load(range_tree_t *rt)
1406 {
1407 	metaslab_rt_arg_t *mrap = rt->rt_arg;
1408 	METASLABSTAT_BUMP(metaslabstat_reload_tree);
1409 	ASSERT0(zfs_btree_numnodes(mrap->mra_bt));
1410 	mrap->mra_floor_shift = 0;
1411 	struct mssa_arg arg = {0};
1412 	arg.rt = rt;
1413 	arg.mra = mrap;
1414 	range_tree_walk(rt, metaslab_size_sorted_add, &arg);
1415 }
1416 
1417 
1418 ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize32_in_buf,
1419     range_seg32_t, metaslab_rangesize32_compare)
1420 
1421 ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize64_in_buf,
1422     range_seg64_t, metaslab_rangesize64_compare)
1423 
1424 /*
1425  * Create any block allocator specific components. The current allocators
1426  * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
1427  */
1428 static void
1429 metaslab_rt_create(range_tree_t *rt, void *arg)
1430 {
1431 	metaslab_rt_arg_t *mrap = arg;
1432 	zfs_btree_t *size_tree = mrap->mra_bt;
1433 
1434 	size_t size;
1435 	int (*compare) (const void *, const void *);
1436 	bt_find_in_buf_f bt_find;
1437 	switch (rt->rt_type) {
1438 	case RANGE_SEG32:
1439 		size = sizeof (range_seg32_t);
1440 		compare = metaslab_rangesize32_compare;
1441 		bt_find = metaslab_rt_find_rangesize32_in_buf;
1442 		break;
1443 	case RANGE_SEG64:
1444 		size = sizeof (range_seg64_t);
1445 		compare = metaslab_rangesize64_compare;
1446 		bt_find = metaslab_rt_find_rangesize64_in_buf;
1447 		break;
1448 	default:
1449 		panic("Invalid range seg type %d", rt->rt_type);
1450 	}
1451 	zfs_btree_create(size_tree, compare, bt_find, size);
1452 	mrap->mra_floor_shift = metaslab_by_size_min_shift;
1453 }
1454 
1455 static void
1456 metaslab_rt_destroy(range_tree_t *rt, void *arg)
1457 {
1458 	(void) rt;
1459 	metaslab_rt_arg_t *mrap = arg;
1460 	zfs_btree_t *size_tree = mrap->mra_bt;
1461 
1462 	zfs_btree_destroy(size_tree);
1463 	kmem_free(mrap, sizeof (*mrap));
1464 }
1465 
1466 static void
1467 metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
1468 {
1469 	metaslab_rt_arg_t *mrap = arg;
1470 	zfs_btree_t *size_tree = mrap->mra_bt;
1471 
1472 	if (rs_get_end(rs, rt) - rs_get_start(rs, rt) <
1473 	    (1ULL << mrap->mra_floor_shift))
1474 		return;
1475 
1476 	zfs_btree_add(size_tree, rs);
1477 }
1478 
1479 static void
1480 metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
1481 {
1482 	metaslab_rt_arg_t *mrap = arg;
1483 	zfs_btree_t *size_tree = mrap->mra_bt;
1484 
1485 	if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1ULL <<
1486 	    mrap->mra_floor_shift))
1487 		return;
1488 
1489 	zfs_btree_remove(size_tree, rs);
1490 }
1491 
1492 static void
1493 metaslab_rt_vacate(range_tree_t *rt, void *arg)
1494 {
1495 	metaslab_rt_arg_t *mrap = arg;
1496 	zfs_btree_t *size_tree = mrap->mra_bt;
1497 	zfs_btree_clear(size_tree);
1498 	zfs_btree_destroy(size_tree);
1499 
1500 	metaslab_rt_create(rt, arg);
1501 }
1502 
1503 static const range_tree_ops_t metaslab_rt_ops = {
1504 	.rtop_create = metaslab_rt_create,
1505 	.rtop_destroy = metaslab_rt_destroy,
1506 	.rtop_add = metaslab_rt_add,
1507 	.rtop_remove = metaslab_rt_remove,
1508 	.rtop_vacate = metaslab_rt_vacate
1509 };
1510 
1511 /*
1512  * ==========================================================================
1513  * Common allocator routines
1514  * ==========================================================================
1515  */
1516 
1517 /*
1518  * Return the maximum contiguous segment within the metaslab.
1519  */
1520 uint64_t
1521 metaslab_largest_allocatable(metaslab_t *msp)
1522 {
1523 	zfs_btree_t *t = &msp->ms_allocatable_by_size;
1524 	range_seg_t *rs;
1525 
1526 	if (t == NULL)
1527 		return (0);
1528 	if (zfs_btree_numnodes(t) == 0)
1529 		metaslab_size_tree_full_load(msp->ms_allocatable);
1530 
1531 	rs = zfs_btree_last(t, NULL);
1532 	if (rs == NULL)
1533 		return (0);
1534 
1535 	return (rs_get_end(rs, msp->ms_allocatable) - rs_get_start(rs,
1536 	    msp->ms_allocatable));
1537 }
1538 
1539 /*
1540  * Return the maximum contiguous segment within the unflushed frees of this
1541  * metaslab.
1542  */
1543 static uint64_t
1544 metaslab_largest_unflushed_free(metaslab_t *msp)
1545 {
1546 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1547 
1548 	if (msp->ms_unflushed_frees == NULL)
1549 		return (0);
1550 
1551 	if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0)
1552 		metaslab_size_tree_full_load(msp->ms_unflushed_frees);
1553 	range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size,
1554 	    NULL);
1555 	if (rs == NULL)
1556 		return (0);
1557 
1558 	/*
1559 	 * When a range is freed from the metaslab, that range is added to
1560 	 * both the unflushed frees and the deferred frees. While the block
1561 	 * will eventually be usable, if the metaslab were loaded the range
1562 	 * would not be added to the ms_allocatable tree until TXG_DEFER_SIZE
1563 	 * txgs had passed.  As a result, when attempting to estimate an upper
1564 	 * bound for the largest currently-usable free segment in the
1565 	 * metaslab, we need to not consider any ranges currently in the defer
1566 	 * trees. This algorithm approximates the largest available chunk in
1567 	 * the largest range in the unflushed_frees tree by taking the first
1568 	 * chunk.  While this may be a poor estimate, it should only remain so
1569 	 * briefly and should eventually self-correct as frees are no longer
1570 	 * deferred. Similar logic applies to the ms_freed tree. See
1571 	 * metaslab_load() for more details.
1572 	 *
1573 	 * There are two primary sources of inaccuracy in this estimate. Both
1574 	 * are tolerated for performance reasons. The first source is that we
1575 	 * only check the largest segment for overlaps. Smaller segments may
1576 	 * have more favorable overlaps with the other trees, resulting in
1577 	 * larger usable chunks.  Second, we only look at the first chunk in
1578 	 * the largest segment; there may be other usable chunks in the
1579 	 * largest segment, but we ignore them.
1580 	 */
1581 	uint64_t rstart = rs_get_start(rs, msp->ms_unflushed_frees);
1582 	uint64_t rsize = rs_get_end(rs, msp->ms_unflushed_frees) - rstart;
1583 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1584 		uint64_t start = 0;
1585 		uint64_t size = 0;
1586 		boolean_t found = range_tree_find_in(msp->ms_defer[t], rstart,
1587 		    rsize, &start, &size);
1588 		if (found) {
1589 			if (rstart == start)
1590 				return (0);
1591 			rsize = start - rstart;
1592 		}
1593 	}
1594 
1595 	uint64_t start = 0;
1596 	uint64_t size = 0;
1597 	boolean_t found = range_tree_find_in(msp->ms_freed, rstart,
1598 	    rsize, &start, &size);
1599 	if (found)
1600 		rsize = start - rstart;
1601 
1602 	return (rsize);
1603 }
1604 
1605 static range_seg_t *
1606 metaslab_block_find(zfs_btree_t *t, range_tree_t *rt, uint64_t start,
1607     uint64_t size, zfs_btree_index_t *where)
1608 {
1609 	range_seg_t *rs;
1610 	range_seg_max_t rsearch;
1611 
1612 	rs_set_start(&rsearch, rt, start);
1613 	rs_set_end(&rsearch, rt, start + size);
1614 
1615 	rs = zfs_btree_find(t, &rsearch, where);
1616 	if (rs == NULL) {
1617 		rs = zfs_btree_next(t, where, where);
1618 	}
1619 
1620 	return (rs);
1621 }
1622 
1623 /*
1624  * This is a helper function that can be used by the allocator to find a
1625  * suitable block to allocate. This will search the specified B-tree looking
1626  * for a block that matches the specified criteria.
1627  */
1628 static uint64_t
1629 metaslab_block_picker(range_tree_t *rt, uint64_t *cursor, uint64_t size,
1630     uint64_t max_search)
1631 {
1632 	if (*cursor == 0)
1633 		*cursor = rt->rt_start;
1634 	zfs_btree_t *bt = &rt->rt_root;
1635 	zfs_btree_index_t where;
1636 	range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size, &where);
1637 	uint64_t first_found;
1638 	int count_searched = 0;
1639 
1640 	if (rs != NULL)
1641 		first_found = rs_get_start(rs, rt);
1642 
1643 	while (rs != NULL && (rs_get_start(rs, rt) - first_found <=
1644 	    max_search || count_searched < metaslab_min_search_count)) {
1645 		uint64_t offset = rs_get_start(rs, rt);
1646 		if (offset + size <= rs_get_end(rs, rt)) {
1647 			*cursor = offset + size;
1648 			return (offset);
1649 		}
1650 		rs = zfs_btree_next(bt, &where, &where);
1651 		count_searched++;
1652 	}
1653 
1654 	*cursor = 0;
1655 	return (-1ULL);
1656 }
1657 
1658 static uint64_t metaslab_df_alloc(metaslab_t *msp, uint64_t size);
1659 static uint64_t metaslab_cf_alloc(metaslab_t *msp, uint64_t size);
1660 static uint64_t metaslab_ndf_alloc(metaslab_t *msp, uint64_t size);
1661 metaslab_ops_t *metaslab_allocator(spa_t *spa);
1662 
1663 static metaslab_ops_t metaslab_allocators[] = {
1664 	{ "dynamic", metaslab_df_alloc },
1665 	{ "cursor", metaslab_cf_alloc },
1666 	{ "new-dynamic", metaslab_ndf_alloc },
1667 };
1668 
1669 static int
1670 spa_find_allocator_byname(const char *val)
1671 {
1672 	int a = ARRAY_SIZE(metaslab_allocators) - 1;
1673 	if (strcmp("new-dynamic", val) == 0)
1674 		return (-1); /* remove when ndf is working */
1675 	for (; a >= 0; a--) {
1676 		if (strcmp(val, metaslab_allocators[a].msop_name) == 0)
1677 			return (a);
1678 	}
1679 	return (-1);
1680 }
1681 
1682 void
1683 spa_set_allocator(spa_t *spa, const char *allocator)
1684 {
1685 	int a = spa_find_allocator_byname(allocator);
1686 	if (a < 0) a = 0;
1687 	spa->spa_active_allocator = a;
1688 	zfs_dbgmsg("spa allocator: %s\n", metaslab_allocators[a].msop_name);
1689 }
1690 
1691 int
1692 spa_get_allocator(spa_t *spa)
1693 {
1694 	return (spa->spa_active_allocator);
1695 }
1696 
1697 #if defined(_KERNEL)
1698 int
1699 param_set_active_allocator_common(const char *val)
1700 {
1701 	char *p;
1702 
1703 	if (val == NULL)
1704 		return (SET_ERROR(EINVAL));
1705 
1706 	if ((p = strchr(val, '\n')) != NULL)
1707 		*p = '\0';
1708 
1709 	int a = spa_find_allocator_byname(val);
1710 	if (a < 0)
1711 		return (SET_ERROR(EINVAL));
1712 
1713 	zfs_active_allocator = metaslab_allocators[a].msop_name;
1714 	return (0);
1715 }
1716 #endif
1717 
1718 metaslab_ops_t *
1719 metaslab_allocator(spa_t *spa)
1720 {
1721 	int allocator = spa_get_allocator(spa);
1722 	return (&metaslab_allocators[allocator]);
1723 }
1724 
1725 /*
1726  * ==========================================================================
1727  * Dynamic Fit (df) block allocator
1728  *
1729  * Search for a free chunk of at least this size, starting from the last
1730  * offset (for this alignment of block) looking for up to
1731  * metaslab_df_max_search bytes (16MB).  If a large enough free chunk is not
1732  * found within 16MB, then return a free chunk of exactly the requested size (or
1733  * larger).
1734  *
1735  * If it seems like searching from the last offset will be unproductive, skip
1736  * that and just return a free chunk of exactly the requested size (or larger).
1737  * This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct.  This
1738  * mechanism is probably not very useful and may be removed in the future.
1739  *
1740  * The behavior when not searching can be changed to return the largest free
1741  * chunk, instead of a free chunk of exactly the requested size, by setting
1742  * metaslab_df_use_largest_segment.
1743  * ==========================================================================
1744  */
1745 static uint64_t
1746 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1747 {
1748 	/*
1749 	 * Find the largest power of 2 block size that evenly divides the
1750 	 * requested size. This is used to try to allocate blocks with similar
1751 	 * alignment from the same area of the metaslab (i.e. same cursor
1752 	 * bucket) but it does not guarantee that other allocations sizes
1753 	 * may exist in the same region.
1754 	 */
1755 	uint64_t align = size & -size;
1756 	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1757 	range_tree_t *rt = msp->ms_allocatable;
1758 	uint_t free_pct = range_tree_space(rt) * 100 / msp->ms_size;
1759 	uint64_t offset;
1760 
1761 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1762 
1763 	/*
1764 	 * If we're running low on space, find a segment based on size,
1765 	 * rather than iterating based on offset.
1766 	 */
1767 	if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold ||
1768 	    free_pct < metaslab_df_free_pct) {
1769 		offset = -1;
1770 	} else {
1771 		offset = metaslab_block_picker(rt,
1772 		    cursor, size, metaslab_df_max_search);
1773 	}
1774 
1775 	if (offset == -1) {
1776 		range_seg_t *rs;
1777 		if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0)
1778 			metaslab_size_tree_full_load(msp->ms_allocatable);
1779 
1780 		if (metaslab_df_use_largest_segment) {
1781 			/* use largest free segment */
1782 			rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL);
1783 		} else {
1784 			zfs_btree_index_t where;
1785 			/* use segment of this size, or next largest */
1786 			rs = metaslab_block_find(&msp->ms_allocatable_by_size,
1787 			    rt, msp->ms_start, size, &where);
1788 		}
1789 		if (rs != NULL && rs_get_start(rs, rt) + size <= rs_get_end(rs,
1790 		    rt)) {
1791 			offset = rs_get_start(rs, rt);
1792 			*cursor = offset + size;
1793 		}
1794 	}
1795 
1796 	return (offset);
1797 }
1798 
1799 /*
1800  * ==========================================================================
1801  * Cursor fit block allocator -
1802  * Select the largest region in the metaslab, set the cursor to the beginning
1803  * of the range and the cursor_end to the end of the range. As allocations
1804  * are made advance the cursor. Continue allocating from the cursor until
1805  * the range is exhausted and then find a new range.
1806  * ==========================================================================
1807  */
1808 static uint64_t
1809 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1810 {
1811 	range_tree_t *rt = msp->ms_allocatable;
1812 	zfs_btree_t *t = &msp->ms_allocatable_by_size;
1813 	uint64_t *cursor = &msp->ms_lbas[0];
1814 	uint64_t *cursor_end = &msp->ms_lbas[1];
1815 	uint64_t offset = 0;
1816 
1817 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1818 
1819 	ASSERT3U(*cursor_end, >=, *cursor);
1820 
1821 	if ((*cursor + size) > *cursor_end) {
1822 		range_seg_t *rs;
1823 
1824 		if (zfs_btree_numnodes(t) == 0)
1825 			metaslab_size_tree_full_load(msp->ms_allocatable);
1826 		rs = zfs_btree_last(t, NULL);
1827 		if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) <
1828 		    size)
1829 			return (-1ULL);
1830 
1831 		*cursor = rs_get_start(rs, rt);
1832 		*cursor_end = rs_get_end(rs, rt);
1833 	}
1834 
1835 	offset = *cursor;
1836 	*cursor += size;
1837 
1838 	return (offset);
1839 }
1840 
1841 /*
1842  * ==========================================================================
1843  * New dynamic fit allocator -
1844  * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1845  * contiguous blocks. If no region is found then just use the largest segment
1846  * that remains.
1847  * ==========================================================================
1848  */
1849 
1850 /*
1851  * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1852  * to request from the allocator.
1853  */
1854 uint64_t metaslab_ndf_clump_shift = 4;
1855 
1856 static uint64_t
1857 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1858 {
1859 	zfs_btree_t *t = &msp->ms_allocatable->rt_root;
1860 	range_tree_t *rt = msp->ms_allocatable;
1861 	zfs_btree_index_t where;
1862 	range_seg_t *rs;
1863 	range_seg_max_t rsearch;
1864 	uint64_t hbit = highbit64(size);
1865 	uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1866 	uint64_t max_size = metaslab_largest_allocatable(msp);
1867 
1868 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1869 
1870 	if (max_size < size)
1871 		return (-1ULL);
1872 
1873 	rs_set_start(&rsearch, rt, *cursor);
1874 	rs_set_end(&rsearch, rt, *cursor + size);
1875 
1876 	rs = zfs_btree_find(t, &rsearch, &where);
1877 	if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < size) {
1878 		t = &msp->ms_allocatable_by_size;
1879 
1880 		rs_set_start(&rsearch, rt, 0);
1881 		rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit +
1882 		    metaslab_ndf_clump_shift)));
1883 
1884 		rs = zfs_btree_find(t, &rsearch, &where);
1885 		if (rs == NULL)
1886 			rs = zfs_btree_next(t, &where, &where);
1887 		ASSERT(rs != NULL);
1888 	}
1889 
1890 	if ((rs_get_end(rs, rt) - rs_get_start(rs, rt)) >= size) {
1891 		*cursor = rs_get_start(rs, rt) + size;
1892 		return (rs_get_start(rs, rt));
1893 	}
1894 	return (-1ULL);
1895 }
1896 
1897 /*
1898  * ==========================================================================
1899  * Metaslabs
1900  * ==========================================================================
1901  */
1902 
1903 /*
1904  * Wait for any in-progress metaslab loads to complete.
1905  */
1906 static void
1907 metaslab_load_wait(metaslab_t *msp)
1908 {
1909 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1910 
1911 	while (msp->ms_loading) {
1912 		ASSERT(!msp->ms_loaded);
1913 		cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1914 	}
1915 }
1916 
1917 /*
1918  * Wait for any in-progress flushing to complete.
1919  */
1920 static void
1921 metaslab_flush_wait(metaslab_t *msp)
1922 {
1923 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1924 
1925 	while (msp->ms_flushing)
1926 		cv_wait(&msp->ms_flush_cv, &msp->ms_lock);
1927 }
1928 
1929 static unsigned int
1930 metaslab_idx_func(multilist_t *ml, void *arg)
1931 {
1932 	metaslab_t *msp = arg;
1933 
1934 	/*
1935 	 * ms_id values are allocated sequentially, so full 64bit
1936 	 * division would be a waste of time, so limit it to 32 bits.
1937 	 */
1938 	return ((unsigned int)msp->ms_id % multilist_get_num_sublists(ml));
1939 }
1940 
1941 uint64_t
1942 metaslab_allocated_space(metaslab_t *msp)
1943 {
1944 	return (msp->ms_allocated_space);
1945 }
1946 
1947 /*
1948  * Verify that the space accounting on disk matches the in-core range_trees.
1949  */
1950 static void
1951 metaslab_verify_space(metaslab_t *msp, uint64_t txg)
1952 {
1953 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1954 	uint64_t allocating = 0;
1955 	uint64_t sm_free_space, msp_free_space;
1956 
1957 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1958 	ASSERT(!msp->ms_condensing);
1959 
1960 	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
1961 		return;
1962 
1963 	/*
1964 	 * We can only verify the metaslab space when we're called
1965 	 * from syncing context with a loaded metaslab that has an
1966 	 * allocated space map. Calling this in non-syncing context
1967 	 * does not provide a consistent view of the metaslab since
1968 	 * we're performing allocations in the future.
1969 	 */
1970 	if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
1971 	    !msp->ms_loaded)
1972 		return;
1973 
1974 	/*
1975 	 * Even though the smp_alloc field can get negative,
1976 	 * when it comes to a metaslab's space map, that should
1977 	 * never be the case.
1978 	 */
1979 	ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);
1980 
1981 	ASSERT3U(space_map_allocated(msp->ms_sm), >=,
1982 	    range_tree_space(msp->ms_unflushed_frees));
1983 
1984 	ASSERT3U(metaslab_allocated_space(msp), ==,
1985 	    space_map_allocated(msp->ms_sm) +
1986 	    range_tree_space(msp->ms_unflushed_allocs) -
1987 	    range_tree_space(msp->ms_unflushed_frees));
1988 
1989 	sm_free_space = msp->ms_size - metaslab_allocated_space(msp);
1990 
1991 	/*
1992 	 * Account for future allocations since we would have
1993 	 * already deducted that space from the ms_allocatable.
1994 	 */
1995 	for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
1996 		allocating +=
1997 		    range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
1998 	}
1999 	ASSERT3U(allocating + msp->ms_allocated_this_txg, ==,
2000 	    msp->ms_allocating_total);
2001 
2002 	ASSERT3U(msp->ms_deferspace, ==,
2003 	    range_tree_space(msp->ms_defer[0]) +
2004 	    range_tree_space(msp->ms_defer[1]));
2005 
2006 	msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
2007 	    msp->ms_deferspace + range_tree_space(msp->ms_freed);
2008 
2009 	VERIFY3U(sm_free_space, ==, msp_free_space);
2010 }
2011 
2012 static void
2013 metaslab_aux_histograms_clear(metaslab_t *msp)
2014 {
2015 	/*
2016 	 * Auxiliary histograms are only cleared when resetting them,
2017 	 * which can only happen while the metaslab is loaded.
2018 	 */
2019 	ASSERT(msp->ms_loaded);
2020 
2021 	memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
2022 	for (int t = 0; t < TXG_DEFER_SIZE; t++)
2023 		memset(msp->ms_deferhist[t], 0, sizeof (msp->ms_deferhist[t]));
2024 }
2025 
2026 static void
2027 metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
2028     range_tree_t *rt)
2029 {
2030 	/*
2031 	 * This is modeled after space_map_histogram_add(), so refer to that
2032 	 * function for implementation details. We want this to work like
2033 	 * the space map histogram, and not the range tree histogram, as we
2034 	 * are essentially constructing a delta that will be later subtracted
2035 	 * from the space map histogram.
2036 	 */
2037 	int idx = 0;
2038 	for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
2039 		ASSERT3U(i, >=, idx + shift);
2040 		histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);
2041 
2042 		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
2043 			ASSERT3U(idx + shift, ==, i);
2044 			idx++;
2045 			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
2046 		}
2047 	}
2048 }
2049 
2050 /*
2051  * Called at every sync pass that the metaslab gets synced.
2052  *
2053  * The reason is that we want our auxiliary histograms to be updated
2054  * wherever the metaslab's space map histogram is updated. This way
2055  * we stay consistent on which parts of the metaslab space map's
2056  * histogram are currently not available for allocations (e.g because
2057  * they are in the defer, freed, and freeing trees).
2058  */
2059 static void
2060 metaslab_aux_histograms_update(metaslab_t *msp)
2061 {
2062 	space_map_t *sm = msp->ms_sm;
2063 	ASSERT(sm != NULL);
2064 
2065 	/*
2066 	 * This is similar to the metaslab's space map histogram updates
2067 	 * that take place in metaslab_sync(). The only difference is that
2068 	 * we only care about segments that haven't made it into the
2069 	 * ms_allocatable tree yet.
2070 	 */
2071 	if (msp->ms_loaded) {
2072 		metaslab_aux_histograms_clear(msp);
2073 
2074 		metaslab_aux_histogram_add(msp->ms_synchist,
2075 		    sm->sm_shift, msp->ms_freed);
2076 
2077 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2078 			metaslab_aux_histogram_add(msp->ms_deferhist[t],
2079 			    sm->sm_shift, msp->ms_defer[t]);
2080 		}
2081 	}
2082 
2083 	metaslab_aux_histogram_add(msp->ms_synchist,
2084 	    sm->sm_shift, msp->ms_freeing);
2085 }
2086 
2087 /*
2088  * Called every time we are done syncing (writing to) the metaslab,
2089  * i.e. at the end of each sync pass.
2090  * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
2091  */
2092 static void
2093 metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
2094 {
2095 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2096 	space_map_t *sm = msp->ms_sm;
2097 
2098 	if (sm == NULL) {
2099 		/*
2100 		 * We came here from metaslab_init() when creating/opening a
2101 		 * pool, looking at a metaslab that hasn't had any allocations
2102 		 * yet.
2103 		 */
2104 		return;
2105 	}
2106 
2107 	/*
2108 	 * This is similar to the actions that we take for the ms_freed
2109 	 * and ms_defer trees in metaslab_sync_done().
2110 	 */
2111 	uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
2112 	if (defer_allowed) {
2113 		memcpy(msp->ms_deferhist[hist_index], msp->ms_synchist,
2114 		    sizeof (msp->ms_synchist));
2115 	} else {
2116 		memset(msp->ms_deferhist[hist_index], 0,
2117 		    sizeof (msp->ms_deferhist[hist_index]));
2118 	}
2119 	memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
2120 }
2121 
2122 /*
2123  * Ensure that the metaslab's weight and fragmentation are consistent
2124  * with the contents of the histogram (either the range tree's histogram
2125  * or the space map's depending whether the metaslab is loaded).
2126  */
2127 static void
2128 metaslab_verify_weight_and_frag(metaslab_t *msp)
2129 {
2130 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2131 
2132 	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
2133 		return;
2134 
2135 	/*
2136 	 * We can end up here from vdev_remove_complete(), in which case we
2137 	 * cannot do these assertions because we hold spa config locks and
2138 	 * thus we are not allowed to read from the DMU.
2139 	 *
2140 	 * We check if the metaslab group has been removed and if that's
2141 	 * the case we return immediately as that would mean that we are
2142 	 * here from the aforementioned code path.
2143 	 */
2144 	if (msp->ms_group == NULL)
2145 		return;
2146 
2147 	/*
2148 	 * Devices being removed always return a weight of 0 and leave
2149 	 * fragmentation and ms_max_size as is - there is nothing for
2150 	 * us to verify here.
2151 	 */
2152 	vdev_t *vd = msp->ms_group->mg_vd;
2153 	if (vd->vdev_removing)
2154 		return;
2155 
2156 	/*
2157 	 * If the metaslab is dirty it probably means that we've done
2158 	 * some allocations or frees that have changed our histograms
2159 	 * and thus the weight.
2160 	 */
2161 	for (int t = 0; t < TXG_SIZE; t++) {
2162 		if (txg_list_member(&vd->vdev_ms_list, msp, t))
2163 			return;
2164 	}
2165 
2166 	/*
2167 	 * This verification checks that our in-memory state is consistent
2168 	 * with what's on disk. If the pool is read-only then there aren't
2169 	 * any changes and we just have the initially-loaded state.
2170 	 */
2171 	if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa))
2172 		return;
2173 
2174 	/* some extra verification for in-core tree if you can */
2175 	if (msp->ms_loaded) {
2176 		range_tree_stat_verify(msp->ms_allocatable);
2177 		VERIFY(space_map_histogram_verify(msp->ms_sm,
2178 		    msp->ms_allocatable));
2179 	}
2180 
2181 	uint64_t weight = msp->ms_weight;
2182 	uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2183 	boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight);
2184 	uint64_t frag = msp->ms_fragmentation;
2185 	uint64_t max_segsize = msp->ms_max_size;
2186 
2187 	msp->ms_weight = 0;
2188 	msp->ms_fragmentation = 0;
2189 
2190 	/*
2191 	 * This function is used for verification purposes and thus should
2192 	 * not introduce any side-effects/mutations on the system's state.
2193 	 *
2194 	 * Regardless of whether metaslab_weight() thinks this metaslab
2195 	 * should be active or not, we want to ensure that the actual weight
2196 	 * (and therefore the value of ms_weight) would be the same if it
2197 	 * was to be recalculated at this point.
2198 	 *
2199 	 * In addition we set the nodirty flag so metaslab_weight() does
2200 	 * not dirty the metaslab for future TXGs (e.g. when trying to
2201 	 * force condensing to upgrade the metaslab spacemaps).
2202 	 */
2203 	msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active;
2204 
2205 	VERIFY3U(max_segsize, ==, msp->ms_max_size);
2206 
2207 	/*
2208 	 * If the weight type changed then there is no point in doing
2209 	 * verification. Revert fields to their original values.
2210 	 */
2211 	if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) ||
2212 	    (!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) {
2213 		msp->ms_fragmentation = frag;
2214 		msp->ms_weight = weight;
2215 		return;
2216 	}
2217 
2218 	VERIFY3U(msp->ms_fragmentation, ==, frag);
2219 	VERIFY3U(msp->ms_weight, ==, weight);
2220 }
2221 
2222 /*
2223  * If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from
2224  * this class that was used longest ago, and attempt to unload it.  We don't
2225  * want to spend too much time in this loop to prevent performance
2226  * degradation, and we expect that most of the time this operation will
2227  * succeed. Between that and the normal unloading processing during txg sync,
2228  * we expect this to keep the metaslab memory usage under control.
2229  */
2230 static void
2231 metaslab_potentially_evict(metaslab_class_t *mc)
2232 {
2233 #ifdef _KERNEL
2234 	uint64_t allmem = arc_all_memory();
2235 	uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2236 	uint64_t size =	spl_kmem_cache_entry_size(zfs_btree_leaf_cache);
2237 	uint_t tries = 0;
2238 	for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size &&
2239 	    tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2;
2240 	    tries++) {
2241 		unsigned int idx = multilist_get_random_index(
2242 		    &mc->mc_metaslab_txg_list);
2243 		multilist_sublist_t *mls =
2244 		    multilist_sublist_lock(&mc->mc_metaslab_txg_list, idx);
2245 		metaslab_t *msp = multilist_sublist_head(mls);
2246 		multilist_sublist_unlock(mls);
2247 		while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 <
2248 		    inuse * size) {
2249 			VERIFY3P(mls, ==, multilist_sublist_lock(
2250 			    &mc->mc_metaslab_txg_list, idx));
2251 			ASSERT3U(idx, ==,
2252 			    metaslab_idx_func(&mc->mc_metaslab_txg_list, msp));
2253 
2254 			if (!multilist_link_active(&msp->ms_class_txg_node)) {
2255 				multilist_sublist_unlock(mls);
2256 				break;
2257 			}
2258 			metaslab_t *next_msp = multilist_sublist_next(mls, msp);
2259 			multilist_sublist_unlock(mls);
2260 			/*
2261 			 * If the metaslab is currently loading there are two
2262 			 * cases. If it's the metaslab we're evicting, we
2263 			 * can't continue on or we'll panic when we attempt to
2264 			 * recursively lock the mutex. If it's another
2265 			 * metaslab that's loading, it can be safely skipped,
2266 			 * since we know it's very new and therefore not a
2267 			 * good eviction candidate. We check later once the
2268 			 * lock is held that the metaslab is fully loaded
2269 			 * before actually unloading it.
2270 			 */
2271 			if (msp->ms_loading) {
2272 				msp = next_msp;
2273 				inuse =
2274 				    spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2275 				continue;
2276 			}
2277 			/*
2278 			 * We can't unload metaslabs with no spacemap because
2279 			 * they're not ready to be unloaded yet. We can't
2280 			 * unload metaslabs with outstanding allocations
2281 			 * because doing so could cause the metaslab's weight
2282 			 * to decrease while it's unloaded, which violates an
2283 			 * invariant that we use to prevent unnecessary
2284 			 * loading. We also don't unload metaslabs that are
2285 			 * currently active because they are high-weight
2286 			 * metaslabs that are likely to be used in the near
2287 			 * future.
2288 			 */
2289 			mutex_enter(&msp->ms_lock);
2290 			if (msp->ms_allocator == -1 && msp->ms_sm != NULL &&
2291 			    msp->ms_allocating_total == 0) {
2292 				metaslab_unload(msp);
2293 			}
2294 			mutex_exit(&msp->ms_lock);
2295 			msp = next_msp;
2296 			inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2297 		}
2298 	}
2299 #else
2300 	(void) mc, (void) zfs_metaslab_mem_limit;
2301 #endif
2302 }
2303 
2304 static int
2305 metaslab_load_impl(metaslab_t *msp)
2306 {
2307 	int error = 0;
2308 
2309 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2310 	ASSERT(msp->ms_loading);
2311 	ASSERT(!msp->ms_condensing);
2312 
2313 	/*
2314 	 * We temporarily drop the lock to unblock other operations while we
2315 	 * are reading the space map. Therefore, metaslab_sync() and
2316 	 * metaslab_sync_done() can run at the same time as we do.
2317 	 *
2318 	 * If we are using the log space maps, metaslab_sync() can't write to
2319 	 * the metaslab's space map while we are loading as we only write to
2320 	 * it when we are flushing the metaslab, and that can't happen while
2321 	 * we are loading it.
2322 	 *
2323 	 * If we are not using log space maps though, metaslab_sync() can
2324 	 * append to the space map while we are loading. Therefore we load
2325 	 * only entries that existed when we started the load. Additionally,
2326 	 * metaslab_sync_done() has to wait for the load to complete because
2327 	 * there are potential races like metaslab_load() loading parts of the
2328 	 * space map that are currently being appended by metaslab_sync(). If
2329 	 * we didn't, the ms_allocatable would have entries that
2330 	 * metaslab_sync_done() would try to re-add later.
2331 	 *
2332 	 * That's why before dropping the lock we remember the synced length
2333 	 * of the metaslab and read up to that point of the space map,
2334 	 * ignoring entries appended by metaslab_sync() that happen after we
2335 	 * drop the lock.
2336 	 */
2337 	uint64_t length = msp->ms_synced_length;
2338 	mutex_exit(&msp->ms_lock);
2339 
2340 	hrtime_t load_start = gethrtime();
2341 	metaslab_rt_arg_t *mrap;
2342 	if (msp->ms_allocatable->rt_arg == NULL) {
2343 		mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
2344 	} else {
2345 		mrap = msp->ms_allocatable->rt_arg;
2346 		msp->ms_allocatable->rt_ops = NULL;
2347 		msp->ms_allocatable->rt_arg = NULL;
2348 	}
2349 	mrap->mra_bt = &msp->ms_allocatable_by_size;
2350 	mrap->mra_floor_shift = metaslab_by_size_min_shift;
2351 
2352 	if (msp->ms_sm != NULL) {
2353 		error = space_map_load_length(msp->ms_sm, msp->ms_allocatable,
2354 		    SM_FREE, length);
2355 
2356 		/* Now, populate the size-sorted tree. */
2357 		metaslab_rt_create(msp->ms_allocatable, mrap);
2358 		msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
2359 		msp->ms_allocatable->rt_arg = mrap;
2360 
2361 		struct mssa_arg arg = {0};
2362 		arg.rt = msp->ms_allocatable;
2363 		arg.mra = mrap;
2364 		range_tree_walk(msp->ms_allocatable, metaslab_size_sorted_add,
2365 		    &arg);
2366 	} else {
2367 		/*
2368 		 * Add the size-sorted tree first, since we don't need to load
2369 		 * the metaslab from the spacemap.
2370 		 */
2371 		metaslab_rt_create(msp->ms_allocatable, mrap);
2372 		msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
2373 		msp->ms_allocatable->rt_arg = mrap;
2374 		/*
2375 		 * The space map has not been allocated yet, so treat
2376 		 * all the space in the metaslab as free and add it to the
2377 		 * ms_allocatable tree.
2378 		 */
2379 		range_tree_add(msp->ms_allocatable,
2380 		    msp->ms_start, msp->ms_size);
2381 
2382 		if (msp->ms_new) {
2383 			/*
2384 			 * If the ms_sm doesn't exist, this means that this
2385 			 * metaslab hasn't gone through metaslab_sync() and
2386 			 * thus has never been dirtied. So we shouldn't
2387 			 * expect any unflushed allocs or frees from previous
2388 			 * TXGs.
2389 			 */
2390 			ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
2391 			ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
2392 		}
2393 	}
2394 
2395 	/*
2396 	 * We need to grab the ms_sync_lock to prevent metaslab_sync() from
2397 	 * changing the ms_sm (or log_sm) and the metaslab's range trees
2398 	 * while we are about to use them and populate the ms_allocatable.
2399 	 * The ms_lock is insufficient for this because metaslab_sync() doesn't
2400 	 * hold the ms_lock while writing the ms_checkpointing tree to disk.
2401 	 */
2402 	mutex_enter(&msp->ms_sync_lock);
2403 	mutex_enter(&msp->ms_lock);
2404 
2405 	ASSERT(!msp->ms_condensing);
2406 	ASSERT(!msp->ms_flushing);
2407 
2408 	if (error != 0) {
2409 		mutex_exit(&msp->ms_sync_lock);
2410 		return (error);
2411 	}
2412 
2413 	ASSERT3P(msp->ms_group, !=, NULL);
2414 	msp->ms_loaded = B_TRUE;
2415 
2416 	/*
2417 	 * Apply all the unflushed changes to ms_allocatable right
2418 	 * away so any manipulations we do below have a clear view
2419 	 * of what is allocated and what is free.
2420 	 */
2421 	range_tree_walk(msp->ms_unflushed_allocs,
2422 	    range_tree_remove, msp->ms_allocatable);
2423 	range_tree_walk(msp->ms_unflushed_frees,
2424 	    range_tree_add, msp->ms_allocatable);
2425 
2426 	ASSERT3P(msp->ms_group, !=, NULL);
2427 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2428 	if (spa_syncing_log_sm(spa) != NULL) {
2429 		ASSERT(spa_feature_is_enabled(spa,
2430 		    SPA_FEATURE_LOG_SPACEMAP));
2431 
2432 		/*
2433 		 * If we use a log space map we add all the segments
2434 		 * that are in ms_unflushed_frees so they are available
2435 		 * for allocation.
2436 		 *
2437 		 * ms_allocatable needs to contain all free segments
2438 		 * that are ready for allocations (thus not segments
2439 		 * from ms_freeing, ms_freed, and the ms_defer trees).
2440 		 * But if we grab the lock in this code path at a sync
2441 		 * pass later that 1, then it also contains the
2442 		 * segments of ms_freed (they were added to it earlier
2443 		 * in this path through ms_unflushed_frees). So we
2444 		 * need to remove all the segments that exist in
2445 		 * ms_freed from ms_allocatable as they will be added
2446 		 * later in metaslab_sync_done().
2447 		 *
2448 		 * When there's no log space map, the ms_allocatable
2449 		 * correctly doesn't contain any segments that exist
2450 		 * in ms_freed [see ms_synced_length].
2451 		 */
2452 		range_tree_walk(msp->ms_freed,
2453 		    range_tree_remove, msp->ms_allocatable);
2454 	}
2455 
2456 	/*
2457 	 * If we are not using the log space map, ms_allocatable
2458 	 * contains the segments that exist in the ms_defer trees
2459 	 * [see ms_synced_length]. Thus we need to remove them
2460 	 * from ms_allocatable as they will be added again in
2461 	 * metaslab_sync_done().
2462 	 *
2463 	 * If we are using the log space map, ms_allocatable still
2464 	 * contains the segments that exist in the ms_defer trees.
2465 	 * Not because it read them through the ms_sm though. But
2466 	 * because these segments are part of ms_unflushed_frees
2467 	 * whose segments we add to ms_allocatable earlier in this
2468 	 * code path.
2469 	 */
2470 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2471 		range_tree_walk(msp->ms_defer[t],
2472 		    range_tree_remove, msp->ms_allocatable);
2473 	}
2474 
2475 	/*
2476 	 * Call metaslab_recalculate_weight_and_sort() now that the
2477 	 * metaslab is loaded so we get the metaslab's real weight.
2478 	 *
2479 	 * Unless this metaslab was created with older software and
2480 	 * has not yet been converted to use segment-based weight, we
2481 	 * expect the new weight to be better or equal to the weight
2482 	 * that the metaslab had while it was not loaded. This is
2483 	 * because the old weight does not take into account the
2484 	 * consolidation of adjacent segments between TXGs. [see
2485 	 * comment for ms_synchist and ms_deferhist[] for more info]
2486 	 */
2487 	uint64_t weight = msp->ms_weight;
2488 	uint64_t max_size = msp->ms_max_size;
2489 	metaslab_recalculate_weight_and_sort(msp);
2490 	if (!WEIGHT_IS_SPACEBASED(weight))
2491 		ASSERT3U(weight, <=, msp->ms_weight);
2492 	msp->ms_max_size = metaslab_largest_allocatable(msp);
2493 	ASSERT3U(max_size, <=, msp->ms_max_size);
2494 	hrtime_t load_end = gethrtime();
2495 	msp->ms_load_time = load_end;
2496 	zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, "
2497 	    "ms_id %llu, smp_length %llu, "
2498 	    "unflushed_allocs %llu, unflushed_frees %llu, "
2499 	    "freed %llu, defer %llu + %llu, unloaded time %llu ms, "
2500 	    "loading_time %lld ms, ms_max_size %llu, "
2501 	    "max size error %lld, "
2502 	    "old_weight %llx, new_weight %llx",
2503 	    (u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
2504 	    (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
2505 	    (u_longlong_t)msp->ms_id,
2506 	    (u_longlong_t)space_map_length(msp->ms_sm),
2507 	    (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
2508 	    (u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
2509 	    (u_longlong_t)range_tree_space(msp->ms_freed),
2510 	    (u_longlong_t)range_tree_space(msp->ms_defer[0]),
2511 	    (u_longlong_t)range_tree_space(msp->ms_defer[1]),
2512 	    (longlong_t)((load_start - msp->ms_unload_time) / 1000000),
2513 	    (longlong_t)((load_end - load_start) / 1000000),
2514 	    (u_longlong_t)msp->ms_max_size,
2515 	    (u_longlong_t)msp->ms_max_size - max_size,
2516 	    (u_longlong_t)weight, (u_longlong_t)msp->ms_weight);
2517 
2518 	metaslab_verify_space(msp, spa_syncing_txg(spa));
2519 	mutex_exit(&msp->ms_sync_lock);
2520 	return (0);
2521 }
2522 
2523 int
2524 metaslab_load(metaslab_t *msp)
2525 {
2526 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2527 
2528 	/*
2529 	 * There may be another thread loading the same metaslab, if that's
2530 	 * the case just wait until the other thread is done and return.
2531 	 */
2532 	metaslab_load_wait(msp);
2533 	if (msp->ms_loaded)
2534 		return (0);
2535 	VERIFY(!msp->ms_loading);
2536 	ASSERT(!msp->ms_condensing);
2537 
2538 	/*
2539 	 * We set the loading flag BEFORE potentially dropping the lock to
2540 	 * wait for an ongoing flush (see ms_flushing below). This way other
2541 	 * threads know that there is already a thread that is loading this
2542 	 * metaslab.
2543 	 */
2544 	msp->ms_loading = B_TRUE;
2545 
2546 	/*
2547 	 * Wait for any in-progress flushing to finish as we drop the ms_lock
2548 	 * both here (during space_map_load()) and in metaslab_flush() (when
2549 	 * we flush our changes to the ms_sm).
2550 	 */
2551 	if (msp->ms_flushing)
2552 		metaslab_flush_wait(msp);
2553 
2554 	/*
2555 	 * In the possibility that we were waiting for the metaslab to be
2556 	 * flushed (where we temporarily dropped the ms_lock), ensure that
2557 	 * no one else loaded the metaslab somehow.
2558 	 */
2559 	ASSERT(!msp->ms_loaded);
2560 
2561 	/*
2562 	 * If we're loading a metaslab in the normal class, consider evicting
2563 	 * another one to keep our memory usage under the limit defined by the
2564 	 * zfs_metaslab_mem_limit tunable.
2565 	 */
2566 	if (spa_normal_class(msp->ms_group->mg_class->mc_spa) ==
2567 	    msp->ms_group->mg_class) {
2568 		metaslab_potentially_evict(msp->ms_group->mg_class);
2569 	}
2570 
2571 	int error = metaslab_load_impl(msp);
2572 
2573 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2574 	msp->ms_loading = B_FALSE;
2575 	cv_broadcast(&msp->ms_load_cv);
2576 
2577 	return (error);
2578 }
2579 
2580 void
2581 metaslab_unload(metaslab_t *msp)
2582 {
2583 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2584 
2585 	/*
2586 	 * This can happen if a metaslab is selected for eviction (in
2587 	 * metaslab_potentially_evict) and then unloaded during spa_sync (via
2588 	 * metaslab_class_evict_old).
2589 	 */
2590 	if (!msp->ms_loaded)
2591 		return;
2592 
2593 	range_tree_vacate(msp->ms_allocatable, NULL, NULL);
2594 	msp->ms_loaded = B_FALSE;
2595 	msp->ms_unload_time = gethrtime();
2596 
2597 	msp->ms_activation_weight = 0;
2598 	msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
2599 
2600 	if (msp->ms_group != NULL) {
2601 		metaslab_class_t *mc = msp->ms_group->mg_class;
2602 		multilist_sublist_t *mls =
2603 		    multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
2604 		if (multilist_link_active(&msp->ms_class_txg_node))
2605 			multilist_sublist_remove(mls, msp);
2606 		multilist_sublist_unlock(mls);
2607 
2608 		spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2609 		zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, "
2610 		    "ms_id %llu, weight %llx, "
2611 		    "selected txg %llu (%llu ms ago), alloc_txg %llu, "
2612 		    "loaded %llu ms ago, max_size %llu",
2613 		    (u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
2614 		    (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
2615 		    (u_longlong_t)msp->ms_id,
2616 		    (u_longlong_t)msp->ms_weight,
2617 		    (u_longlong_t)msp->ms_selected_txg,
2618 		    (u_longlong_t)(msp->ms_unload_time -
2619 		    msp->ms_selected_time) / 1000 / 1000,
2620 		    (u_longlong_t)msp->ms_alloc_txg,
2621 		    (u_longlong_t)(msp->ms_unload_time -
2622 		    msp->ms_load_time) / 1000 / 1000,
2623 		    (u_longlong_t)msp->ms_max_size);
2624 	}
2625 
2626 	/*
2627 	 * We explicitly recalculate the metaslab's weight based on its space
2628 	 * map (as it is now not loaded). We want unload metaslabs to always
2629 	 * have their weights calculated from the space map histograms, while
2630 	 * loaded ones have it calculated from their in-core range tree
2631 	 * [see metaslab_load()]. This way, the weight reflects the information
2632 	 * available in-core, whether it is loaded or not.
2633 	 *
2634 	 * If ms_group == NULL means that we came here from metaslab_fini(),
2635 	 * at which point it doesn't make sense for us to do the recalculation
2636 	 * and the sorting.
2637 	 */
2638 	if (msp->ms_group != NULL)
2639 		metaslab_recalculate_weight_and_sort(msp);
2640 }
2641 
2642 /*
2643  * We want to optimize the memory use of the per-metaslab range
2644  * trees. To do this, we store the segments in the range trees in
2645  * units of sectors, zero-indexing from the start of the metaslab. If
2646  * the vdev_ms_shift - the vdev_ashift is less than 32, we can store
2647  * the ranges using two uint32_ts, rather than two uint64_ts.
2648  */
2649 range_seg_type_t
2650 metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp,
2651     uint64_t *start, uint64_t *shift)
2652 {
2653 	if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 &&
2654 	    !zfs_metaslab_force_large_segs) {
2655 		*shift = vdev->vdev_ashift;
2656 		*start = msp->ms_start;
2657 		return (RANGE_SEG32);
2658 	} else {
2659 		*shift = 0;
2660 		*start = 0;
2661 		return (RANGE_SEG64);
2662 	}
2663 }
2664 
2665 void
2666 metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg)
2667 {
2668 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2669 	metaslab_class_t *mc = msp->ms_group->mg_class;
2670 	multilist_sublist_t *mls =
2671 	    multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
2672 	if (multilist_link_active(&msp->ms_class_txg_node))
2673 		multilist_sublist_remove(mls, msp);
2674 	msp->ms_selected_txg = txg;
2675 	msp->ms_selected_time = gethrtime();
2676 	multilist_sublist_insert_tail(mls, msp);
2677 	multilist_sublist_unlock(mls);
2678 }
2679 
2680 void
2681 metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
2682     int64_t defer_delta, int64_t space_delta)
2683 {
2684 	vdev_space_update(vd, alloc_delta, defer_delta, space_delta);
2685 
2686 	ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
2687 	ASSERT(vd->vdev_ms_count != 0);
2688 
2689 	metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
2690 	    vdev_deflated_space(vd, space_delta));
2691 }
2692 
2693 int
2694 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object,
2695     uint64_t txg, metaslab_t **msp)
2696 {
2697 	vdev_t *vd = mg->mg_vd;
2698 	spa_t *spa = vd->vdev_spa;
2699 	objset_t *mos = spa->spa_meta_objset;
2700 	metaslab_t *ms;
2701 	int error;
2702 
2703 	ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
2704 	mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
2705 	mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
2706 	cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
2707 	cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL);
2708 	multilist_link_init(&ms->ms_class_txg_node);
2709 
2710 	ms->ms_id = id;
2711 	ms->ms_start = id << vd->vdev_ms_shift;
2712 	ms->ms_size = 1ULL << vd->vdev_ms_shift;
2713 	ms->ms_allocator = -1;
2714 	ms->ms_new = B_TRUE;
2715 
2716 	vdev_ops_t *ops = vd->vdev_ops;
2717 	if (ops->vdev_op_metaslab_init != NULL)
2718 		ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size);
2719 
2720 	/*
2721 	 * We only open space map objects that already exist. All others
2722 	 * will be opened when we finally allocate an object for it. For
2723 	 * readonly pools there is no need to open the space map object.
2724 	 *
2725 	 * Note:
2726 	 * When called from vdev_expand(), we can't call into the DMU as
2727 	 * we are holding the spa_config_lock as a writer and we would
2728 	 * deadlock [see relevant comment in vdev_metaslab_init()]. in
2729 	 * that case, the object parameter is zero though, so we won't
2730 	 * call into the DMU.
2731 	 */
2732 	if (object != 0 && !(spa->spa_mode == SPA_MODE_READ &&
2733 	    !spa->spa_read_spacemaps)) {
2734 		error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
2735 		    ms->ms_size, vd->vdev_ashift);
2736 
2737 		if (error != 0) {
2738 			kmem_free(ms, sizeof (metaslab_t));
2739 			return (error);
2740 		}
2741 
2742 		ASSERT(ms->ms_sm != NULL);
2743 		ms->ms_allocated_space = space_map_allocated(ms->ms_sm);
2744 	}
2745 
2746 	uint64_t shift, start;
2747 	range_seg_type_t type =
2748 	    metaslab_calculate_range_tree_type(vd, ms, &start, &shift);
2749 
2750 	ms->ms_allocatable = range_tree_create(NULL, type, NULL, start, shift);
2751 	for (int t = 0; t < TXG_SIZE; t++) {
2752 		ms->ms_allocating[t] = range_tree_create(NULL, type,
2753 		    NULL, start, shift);
2754 	}
2755 	ms->ms_freeing = range_tree_create(NULL, type, NULL, start, shift);
2756 	ms->ms_freed = range_tree_create(NULL, type, NULL, start, shift);
2757 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2758 		ms->ms_defer[t] = range_tree_create(NULL, type, NULL,
2759 		    start, shift);
2760 	}
2761 	ms->ms_checkpointing =
2762 	    range_tree_create(NULL, type, NULL, start, shift);
2763 	ms->ms_unflushed_allocs =
2764 	    range_tree_create(NULL, type, NULL, start, shift);
2765 
2766 	metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
2767 	mrap->mra_bt = &ms->ms_unflushed_frees_by_size;
2768 	mrap->mra_floor_shift = metaslab_by_size_min_shift;
2769 	ms->ms_unflushed_frees = range_tree_create(&metaslab_rt_ops,
2770 	    type, mrap, start, shift);
2771 
2772 	ms->ms_trim = range_tree_create(NULL, type, NULL, start, shift);
2773 
2774 	metaslab_group_add(mg, ms);
2775 	metaslab_set_fragmentation(ms, B_FALSE);
2776 
2777 	/*
2778 	 * If we're opening an existing pool (txg == 0) or creating
2779 	 * a new one (txg == TXG_INITIAL), all space is available now.
2780 	 * If we're adding space to an existing pool, the new space
2781 	 * does not become available until after this txg has synced.
2782 	 * The metaslab's weight will also be initialized when we sync
2783 	 * out this txg. This ensures that we don't attempt to allocate
2784 	 * from it before we have initialized it completely.
2785 	 */
2786 	if (txg <= TXG_INITIAL) {
2787 		metaslab_sync_done(ms, 0);
2788 		metaslab_space_update(vd, mg->mg_class,
2789 		    metaslab_allocated_space(ms), 0, 0);
2790 	}
2791 
2792 	if (txg != 0) {
2793 		vdev_dirty(vd, 0, NULL, txg);
2794 		vdev_dirty(vd, VDD_METASLAB, ms, txg);
2795 	}
2796 
2797 	*msp = ms;
2798 
2799 	return (0);
2800 }
2801 
2802 static void
2803 metaslab_fini_flush_data(metaslab_t *msp)
2804 {
2805 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2806 
2807 	if (metaslab_unflushed_txg(msp) == 0) {
2808 		ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL),
2809 		    ==, NULL);
2810 		return;
2811 	}
2812 	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
2813 
2814 	mutex_enter(&spa->spa_flushed_ms_lock);
2815 	avl_remove(&spa->spa_metaslabs_by_flushed, msp);
2816 	mutex_exit(&spa->spa_flushed_ms_lock);
2817 
2818 	spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp));
2819 	spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp),
2820 	    metaslab_unflushed_dirty(msp));
2821 }
2822 
2823 uint64_t
2824 metaslab_unflushed_changes_memused(metaslab_t *ms)
2825 {
2826 	return ((range_tree_numsegs(ms->ms_unflushed_allocs) +
2827 	    range_tree_numsegs(ms->ms_unflushed_frees)) *
2828 	    ms->ms_unflushed_allocs->rt_root.bt_elem_size);
2829 }
2830 
2831 void
2832 metaslab_fini(metaslab_t *msp)
2833 {
2834 	metaslab_group_t *mg = msp->ms_group;
2835 	vdev_t *vd = mg->mg_vd;
2836 	spa_t *spa = vd->vdev_spa;
2837 
2838 	metaslab_fini_flush_data(msp);
2839 
2840 	metaslab_group_remove(mg, msp);
2841 
2842 	mutex_enter(&msp->ms_lock);
2843 	VERIFY(msp->ms_group == NULL);
2844 
2845 	/*
2846 	 * If this metaslab hasn't been through metaslab_sync_done() yet its
2847 	 * space hasn't been accounted for in its vdev and doesn't need to be
2848 	 * subtracted.
2849 	 */
2850 	if (!msp->ms_new) {
2851 		metaslab_space_update(vd, mg->mg_class,
2852 		    -metaslab_allocated_space(msp), 0, -msp->ms_size);
2853 
2854 	}
2855 	space_map_close(msp->ms_sm);
2856 	msp->ms_sm = NULL;
2857 
2858 	metaslab_unload(msp);
2859 
2860 	range_tree_destroy(msp->ms_allocatable);
2861 	range_tree_destroy(msp->ms_freeing);
2862 	range_tree_destroy(msp->ms_freed);
2863 
2864 	ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
2865 	    metaslab_unflushed_changes_memused(msp));
2866 	spa->spa_unflushed_stats.sus_memused -=
2867 	    metaslab_unflushed_changes_memused(msp);
2868 	range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
2869 	range_tree_destroy(msp->ms_unflushed_allocs);
2870 	range_tree_destroy(msp->ms_checkpointing);
2871 	range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
2872 	range_tree_destroy(msp->ms_unflushed_frees);
2873 
2874 	for (int t = 0; t < TXG_SIZE; t++) {
2875 		range_tree_destroy(msp->ms_allocating[t]);
2876 	}
2877 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2878 		range_tree_destroy(msp->ms_defer[t]);
2879 	}
2880 	ASSERT0(msp->ms_deferspace);
2881 
2882 	for (int t = 0; t < TXG_SIZE; t++)
2883 		ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t));
2884 
2885 	range_tree_vacate(msp->ms_trim, NULL, NULL);
2886 	range_tree_destroy(msp->ms_trim);
2887 
2888 	mutex_exit(&msp->ms_lock);
2889 	cv_destroy(&msp->ms_load_cv);
2890 	cv_destroy(&msp->ms_flush_cv);
2891 	mutex_destroy(&msp->ms_lock);
2892 	mutex_destroy(&msp->ms_sync_lock);
2893 	ASSERT3U(msp->ms_allocator, ==, -1);
2894 
2895 	kmem_free(msp, sizeof (metaslab_t));
2896 }
2897 
2898 #define	FRAGMENTATION_TABLE_SIZE	17
2899 
2900 /*
2901  * This table defines a segment size based fragmentation metric that will
2902  * allow each metaslab to derive its own fragmentation value. This is done
2903  * by calculating the space in each bucket of the spacemap histogram and
2904  * multiplying that by the fragmentation metric in this table. Doing
2905  * this for all buckets and dividing it by the total amount of free
2906  * space in this metaslab (i.e. the total free space in all buckets) gives
2907  * us the fragmentation metric. This means that a high fragmentation metric
2908  * equates to most of the free space being comprised of small segments.
2909  * Conversely, if the metric is low, then most of the free space is in
2910  * large segments. A 10% change in fragmentation equates to approximately
2911  * double the number of segments.
2912  *
2913  * This table defines 0% fragmented space using 16MB segments. Testing has
2914  * shown that segments that are greater than or equal to 16MB do not suffer
2915  * from drastic performance problems. Using this value, we derive the rest
2916  * of the table. Since the fragmentation value is never stored on disk, it
2917  * is possible to change these calculations in the future.
2918  */
2919 static const int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
2920 	100,	/* 512B	*/
2921 	100,	/* 1K	*/
2922 	98,	/* 2K	*/
2923 	95,	/* 4K	*/
2924 	90,	/* 8K	*/
2925 	80,	/* 16K	*/
2926 	70,	/* 32K	*/
2927 	60,	/* 64K	*/
2928 	50,	/* 128K	*/
2929 	40,	/* 256K	*/
2930 	30,	/* 512K	*/
2931 	20,	/* 1M	*/
2932 	15,	/* 2M	*/
2933 	10,	/* 4M	*/
2934 	5,	/* 8M	*/
2935 	0	/* 16M	*/
2936 };
2937 
2938 /*
2939  * Calculate the metaslab's fragmentation metric and set ms_fragmentation.
2940  * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
2941  * been upgraded and does not support this metric. Otherwise, the return
2942  * value should be in the range [0, 100].
2943  */
2944 static void
2945 metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty)
2946 {
2947 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2948 	uint64_t fragmentation = 0;
2949 	uint64_t total = 0;
2950 	boolean_t feature_enabled = spa_feature_is_enabled(spa,
2951 	    SPA_FEATURE_SPACEMAP_HISTOGRAM);
2952 
2953 	if (!feature_enabled) {
2954 		msp->ms_fragmentation = ZFS_FRAG_INVALID;
2955 		return;
2956 	}
2957 
2958 	/*
2959 	 * A null space map means that the entire metaslab is free
2960 	 * and thus is not fragmented.
2961 	 */
2962 	if (msp->ms_sm == NULL) {
2963 		msp->ms_fragmentation = 0;
2964 		return;
2965 	}
2966 
2967 	/*
2968 	 * If this metaslab's space map has not been upgraded, flag it
2969 	 * so that we upgrade next time we encounter it.
2970 	 */
2971 	if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
2972 		uint64_t txg = spa_syncing_txg(spa);
2973 		vdev_t *vd = msp->ms_group->mg_vd;
2974 
2975 		/*
2976 		 * If we've reached the final dirty txg, then we must
2977 		 * be shutting down the pool. We don't want to dirty
2978 		 * any data past this point so skip setting the condense
2979 		 * flag. We can retry this action the next time the pool
2980 		 * is imported. We also skip marking this metaslab for
2981 		 * condensing if the caller has explicitly set nodirty.
2982 		 */
2983 		if (!nodirty &&
2984 		    spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
2985 			msp->ms_condense_wanted = B_TRUE;
2986 			vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2987 			zfs_dbgmsg("txg %llu, requesting force condense: "
2988 			    "ms_id %llu, vdev_id %llu", (u_longlong_t)txg,
2989 			    (u_longlong_t)msp->ms_id,
2990 			    (u_longlong_t)vd->vdev_id);
2991 		}
2992 		msp->ms_fragmentation = ZFS_FRAG_INVALID;
2993 		return;
2994 	}
2995 
2996 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
2997 		uint64_t space = 0;
2998 		uint8_t shift = msp->ms_sm->sm_shift;
2999 
3000 		int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
3001 		    FRAGMENTATION_TABLE_SIZE - 1);
3002 
3003 		if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
3004 			continue;
3005 
3006 		space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
3007 		total += space;
3008 
3009 		ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
3010 		fragmentation += space * zfs_frag_table[idx];
3011 	}
3012 
3013 	if (total > 0)
3014 		fragmentation /= total;
3015 	ASSERT3U(fragmentation, <=, 100);
3016 
3017 	msp->ms_fragmentation = fragmentation;
3018 }
3019 
3020 /*
3021  * Compute a weight -- a selection preference value -- for the given metaslab.
3022  * This is based on the amount of free space, the level of fragmentation,
3023  * the LBA range, and whether the metaslab is loaded.
3024  */
3025 static uint64_t
3026 metaslab_space_weight(metaslab_t *msp)
3027 {
3028 	metaslab_group_t *mg = msp->ms_group;
3029 	vdev_t *vd = mg->mg_vd;
3030 	uint64_t weight, space;
3031 
3032 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3033 
3034 	/*
3035 	 * The baseline weight is the metaslab's free space.
3036 	 */
3037 	space = msp->ms_size - metaslab_allocated_space(msp);
3038 
3039 	if (metaslab_fragmentation_factor_enabled &&
3040 	    msp->ms_fragmentation != ZFS_FRAG_INVALID) {
3041 		/*
3042 		 * Use the fragmentation information to inversely scale
3043 		 * down the baseline weight. We need to ensure that we
3044 		 * don't exclude this metaslab completely when it's 100%
3045 		 * fragmented. To avoid this we reduce the fragmented value
3046 		 * by 1.
3047 		 */
3048 		space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
3049 
3050 		/*
3051 		 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
3052 		 * this metaslab again. The fragmentation metric may have
3053 		 * decreased the space to something smaller than
3054 		 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
3055 		 * so that we can consume any remaining space.
3056 		 */
3057 		if (space > 0 && space < SPA_MINBLOCKSIZE)
3058 			space = SPA_MINBLOCKSIZE;
3059 	}
3060 	weight = space;
3061 
3062 	/*
3063 	 * Modern disks have uniform bit density and constant angular velocity.
3064 	 * Therefore, the outer recording zones are faster (higher bandwidth)
3065 	 * than the inner zones by the ratio of outer to inner track diameter,
3066 	 * which is typically around 2:1.  We account for this by assigning
3067 	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
3068 	 * In effect, this means that we'll select the metaslab with the most
3069 	 * free bandwidth rather than simply the one with the most free space.
3070 	 */
3071 	if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
3072 		weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
3073 		ASSERT(weight >= space && weight <= 2 * space);
3074 	}
3075 
3076 	/*
3077 	 * If this metaslab is one we're actively using, adjust its
3078 	 * weight to make it preferable to any inactive metaslab so
3079 	 * we'll polish it off. If the fragmentation on this metaslab
3080 	 * has exceed our threshold, then don't mark it active.
3081 	 */
3082 	if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
3083 	    msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
3084 		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
3085 	}
3086 
3087 	WEIGHT_SET_SPACEBASED(weight);
3088 	return (weight);
3089 }
3090 
3091 /*
3092  * Return the weight of the specified metaslab, according to the segment-based
3093  * weighting algorithm. The metaslab must be loaded. This function can
3094  * be called within a sync pass since it relies only on the metaslab's
3095  * range tree which is always accurate when the metaslab is loaded.
3096  */
3097 static uint64_t
3098 metaslab_weight_from_range_tree(metaslab_t *msp)
3099 {
3100 	uint64_t weight = 0;
3101 	uint32_t segments = 0;
3102 
3103 	ASSERT(msp->ms_loaded);
3104 
3105 	for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
3106 	    i--) {
3107 		uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
3108 		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
3109 
3110 		segments <<= 1;
3111 		segments += msp->ms_allocatable->rt_histogram[i];
3112 
3113 		/*
3114 		 * The range tree provides more precision than the space map
3115 		 * and must be downgraded so that all values fit within the
3116 		 * space map's histogram. This allows us to compare loaded
3117 		 * vs. unloaded metaslabs to determine which metaslab is
3118 		 * considered "best".
3119 		 */
3120 		if (i > max_idx)
3121 			continue;
3122 
3123 		if (segments != 0) {
3124 			WEIGHT_SET_COUNT(weight, segments);
3125 			WEIGHT_SET_INDEX(weight, i);
3126 			WEIGHT_SET_ACTIVE(weight, 0);
3127 			break;
3128 		}
3129 	}
3130 	return (weight);
3131 }
3132 
3133 /*
3134  * Calculate the weight based on the on-disk histogram. Should be applied
3135  * only to unloaded metaslabs  (i.e no incoming allocations) in-order to
3136  * give results consistent with the on-disk state
3137  */
3138 static uint64_t
3139 metaslab_weight_from_spacemap(metaslab_t *msp)
3140 {
3141 	space_map_t *sm = msp->ms_sm;
3142 	ASSERT(!msp->ms_loaded);
3143 	ASSERT(sm != NULL);
3144 	ASSERT3U(space_map_object(sm), !=, 0);
3145 	ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
3146 
3147 	/*
3148 	 * Create a joint histogram from all the segments that have made
3149 	 * it to the metaslab's space map histogram, that are not yet
3150 	 * available for allocation because they are still in the freeing
3151 	 * pipeline (e.g. freeing, freed, and defer trees). Then subtract
3152 	 * these segments from the space map's histogram to get a more
3153 	 * accurate weight.
3154 	 */
3155 	uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0};
3156 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
3157 		deferspace_histogram[i] += msp->ms_synchist[i];
3158 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3159 		for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
3160 			deferspace_histogram[i] += msp->ms_deferhist[t][i];
3161 		}
3162 	}
3163 
3164 	uint64_t weight = 0;
3165 	for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
3166 		ASSERT3U(sm->sm_phys->smp_histogram[i], >=,
3167 		    deferspace_histogram[i]);
3168 		uint64_t count =
3169 		    sm->sm_phys->smp_histogram[i] - deferspace_histogram[i];
3170 		if (count != 0) {
3171 			WEIGHT_SET_COUNT(weight, count);
3172 			WEIGHT_SET_INDEX(weight, i + sm->sm_shift);
3173 			WEIGHT_SET_ACTIVE(weight, 0);
3174 			break;
3175 		}
3176 	}
3177 	return (weight);
3178 }
3179 
3180 /*
3181  * Compute a segment-based weight for the specified metaslab. The weight
3182  * is determined by highest bucket in the histogram. The information
3183  * for the highest bucket is encoded into the weight value.
3184  */
3185 static uint64_t
3186 metaslab_segment_weight(metaslab_t *msp)
3187 {
3188 	metaslab_group_t *mg = msp->ms_group;
3189 	uint64_t weight = 0;
3190 	uint8_t shift = mg->mg_vd->vdev_ashift;
3191 
3192 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3193 
3194 	/*
3195 	 * The metaslab is completely free.
3196 	 */
3197 	if (metaslab_allocated_space(msp) == 0) {
3198 		int idx = highbit64(msp->ms_size) - 1;
3199 		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
3200 
3201 		if (idx < max_idx) {
3202 			WEIGHT_SET_COUNT(weight, 1ULL);
3203 			WEIGHT_SET_INDEX(weight, idx);
3204 		} else {
3205 			WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
3206 			WEIGHT_SET_INDEX(weight, max_idx);
3207 		}
3208 		WEIGHT_SET_ACTIVE(weight, 0);
3209 		ASSERT(!WEIGHT_IS_SPACEBASED(weight));
3210 		return (weight);
3211 	}
3212 
3213 	ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
3214 
3215 	/*
3216 	 * If the metaslab is fully allocated then just make the weight 0.
3217 	 */
3218 	if (metaslab_allocated_space(msp) == msp->ms_size)
3219 		return (0);
3220 	/*
3221 	 * If the metaslab is already loaded, then use the range tree to
3222 	 * determine the weight. Otherwise, we rely on the space map information
3223 	 * to generate the weight.
3224 	 */
3225 	if (msp->ms_loaded) {
3226 		weight = metaslab_weight_from_range_tree(msp);
3227 	} else {
3228 		weight = metaslab_weight_from_spacemap(msp);
3229 	}
3230 
3231 	/*
3232 	 * If the metaslab was active the last time we calculated its weight
3233 	 * then keep it active. We want to consume the entire region that
3234 	 * is associated with this weight.
3235 	 */
3236 	if (msp->ms_activation_weight != 0 && weight != 0)
3237 		WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
3238 	return (weight);
3239 }
3240 
3241 /*
3242  * Determine if we should attempt to allocate from this metaslab. If the
3243  * metaslab is loaded, then we can determine if the desired allocation
3244  * can be satisfied by looking at the size of the maximum free segment
3245  * on that metaslab. Otherwise, we make our decision based on the metaslab's
3246  * weight. For segment-based weighting we can determine the maximum
3247  * allocation based on the index encoded in its value. For space-based
3248  * weights we rely on the entire weight (excluding the weight-type bit).
3249  */
3250 static boolean_t
3251 metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard)
3252 {
3253 	/*
3254 	 * If the metaslab is loaded, ms_max_size is definitive and we can use
3255 	 * the fast check. If it's not, the ms_max_size is a lower bound (once
3256 	 * set), and we should use the fast check as long as we're not in
3257 	 * try_hard and it's been less than zfs_metaslab_max_size_cache_sec
3258 	 * seconds since the metaslab was unloaded.
3259 	 */
3260 	if (msp->ms_loaded ||
3261 	    (msp->ms_max_size != 0 && !try_hard && gethrtime() <
3262 	    msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec)))
3263 		return (msp->ms_max_size >= asize);
3264 
3265 	boolean_t should_allocate;
3266 	if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
3267 		/*
3268 		 * The metaslab segment weight indicates segments in the
3269 		 * range [2^i, 2^(i+1)), where i is the index in the weight.
3270 		 * Since the asize might be in the middle of the range, we
3271 		 * should attempt the allocation if asize < 2^(i+1).
3272 		 */
3273 		should_allocate = (asize <
3274 		    1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
3275 	} else {
3276 		should_allocate = (asize <=
3277 		    (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
3278 	}
3279 
3280 	return (should_allocate);
3281 }
3282 
3283 static uint64_t
3284 metaslab_weight(metaslab_t *msp, boolean_t nodirty)
3285 {
3286 	vdev_t *vd = msp->ms_group->mg_vd;
3287 	spa_t *spa = vd->vdev_spa;
3288 	uint64_t weight;
3289 
3290 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3291 
3292 	metaslab_set_fragmentation(msp, nodirty);
3293 
3294 	/*
3295 	 * Update the maximum size. If the metaslab is loaded, this will
3296 	 * ensure that we get an accurate maximum size if newly freed space
3297 	 * has been added back into the free tree. If the metaslab is
3298 	 * unloaded, we check if there's a larger free segment in the
3299 	 * unflushed frees. This is a lower bound on the largest allocatable
3300 	 * segment size. Coalescing of adjacent entries may reveal larger
3301 	 * allocatable segments, but we aren't aware of those until loading
3302 	 * the space map into a range tree.
3303 	 */
3304 	if (msp->ms_loaded) {
3305 		msp->ms_max_size = metaslab_largest_allocatable(msp);
3306 	} else {
3307 		msp->ms_max_size = MAX(msp->ms_max_size,
3308 		    metaslab_largest_unflushed_free(msp));
3309 	}
3310 
3311 	/*
3312 	 * Segment-based weighting requires space map histogram support.
3313 	 */
3314 	if (zfs_metaslab_segment_weight_enabled &&
3315 	    spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
3316 	    (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
3317 	    sizeof (space_map_phys_t))) {
3318 		weight = metaslab_segment_weight(msp);
3319 	} else {
3320 		weight = metaslab_space_weight(msp);
3321 	}
3322 	return (weight);
3323 }
3324 
3325 void
3326 metaslab_recalculate_weight_and_sort(metaslab_t *msp)
3327 {
3328 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3329 
3330 	/* note: we preserve the mask (e.g. indication of primary, etc..) */
3331 	uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
3332 	metaslab_group_sort(msp->ms_group, msp,
3333 	    metaslab_weight(msp, B_FALSE) | was_active);
3334 }
3335 
3336 static int
3337 metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
3338     int allocator, uint64_t activation_weight)
3339 {
3340 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
3341 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3342 
3343 	/*
3344 	 * If we're activating for the claim code, we don't want to actually
3345 	 * set the metaslab up for a specific allocator.
3346 	 */
3347 	if (activation_weight == METASLAB_WEIGHT_CLAIM) {
3348 		ASSERT0(msp->ms_activation_weight);
3349 		msp->ms_activation_weight = msp->ms_weight;
3350 		metaslab_group_sort(mg, msp, msp->ms_weight |
3351 		    activation_weight);
3352 		return (0);
3353 	}
3354 
3355 	metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
3356 	    &mga->mga_primary : &mga->mga_secondary);
3357 
3358 	mutex_enter(&mg->mg_lock);
3359 	if (*mspp != NULL) {
3360 		mutex_exit(&mg->mg_lock);
3361 		return (EEXIST);
3362 	}
3363 
3364 	*mspp = msp;
3365 	ASSERT3S(msp->ms_allocator, ==, -1);
3366 	msp->ms_allocator = allocator;
3367 	msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
3368 
3369 	ASSERT0(msp->ms_activation_weight);
3370 	msp->ms_activation_weight = msp->ms_weight;
3371 	metaslab_group_sort_impl(mg, msp,
3372 	    msp->ms_weight | activation_weight);
3373 	mutex_exit(&mg->mg_lock);
3374 
3375 	return (0);
3376 }
3377 
3378 static int
3379 metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
3380 {
3381 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3382 
3383 	/*
3384 	 * The current metaslab is already activated for us so there
3385 	 * is nothing to do. Already activated though, doesn't mean
3386 	 * that this metaslab is activated for our allocator nor our
3387 	 * requested activation weight. The metaslab could have started
3388 	 * as an active one for our allocator but changed allocators
3389 	 * while we were waiting to grab its ms_lock or we stole it
3390 	 * [see find_valid_metaslab()]. This means that there is a
3391 	 * possibility of passivating a metaslab of another allocator
3392 	 * or from a different activation mask, from this thread.
3393 	 */
3394 	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
3395 		ASSERT(msp->ms_loaded);
3396 		return (0);
3397 	}
3398 
3399 	int error = metaslab_load(msp);
3400 	if (error != 0) {
3401 		metaslab_group_sort(msp->ms_group, msp, 0);
3402 		return (error);
3403 	}
3404 
3405 	/*
3406 	 * When entering metaslab_load() we may have dropped the
3407 	 * ms_lock because we were loading this metaslab, or we
3408 	 * were waiting for another thread to load it for us. In
3409 	 * that scenario, we recheck the weight of the metaslab
3410 	 * to see if it was activated by another thread.
3411 	 *
3412 	 * If the metaslab was activated for another allocator or
3413 	 * it was activated with a different activation weight (e.g.
3414 	 * we wanted to make it a primary but it was activated as
3415 	 * secondary) we return error (EBUSY).
3416 	 *
3417 	 * If the metaslab was activated for the same allocator
3418 	 * and requested activation mask, skip activating it.
3419 	 */
3420 	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
3421 		if (msp->ms_allocator != allocator)
3422 			return (EBUSY);
3423 
3424 		if ((msp->ms_weight & activation_weight) == 0)
3425 			return (SET_ERROR(EBUSY));
3426 
3427 		EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY),
3428 		    msp->ms_primary);
3429 		return (0);
3430 	}
3431 
3432 	/*
3433 	 * If the metaslab has literally 0 space, it will have weight 0. In
3434 	 * that case, don't bother activating it. This can happen if the
3435 	 * metaslab had space during find_valid_metaslab, but another thread
3436 	 * loaded it and used all that space while we were waiting to grab the
3437 	 * lock.
3438 	 */
3439 	if (msp->ms_weight == 0) {
3440 		ASSERT0(range_tree_space(msp->ms_allocatable));
3441 		return (SET_ERROR(ENOSPC));
3442 	}
3443 
3444 	if ((error = metaslab_activate_allocator(msp->ms_group, msp,
3445 	    allocator, activation_weight)) != 0) {
3446 		return (error);
3447 	}
3448 
3449 	ASSERT(msp->ms_loaded);
3450 	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
3451 
3452 	return (0);
3453 }
3454 
3455 static void
3456 metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
3457     uint64_t weight)
3458 {
3459 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3460 	ASSERT(msp->ms_loaded);
3461 
3462 	if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
3463 		metaslab_group_sort(mg, msp, weight);
3464 		return;
3465 	}
3466 
3467 	mutex_enter(&mg->mg_lock);
3468 	ASSERT3P(msp->ms_group, ==, mg);
3469 	ASSERT3S(0, <=, msp->ms_allocator);
3470 	ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
3471 
3472 	metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator];
3473 	if (msp->ms_primary) {
3474 		ASSERT3P(mga->mga_primary, ==, msp);
3475 		ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
3476 		mga->mga_primary = NULL;
3477 	} else {
3478 		ASSERT3P(mga->mga_secondary, ==, msp);
3479 		ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
3480 		mga->mga_secondary = NULL;
3481 	}
3482 	msp->ms_allocator = -1;
3483 	metaslab_group_sort_impl(mg, msp, weight);
3484 	mutex_exit(&mg->mg_lock);
3485 }
3486 
3487 static void
3488 metaslab_passivate(metaslab_t *msp, uint64_t weight)
3489 {
3490 	uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE;
3491 
3492 	/*
3493 	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
3494 	 * this metaslab again.  In that case, it had better be empty,
3495 	 * or we would be leaving space on the table.
3496 	 */
3497 	ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
3498 	    size >= SPA_MINBLOCKSIZE ||
3499 	    range_tree_space(msp->ms_allocatable) == 0);
3500 	ASSERT0(weight & METASLAB_ACTIVE_MASK);
3501 
3502 	ASSERT(msp->ms_activation_weight != 0);
3503 	msp->ms_activation_weight = 0;
3504 	metaslab_passivate_allocator(msp->ms_group, msp, weight);
3505 	ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
3506 }
3507 
3508 /*
3509  * Segment-based metaslabs are activated once and remain active until
3510  * we either fail an allocation attempt (similar to space-based metaslabs)
3511  * or have exhausted the free space in zfs_metaslab_switch_threshold
3512  * buckets since the metaslab was activated. This function checks to see
3513  * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
3514  * metaslab and passivates it proactively. This will allow us to select a
3515  * metaslab with a larger contiguous region, if any, remaining within this
3516  * metaslab group. If we're in sync pass > 1, then we continue using this
3517  * metaslab so that we don't dirty more block and cause more sync passes.
3518  */
3519 static void
3520 metaslab_segment_may_passivate(metaslab_t *msp)
3521 {
3522 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3523 
3524 	if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
3525 		return;
3526 
3527 	/*
3528 	 * Since we are in the middle of a sync pass, the most accurate
3529 	 * information that is accessible to us is the in-core range tree
3530 	 * histogram; calculate the new weight based on that information.
3531 	 */
3532 	uint64_t weight = metaslab_weight_from_range_tree(msp);
3533 	int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
3534 	int current_idx = WEIGHT_GET_INDEX(weight);
3535 
3536 	if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
3537 		metaslab_passivate(msp, weight);
3538 }
3539 
3540 static void
3541 metaslab_preload(void *arg)
3542 {
3543 	metaslab_t *msp = arg;
3544 	metaslab_class_t *mc = msp->ms_group->mg_class;
3545 	spa_t *spa = mc->mc_spa;
3546 	fstrans_cookie_t cookie = spl_fstrans_mark();
3547 
3548 	ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
3549 
3550 	mutex_enter(&msp->ms_lock);
3551 	(void) metaslab_load(msp);
3552 	metaslab_set_selected_txg(msp, spa_syncing_txg(spa));
3553 	mutex_exit(&msp->ms_lock);
3554 	spl_fstrans_unmark(cookie);
3555 }
3556 
3557 static void
3558 metaslab_group_preload(metaslab_group_t *mg)
3559 {
3560 	spa_t *spa = mg->mg_vd->vdev_spa;
3561 	metaslab_t *msp;
3562 	avl_tree_t *t = &mg->mg_metaslab_tree;
3563 	int m = 0;
3564 
3565 	if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
3566 		taskq_wait_outstanding(mg->mg_taskq, 0);
3567 		return;
3568 	}
3569 
3570 	mutex_enter(&mg->mg_lock);
3571 
3572 	/*
3573 	 * Load the next potential metaslabs
3574 	 */
3575 	for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
3576 		ASSERT3P(msp->ms_group, ==, mg);
3577 
3578 		/*
3579 		 * We preload only the maximum number of metaslabs specified
3580 		 * by metaslab_preload_limit. If a metaslab is being forced
3581 		 * to condense then we preload it too. This will ensure
3582 		 * that force condensing happens in the next txg.
3583 		 */
3584 		if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
3585 			continue;
3586 		}
3587 
3588 		VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
3589 		    msp, TQ_SLEEP) != TASKQID_INVALID);
3590 	}
3591 	mutex_exit(&mg->mg_lock);
3592 }
3593 
3594 /*
3595  * Determine if the space map's on-disk footprint is past our tolerance for
3596  * inefficiency. We would like to use the following criteria to make our
3597  * decision:
3598  *
3599  * 1. Do not condense if the size of the space map object would dramatically
3600  *    increase as a result of writing out the free space range tree.
3601  *
3602  * 2. Condense if the on on-disk space map representation is at least
3603  *    zfs_condense_pct/100 times the size of the optimal representation
3604  *    (i.e. zfs_condense_pct = 110 and in-core = 1MB, optimal = 1.1MB).
3605  *
3606  * 3. Do not condense if the on-disk size of the space map does not actually
3607  *    decrease.
3608  *
3609  * Unfortunately, we cannot compute the on-disk size of the space map in this
3610  * context because we cannot accurately compute the effects of compression, etc.
3611  * Instead, we apply the heuristic described in the block comment for
3612  * zfs_metaslab_condense_block_threshold - we only condense if the space used
3613  * is greater than a threshold number of blocks.
3614  */
3615 static boolean_t
3616 metaslab_should_condense(metaslab_t *msp)
3617 {
3618 	space_map_t *sm = msp->ms_sm;
3619 	vdev_t *vd = msp->ms_group->mg_vd;
3620 	uint64_t vdev_blocksize = 1ULL << vd->vdev_ashift;
3621 
3622 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3623 	ASSERT(msp->ms_loaded);
3624 	ASSERT(sm != NULL);
3625 	ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
3626 
3627 	/*
3628 	 * We always condense metaslabs that are empty and metaslabs for
3629 	 * which a condense request has been made.
3630 	 */
3631 	if (range_tree_numsegs(msp->ms_allocatable) == 0 ||
3632 	    msp->ms_condense_wanted)
3633 		return (B_TRUE);
3634 
3635 	uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
3636 	uint64_t object_size = space_map_length(sm);
3637 	uint64_t optimal_size = space_map_estimate_optimal_size(sm,
3638 	    msp->ms_allocatable, SM_NO_VDEVID);
3639 
3640 	return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
3641 	    object_size > zfs_metaslab_condense_block_threshold * record_size);
3642 }
3643 
3644 /*
3645  * Condense the on-disk space map representation to its minimized form.
3646  * The minimized form consists of a small number of allocations followed
3647  * by the entries of the free range tree (ms_allocatable). The condensed
3648  * spacemap contains all the entries of previous TXGs (including those in
3649  * the pool-wide log spacemaps; thus this is effectively a superset of
3650  * metaslab_flush()), but this TXG's entries still need to be written.
3651  */
3652 static void
3653 metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
3654 {
3655 	range_tree_t *condense_tree;
3656 	space_map_t *sm = msp->ms_sm;
3657 	uint64_t txg = dmu_tx_get_txg(tx);
3658 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3659 
3660 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3661 	ASSERT(msp->ms_loaded);
3662 	ASSERT(msp->ms_sm != NULL);
3663 
3664 	/*
3665 	 * In order to condense the space map, we need to change it so it
3666 	 * only describes which segments are currently allocated and free.
3667 	 *
3668 	 * All the current free space resides in the ms_allocatable, all
3669 	 * the ms_defer trees, and all the ms_allocating trees. We ignore
3670 	 * ms_freed because it is empty because we're in sync pass 1. We
3671 	 * ignore ms_freeing because these changes are not yet reflected
3672 	 * in the spacemap (they will be written later this txg).
3673 	 *
3674 	 * So to truncate the space map to represent all the entries of
3675 	 * previous TXGs we do the following:
3676 	 *
3677 	 * 1] We create a range tree (condense tree) that is 100% empty.
3678 	 * 2] We add to it all segments found in the ms_defer trees
3679 	 *    as those segments are marked as free in the original space
3680 	 *    map. We do the same with the ms_allocating trees for the same
3681 	 *    reason. Adding these segments should be a relatively
3682 	 *    inexpensive operation since we expect these trees to have a
3683 	 *    small number of nodes.
3684 	 * 3] We vacate any unflushed allocs, since they are not frees we
3685 	 *    need to add to the condense tree. Then we vacate any
3686 	 *    unflushed frees as they should already be part of ms_allocatable.
3687 	 * 4] At this point, we would ideally like to add all segments
3688 	 *    in the ms_allocatable tree from the condense tree. This way
3689 	 *    we would write all the entries of the condense tree as the
3690 	 *    condensed space map, which would only contain freed
3691 	 *    segments with everything else assumed to be allocated.
3692 	 *
3693 	 *    Doing so can be prohibitively expensive as ms_allocatable can
3694 	 *    be large, and therefore computationally expensive to add to
3695 	 *    the condense_tree. Instead we first sync out an entry marking
3696 	 *    everything as allocated, then the condense_tree and then the
3697 	 *    ms_allocatable, in the condensed space map. While this is not
3698 	 *    optimal, it is typically close to optimal and more importantly
3699 	 *    much cheaper to compute.
3700 	 *
3701 	 * 5] Finally, as both of the unflushed trees were written to our
3702 	 *    new and condensed metaslab space map, we basically flushed
3703 	 *    all the unflushed changes to disk, thus we call
3704 	 *    metaslab_flush_update().
3705 	 */
3706 	ASSERT3U(spa_sync_pass(spa), ==, 1);
3707 	ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
3708 
3709 	zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
3710 	    "spa %s, smp size %llu, segments %llu, forcing condense=%s",
3711 	    (u_longlong_t)txg, (u_longlong_t)msp->ms_id, msp,
3712 	    (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
3713 	    spa->spa_name, (u_longlong_t)space_map_length(msp->ms_sm),
3714 	    (u_longlong_t)range_tree_numsegs(msp->ms_allocatable),
3715 	    msp->ms_condense_wanted ? "TRUE" : "FALSE");
3716 
3717 	msp->ms_condense_wanted = B_FALSE;
3718 
3719 	range_seg_type_t type;
3720 	uint64_t shift, start;
3721 	type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
3722 	    &start, &shift);
3723 
3724 	condense_tree = range_tree_create(NULL, type, NULL, start, shift);
3725 
3726 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3727 		range_tree_walk(msp->ms_defer[t],
3728 		    range_tree_add, condense_tree);
3729 	}
3730 
3731 	for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
3732 		range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
3733 		    range_tree_add, condense_tree);
3734 	}
3735 
3736 	ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3737 	    metaslab_unflushed_changes_memused(msp));
3738 	spa->spa_unflushed_stats.sus_memused -=
3739 	    metaslab_unflushed_changes_memused(msp);
3740 	range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3741 	range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3742 
3743 	/*
3744 	 * We're about to drop the metaslab's lock thus allowing other
3745 	 * consumers to change it's content. Set the metaslab's ms_condensing
3746 	 * flag to ensure that allocations on this metaslab do not occur
3747 	 * while we're in the middle of committing it to disk. This is only
3748 	 * critical for ms_allocatable as all other range trees use per TXG
3749 	 * views of their content.
3750 	 */
3751 	msp->ms_condensing = B_TRUE;
3752 
3753 	mutex_exit(&msp->ms_lock);
3754 	uint64_t object = space_map_object(msp->ms_sm);
3755 	space_map_truncate(sm,
3756 	    spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
3757 	    zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);
3758 
3759 	/*
3760 	 * space_map_truncate() may have reallocated the spacemap object.
3761 	 * If so, update the vdev_ms_array.
3762 	 */
3763 	if (space_map_object(msp->ms_sm) != object) {
3764 		object = space_map_object(msp->ms_sm);
3765 		dmu_write(spa->spa_meta_objset,
3766 		    msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
3767 		    msp->ms_id, sizeof (uint64_t), &object, tx);
3768 	}
3769 
3770 	/*
3771 	 * Note:
3772 	 * When the log space map feature is enabled, each space map will
3773 	 * always have ALLOCS followed by FREES for each sync pass. This is
3774 	 * typically true even when the log space map feature is disabled,
3775 	 * except from the case where a metaslab goes through metaslab_sync()
3776 	 * and gets condensed. In that case the metaslab's space map will have
3777 	 * ALLOCS followed by FREES (due to condensing) followed by ALLOCS
3778 	 * followed by FREES (due to space_map_write() in metaslab_sync()) for
3779 	 * sync pass 1.
3780 	 */
3781 	range_tree_t *tmp_tree = range_tree_create(NULL, type, NULL, start,
3782 	    shift);
3783 	range_tree_add(tmp_tree, msp->ms_start, msp->ms_size);
3784 	space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx);
3785 	space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
3786 	space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx);
3787 
3788 	range_tree_vacate(condense_tree, NULL, NULL);
3789 	range_tree_destroy(condense_tree);
3790 	range_tree_vacate(tmp_tree, NULL, NULL);
3791 	range_tree_destroy(tmp_tree);
3792 	mutex_enter(&msp->ms_lock);
3793 
3794 	msp->ms_condensing = B_FALSE;
3795 	metaslab_flush_update(msp, tx);
3796 }
3797 
3798 static void
3799 metaslab_unflushed_add(metaslab_t *msp, dmu_tx_t *tx)
3800 {
3801 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3802 	ASSERT(spa_syncing_log_sm(spa) != NULL);
3803 	ASSERT(msp->ms_sm != NULL);
3804 	ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3805 	ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3806 
3807 	mutex_enter(&spa->spa_flushed_ms_lock);
3808 	metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3809 	metaslab_set_unflushed_dirty(msp, B_TRUE);
3810 	avl_add(&spa->spa_metaslabs_by_flushed, msp);
3811 	mutex_exit(&spa->spa_flushed_ms_lock);
3812 
3813 	spa_log_sm_increment_current_mscount(spa);
3814 	spa_log_summary_add_flushed_metaslab(spa, B_TRUE);
3815 }
3816 
3817 void
3818 metaslab_unflushed_bump(metaslab_t *msp, dmu_tx_t *tx, boolean_t dirty)
3819 {
3820 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3821 	ASSERT(spa_syncing_log_sm(spa) != NULL);
3822 	ASSERT(msp->ms_sm != NULL);
3823 	ASSERT(metaslab_unflushed_txg(msp) != 0);
3824 	ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp);
3825 	ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3826 	ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3827 
3828 	VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa));
3829 
3830 	/* update metaslab's position in our flushing tree */
3831 	uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp);
3832 	boolean_t ms_prev_flushed_dirty = metaslab_unflushed_dirty(msp);
3833 	mutex_enter(&spa->spa_flushed_ms_lock);
3834 	avl_remove(&spa->spa_metaslabs_by_flushed, msp);
3835 	metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3836 	metaslab_set_unflushed_dirty(msp, dirty);
3837 	avl_add(&spa->spa_metaslabs_by_flushed, msp);
3838 	mutex_exit(&spa->spa_flushed_ms_lock);
3839 
3840 	/* update metaslab counts of spa_log_sm_t nodes */
3841 	spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg);
3842 	spa_log_sm_increment_current_mscount(spa);
3843 
3844 	/* update log space map summary */
3845 	spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg,
3846 	    ms_prev_flushed_dirty);
3847 	spa_log_summary_add_flushed_metaslab(spa, dirty);
3848 
3849 	/* cleanup obsolete logs if any */
3850 	spa_cleanup_old_sm_logs(spa, tx);
3851 }
3852 
3853 /*
3854  * Called when the metaslab has been flushed (its own spacemap now reflects
3855  * all the contents of the pool-wide spacemap log). Updates the metaslab's
3856  * metadata and any pool-wide related log space map data (e.g. summary,
3857  * obsolete logs, etc..) to reflect that.
3858  */
3859 static void
3860 metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx)
3861 {
3862 	metaslab_group_t *mg = msp->ms_group;
3863 	spa_t *spa = mg->mg_vd->vdev_spa;
3864 
3865 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3866 
3867 	ASSERT3U(spa_sync_pass(spa), ==, 1);
3868 
3869 	/*
3870 	 * Just because a metaslab got flushed, that doesn't mean that
3871 	 * it will pass through metaslab_sync_done(). Thus, make sure to
3872 	 * update ms_synced_length here in case it doesn't.
3873 	 */
3874 	msp->ms_synced_length = space_map_length(msp->ms_sm);
3875 
3876 	/*
3877 	 * We may end up here from metaslab_condense() without the
3878 	 * feature being active. In that case this is a no-op.
3879 	 */
3880 	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP) ||
3881 	    metaslab_unflushed_txg(msp) == 0)
3882 		return;
3883 
3884 	metaslab_unflushed_bump(msp, tx, B_FALSE);
3885 }
3886 
3887 boolean_t
3888 metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
3889 {
3890 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3891 
3892 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3893 	ASSERT3U(spa_sync_pass(spa), ==, 1);
3894 	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
3895 
3896 	ASSERT(msp->ms_sm != NULL);
3897 	ASSERT(metaslab_unflushed_txg(msp) != 0);
3898 	ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);
3899 
3900 	/*
3901 	 * There is nothing wrong with flushing the same metaslab twice, as
3902 	 * this codepath should work on that case. However, the current
3903 	 * flushing scheme makes sure to avoid this situation as we would be
3904 	 * making all these calls without having anything meaningful to write
3905 	 * to disk. We assert this behavior here.
3906 	 */
3907 	ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));
3908 
3909 	/*
3910 	 * We can not flush while loading, because then we would
3911 	 * not load the ms_unflushed_{allocs,frees}.
3912 	 */
3913 	if (msp->ms_loading)
3914 		return (B_FALSE);
3915 
3916 	metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3917 	metaslab_verify_weight_and_frag(msp);
3918 
3919 	/*
3920 	 * Metaslab condensing is effectively flushing. Therefore if the
3921 	 * metaslab can be condensed we can just condense it instead of
3922 	 * flushing it.
3923 	 *
3924 	 * Note that metaslab_condense() does call metaslab_flush_update()
3925 	 * so we can just return immediately after condensing. We also
3926 	 * don't need to care about setting ms_flushing or broadcasting
3927 	 * ms_flush_cv, even if we temporarily drop the ms_lock in
3928 	 * metaslab_condense(), as the metaslab is already loaded.
3929 	 */
3930 	if (msp->ms_loaded && metaslab_should_condense(msp)) {
3931 		metaslab_group_t *mg = msp->ms_group;
3932 
3933 		/*
3934 		 * For all histogram operations below refer to the
3935 		 * comments of metaslab_sync() where we follow a
3936 		 * similar procedure.
3937 		 */
3938 		metaslab_group_histogram_verify(mg);
3939 		metaslab_class_histogram_verify(mg->mg_class);
3940 		metaslab_group_histogram_remove(mg, msp);
3941 
3942 		metaslab_condense(msp, tx);
3943 
3944 		space_map_histogram_clear(msp->ms_sm);
3945 		space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
3946 		ASSERT(range_tree_is_empty(msp->ms_freed));
3947 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3948 			space_map_histogram_add(msp->ms_sm,
3949 			    msp->ms_defer[t], tx);
3950 		}
3951 		metaslab_aux_histograms_update(msp);
3952 
3953 		metaslab_group_histogram_add(mg, msp);
3954 		metaslab_group_histogram_verify(mg);
3955 		metaslab_class_histogram_verify(mg->mg_class);
3956 
3957 		metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3958 
3959 		/*
3960 		 * Since we recreated the histogram (and potentially
3961 		 * the ms_sm too while condensing) ensure that the
3962 		 * weight is updated too because we are not guaranteed
3963 		 * that this metaslab is dirty and will go through
3964 		 * metaslab_sync_done().
3965 		 */
3966 		metaslab_recalculate_weight_and_sort(msp);
3967 		return (B_TRUE);
3968 	}
3969 
3970 	msp->ms_flushing = B_TRUE;
3971 	uint64_t sm_len_before = space_map_length(msp->ms_sm);
3972 
3973 	mutex_exit(&msp->ms_lock);
3974 	space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC,
3975 	    SM_NO_VDEVID, tx);
3976 	space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE,
3977 	    SM_NO_VDEVID, tx);
3978 	mutex_enter(&msp->ms_lock);
3979 
3980 	uint64_t sm_len_after = space_map_length(msp->ms_sm);
3981 	if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
3982 		zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
3983 		    "ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
3984 		    "appended %llu bytes", (u_longlong_t)dmu_tx_get_txg(tx),
3985 		    spa_name(spa),
3986 		    (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
3987 		    (u_longlong_t)msp->ms_id,
3988 		    (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
3989 		    (u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
3990 		    (u_longlong_t)(sm_len_after - sm_len_before));
3991 	}
3992 
3993 	ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3994 	    metaslab_unflushed_changes_memused(msp));
3995 	spa->spa_unflushed_stats.sus_memused -=
3996 	    metaslab_unflushed_changes_memused(msp);
3997 	range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3998 	range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3999 
4000 	metaslab_verify_space(msp, dmu_tx_get_txg(tx));
4001 	metaslab_verify_weight_and_frag(msp);
4002 
4003 	metaslab_flush_update(msp, tx);
4004 
4005 	metaslab_verify_space(msp, dmu_tx_get_txg(tx));
4006 	metaslab_verify_weight_and_frag(msp);
4007 
4008 	msp->ms_flushing = B_FALSE;
4009 	cv_broadcast(&msp->ms_flush_cv);
4010 	return (B_TRUE);
4011 }
4012 
4013 /*
4014  * Write a metaslab to disk in the context of the specified transaction group.
4015  */
4016 void
4017 metaslab_sync(metaslab_t *msp, uint64_t txg)
4018 {
4019 	metaslab_group_t *mg = msp->ms_group;
4020 	vdev_t *vd = mg->mg_vd;
4021 	spa_t *spa = vd->vdev_spa;
4022 	objset_t *mos = spa_meta_objset(spa);
4023 	range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
4024 	dmu_tx_t *tx;
4025 
4026 	ASSERT(!vd->vdev_ishole);
4027 
4028 	/*
4029 	 * This metaslab has just been added so there's no work to do now.
4030 	 */
4031 	if (msp->ms_new) {
4032 		ASSERT0(range_tree_space(alloctree));
4033 		ASSERT0(range_tree_space(msp->ms_freeing));
4034 		ASSERT0(range_tree_space(msp->ms_freed));
4035 		ASSERT0(range_tree_space(msp->ms_checkpointing));
4036 		ASSERT0(range_tree_space(msp->ms_trim));
4037 		return;
4038 	}
4039 
4040 	/*
4041 	 * Normally, we don't want to process a metaslab if there are no
4042 	 * allocations or frees to perform. However, if the metaslab is being
4043 	 * forced to condense, it's loaded and we're not beyond the final
4044 	 * dirty txg, we need to let it through. Not condensing beyond the
4045 	 * final dirty txg prevents an issue where metaslabs that need to be
4046 	 * condensed but were loaded for other reasons could cause a panic
4047 	 * here. By only checking the txg in that branch of the conditional,
4048 	 * we preserve the utility of the VERIFY statements in all other
4049 	 * cases.
4050 	 */
4051 	if (range_tree_is_empty(alloctree) &&
4052 	    range_tree_is_empty(msp->ms_freeing) &&
4053 	    range_tree_is_empty(msp->ms_checkpointing) &&
4054 	    !(msp->ms_loaded && msp->ms_condense_wanted &&
4055 	    txg <= spa_final_dirty_txg(spa)))
4056 		return;
4057 
4058 
4059 	VERIFY3U(txg, <=, spa_final_dirty_txg(spa));
4060 
4061 	/*
4062 	 * The only state that can actually be changing concurrently
4063 	 * with metaslab_sync() is the metaslab's ms_allocatable. No
4064 	 * other thread can be modifying this txg's alloc, freeing,
4065 	 * freed, or space_map_phys_t.  We drop ms_lock whenever we
4066 	 * could call into the DMU, because the DMU can call down to
4067 	 * us (e.g. via zio_free()) at any time.
4068 	 *
4069 	 * The spa_vdev_remove_thread() can be reading metaslab state
4070 	 * concurrently, and it is locked out by the ms_sync_lock.
4071 	 * Note that the ms_lock is insufficient for this, because it
4072 	 * is dropped by space_map_write().
4073 	 */
4074 	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
4075 
4076 	/*
4077 	 * Generate a log space map if one doesn't exist already.
4078 	 */
4079 	spa_generate_syncing_log_sm(spa, tx);
4080 
4081 	if (msp->ms_sm == NULL) {
4082 		uint64_t new_object = space_map_alloc(mos,
4083 		    spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
4084 		    zfs_metaslab_sm_blksz_with_log :
4085 		    zfs_metaslab_sm_blksz_no_log, tx);
4086 		VERIFY3U(new_object, !=, 0);
4087 
4088 		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
4089 		    msp->ms_id, sizeof (uint64_t), &new_object, tx);
4090 
4091 		VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
4092 		    msp->ms_start, msp->ms_size, vd->vdev_ashift));
4093 		ASSERT(msp->ms_sm != NULL);
4094 
4095 		ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
4096 		ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
4097 		ASSERT0(metaslab_allocated_space(msp));
4098 	}
4099 
4100 	if (!range_tree_is_empty(msp->ms_checkpointing) &&
4101 	    vd->vdev_checkpoint_sm == NULL) {
4102 		ASSERT(spa_has_checkpoint(spa));
4103 
4104 		uint64_t new_object = space_map_alloc(mos,
4105 		    zfs_vdev_standard_sm_blksz, tx);
4106 		VERIFY3U(new_object, !=, 0);
4107 
4108 		VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
4109 		    mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
4110 		ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
4111 
4112 		/*
4113 		 * We save the space map object as an entry in vdev_top_zap
4114 		 * so it can be retrieved when the pool is reopened after an
4115 		 * export or through zdb.
4116 		 */
4117 		VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
4118 		    vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
4119 		    sizeof (new_object), 1, &new_object, tx));
4120 	}
4121 
4122 	mutex_enter(&msp->ms_sync_lock);
4123 	mutex_enter(&msp->ms_lock);
4124 
4125 	/*
4126 	 * Note: metaslab_condense() clears the space map's histogram.
4127 	 * Therefore we must verify and remove this histogram before
4128 	 * condensing.
4129 	 */
4130 	metaslab_group_histogram_verify(mg);
4131 	metaslab_class_histogram_verify(mg->mg_class);
4132 	metaslab_group_histogram_remove(mg, msp);
4133 
4134 	if (spa->spa_sync_pass == 1 && msp->ms_loaded &&
4135 	    metaslab_should_condense(msp))
4136 		metaslab_condense(msp, tx);
4137 
4138 	/*
4139 	 * We'll be going to disk to sync our space accounting, thus we
4140 	 * drop the ms_lock during that time so allocations coming from
4141 	 * open-context (ZIL) for future TXGs do not block.
4142 	 */
4143 	mutex_exit(&msp->ms_lock);
4144 	space_map_t *log_sm = spa_syncing_log_sm(spa);
4145 	if (log_sm != NULL) {
4146 		ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
4147 		if (metaslab_unflushed_txg(msp) == 0)
4148 			metaslab_unflushed_add(msp, tx);
4149 		else if (!metaslab_unflushed_dirty(msp))
4150 			metaslab_unflushed_bump(msp, tx, B_TRUE);
4151 
4152 		space_map_write(log_sm, alloctree, SM_ALLOC,
4153 		    vd->vdev_id, tx);
4154 		space_map_write(log_sm, msp->ms_freeing, SM_FREE,
4155 		    vd->vdev_id, tx);
4156 		mutex_enter(&msp->ms_lock);
4157 
4158 		ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
4159 		    metaslab_unflushed_changes_memused(msp));
4160 		spa->spa_unflushed_stats.sus_memused -=
4161 		    metaslab_unflushed_changes_memused(msp);
4162 		range_tree_remove_xor_add(alloctree,
4163 		    msp->ms_unflushed_frees, msp->ms_unflushed_allocs);
4164 		range_tree_remove_xor_add(msp->ms_freeing,
4165 		    msp->ms_unflushed_allocs, msp->ms_unflushed_frees);
4166 		spa->spa_unflushed_stats.sus_memused +=
4167 		    metaslab_unflushed_changes_memused(msp);
4168 	} else {
4169 		ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
4170 
4171 		space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
4172 		    SM_NO_VDEVID, tx);
4173 		space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
4174 		    SM_NO_VDEVID, tx);
4175 		mutex_enter(&msp->ms_lock);
4176 	}
4177 
4178 	msp->ms_allocated_space += range_tree_space(alloctree);
4179 	ASSERT3U(msp->ms_allocated_space, >=,
4180 	    range_tree_space(msp->ms_freeing));
4181 	msp->ms_allocated_space -= range_tree_space(msp->ms_freeing);
4182 
4183 	if (!range_tree_is_empty(msp->ms_checkpointing)) {
4184 		ASSERT(spa_has_checkpoint(spa));
4185 		ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
4186 
4187 		/*
4188 		 * Since we are doing writes to disk and the ms_checkpointing
4189 		 * tree won't be changing during that time, we drop the
4190 		 * ms_lock while writing to the checkpoint space map, for the
4191 		 * same reason mentioned above.
4192 		 */
4193 		mutex_exit(&msp->ms_lock);
4194 		space_map_write(vd->vdev_checkpoint_sm,
4195 		    msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
4196 		mutex_enter(&msp->ms_lock);
4197 
4198 		spa->spa_checkpoint_info.sci_dspace +=
4199 		    range_tree_space(msp->ms_checkpointing);
4200 		vd->vdev_stat.vs_checkpoint_space +=
4201 		    range_tree_space(msp->ms_checkpointing);
4202 		ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
4203 		    -space_map_allocated(vd->vdev_checkpoint_sm));
4204 
4205 		range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
4206 	}
4207 
4208 	if (msp->ms_loaded) {
4209 		/*
4210 		 * When the space map is loaded, we have an accurate
4211 		 * histogram in the range tree. This gives us an opportunity
4212 		 * to bring the space map's histogram up-to-date so we clear
4213 		 * it first before updating it.
4214 		 */
4215 		space_map_histogram_clear(msp->ms_sm);
4216 		space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
4217 
4218 		/*
4219 		 * Since we've cleared the histogram we need to add back
4220 		 * any free space that has already been processed, plus
4221 		 * any deferred space. This allows the on-disk histogram
4222 		 * to accurately reflect all free space even if some space
4223 		 * is not yet available for allocation (i.e. deferred).
4224 		 */
4225 		space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
4226 
4227 		/*
4228 		 * Add back any deferred free space that has not been
4229 		 * added back into the in-core free tree yet. This will
4230 		 * ensure that we don't end up with a space map histogram
4231 		 * that is completely empty unless the metaslab is fully
4232 		 * allocated.
4233 		 */
4234 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
4235 			space_map_histogram_add(msp->ms_sm,
4236 			    msp->ms_defer[t], tx);
4237 		}
4238 	}
4239 
4240 	/*
4241 	 * Always add the free space from this sync pass to the space
4242 	 * map histogram. We want to make sure that the on-disk histogram
4243 	 * accounts for all free space. If the space map is not loaded,
4244 	 * then we will lose some accuracy but will correct it the next
4245 	 * time we load the space map.
4246 	 */
4247 	space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
4248 	metaslab_aux_histograms_update(msp);
4249 
4250 	metaslab_group_histogram_add(mg, msp);
4251 	metaslab_group_histogram_verify(mg);
4252 	metaslab_class_histogram_verify(mg->mg_class);
4253 
4254 	/*
4255 	 * For sync pass 1, we avoid traversing this txg's free range tree
4256 	 * and instead will just swap the pointers for freeing and freed.
4257 	 * We can safely do this since the freed_tree is guaranteed to be
4258 	 * empty on the initial pass.
4259 	 *
4260 	 * Keep in mind that even if we are currently using a log spacemap
4261 	 * we want current frees to end up in the ms_allocatable (but not
4262 	 * get appended to the ms_sm) so their ranges can be reused as usual.
4263 	 */
4264 	if (spa_sync_pass(spa) == 1) {
4265 		range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
4266 		ASSERT0(msp->ms_allocated_this_txg);
4267 	} else {
4268 		range_tree_vacate(msp->ms_freeing,
4269 		    range_tree_add, msp->ms_freed);
4270 	}
4271 	msp->ms_allocated_this_txg += range_tree_space(alloctree);
4272 	range_tree_vacate(alloctree, NULL, NULL);
4273 
4274 	ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4275 	ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
4276 	    & TXG_MASK]));
4277 	ASSERT0(range_tree_space(msp->ms_freeing));
4278 	ASSERT0(range_tree_space(msp->ms_checkpointing));
4279 
4280 	mutex_exit(&msp->ms_lock);
4281 
4282 	/*
4283 	 * Verify that the space map object ID has been recorded in the
4284 	 * vdev_ms_array.
4285 	 */
4286 	uint64_t object;
4287 	VERIFY0(dmu_read(mos, vd->vdev_ms_array,
4288 	    msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0));
4289 	VERIFY3U(object, ==, space_map_object(msp->ms_sm));
4290 
4291 	mutex_exit(&msp->ms_sync_lock);
4292 	dmu_tx_commit(tx);
4293 }
4294 
4295 static void
4296 metaslab_evict(metaslab_t *msp, uint64_t txg)
4297 {
4298 	if (!msp->ms_loaded || msp->ms_disabled != 0)
4299 		return;
4300 
4301 	for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
4302 		VERIFY0(range_tree_space(
4303 		    msp->ms_allocating[(txg + t) & TXG_MASK]));
4304 	}
4305 	if (msp->ms_allocator != -1)
4306 		metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK);
4307 
4308 	if (!metaslab_debug_unload)
4309 		metaslab_unload(msp);
4310 }
4311 
4312 /*
4313  * Called after a transaction group has completely synced to mark
4314  * all of the metaslab's free space as usable.
4315  */
4316 void
4317 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
4318 {
4319 	metaslab_group_t *mg = msp->ms_group;
4320 	vdev_t *vd = mg->mg_vd;
4321 	spa_t *spa = vd->vdev_spa;
4322 	range_tree_t **defer_tree;
4323 	int64_t alloc_delta, defer_delta;
4324 	boolean_t defer_allowed = B_TRUE;
4325 
4326 	ASSERT(!vd->vdev_ishole);
4327 
4328 	mutex_enter(&msp->ms_lock);
4329 
4330 	if (msp->ms_new) {
4331 		/* this is a new metaslab, add its capacity to the vdev */
4332 		metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
4333 
4334 		/* there should be no allocations nor frees at this point */
4335 		VERIFY0(msp->ms_allocated_this_txg);
4336 		VERIFY0(range_tree_space(msp->ms_freed));
4337 	}
4338 
4339 	ASSERT0(range_tree_space(msp->ms_freeing));
4340 	ASSERT0(range_tree_space(msp->ms_checkpointing));
4341 
4342 	defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
4343 
4344 	uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
4345 	    metaslab_class_get_alloc(spa_normal_class(spa));
4346 	if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
4347 		defer_allowed = B_FALSE;
4348 	}
4349 
4350 	defer_delta = 0;
4351 	alloc_delta = msp->ms_allocated_this_txg -
4352 	    range_tree_space(msp->ms_freed);
4353 
4354 	if (defer_allowed) {
4355 		defer_delta = range_tree_space(msp->ms_freed) -
4356 		    range_tree_space(*defer_tree);
4357 	} else {
4358 		defer_delta -= range_tree_space(*defer_tree);
4359 	}
4360 	metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
4361 	    defer_delta, 0);
4362 
4363 	if (spa_syncing_log_sm(spa) == NULL) {
4364 		/*
4365 		 * If there's a metaslab_load() in progress and we don't have
4366 		 * a log space map, it means that we probably wrote to the
4367 		 * metaslab's space map. If this is the case, we need to
4368 		 * make sure that we wait for the load to complete so that we
4369 		 * have a consistent view at the in-core side of the metaslab.
4370 		 */
4371 		metaslab_load_wait(msp);
4372 	} else {
4373 		ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
4374 	}
4375 
4376 	/*
4377 	 * When auto-trimming is enabled, free ranges which are added to
4378 	 * ms_allocatable are also be added to ms_trim.  The ms_trim tree is
4379 	 * periodically consumed by the vdev_autotrim_thread() which issues
4380 	 * trims for all ranges and then vacates the tree.  The ms_trim tree
4381 	 * can be discarded at any time with the sole consequence of recent
4382 	 * frees not being trimmed.
4383 	 */
4384 	if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
4385 		range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
4386 		if (!defer_allowed) {
4387 			range_tree_walk(msp->ms_freed, range_tree_add,
4388 			    msp->ms_trim);
4389 		}
4390 	} else {
4391 		range_tree_vacate(msp->ms_trim, NULL, NULL);
4392 	}
4393 
4394 	/*
4395 	 * Move the frees from the defer_tree back to the free
4396 	 * range tree (if it's loaded). Swap the freed_tree and
4397 	 * the defer_tree -- this is safe to do because we've
4398 	 * just emptied out the defer_tree.
4399 	 */
4400 	range_tree_vacate(*defer_tree,
4401 	    msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
4402 	if (defer_allowed) {
4403 		range_tree_swap(&msp->ms_freed, defer_tree);
4404 	} else {
4405 		range_tree_vacate(msp->ms_freed,
4406 		    msp->ms_loaded ? range_tree_add : NULL,
4407 		    msp->ms_allocatable);
4408 	}
4409 
4410 	msp->ms_synced_length = space_map_length(msp->ms_sm);
4411 
4412 	msp->ms_deferspace += defer_delta;
4413 	ASSERT3S(msp->ms_deferspace, >=, 0);
4414 	ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
4415 	if (msp->ms_deferspace != 0) {
4416 		/*
4417 		 * Keep syncing this metaslab until all deferred frees
4418 		 * are back in circulation.
4419 		 */
4420 		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
4421 	}
4422 	metaslab_aux_histograms_update_done(msp, defer_allowed);
4423 
4424 	if (msp->ms_new) {
4425 		msp->ms_new = B_FALSE;
4426 		mutex_enter(&mg->mg_lock);
4427 		mg->mg_ms_ready++;
4428 		mutex_exit(&mg->mg_lock);
4429 	}
4430 
4431 	/*
4432 	 * Re-sort metaslab within its group now that we've adjusted
4433 	 * its allocatable space.
4434 	 */
4435 	metaslab_recalculate_weight_and_sort(msp);
4436 
4437 	ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4438 	ASSERT0(range_tree_space(msp->ms_freeing));
4439 	ASSERT0(range_tree_space(msp->ms_freed));
4440 	ASSERT0(range_tree_space(msp->ms_checkpointing));
4441 	msp->ms_allocating_total -= msp->ms_allocated_this_txg;
4442 	msp->ms_allocated_this_txg = 0;
4443 	mutex_exit(&msp->ms_lock);
4444 }
4445 
4446 void
4447 metaslab_sync_reassess(metaslab_group_t *mg)
4448 {
4449 	spa_t *spa = mg->mg_class->mc_spa;
4450 
4451 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
4452 	metaslab_group_alloc_update(mg);
4453 	mg->mg_fragmentation = metaslab_group_fragmentation(mg);
4454 
4455 	/*
4456 	 * Preload the next potential metaslabs but only on active
4457 	 * metaslab groups. We can get into a state where the metaslab
4458 	 * is no longer active since we dirty metaslabs as we remove a
4459 	 * a device, thus potentially making the metaslab group eligible
4460 	 * for preloading.
4461 	 */
4462 	if (mg->mg_activation_count > 0) {
4463 		metaslab_group_preload(mg);
4464 	}
4465 	spa_config_exit(spa, SCL_ALLOC, FTAG);
4466 }
4467 
4468 /*
4469  * When writing a ditto block (i.e. more than one DVA for a given BP) on
4470  * the same vdev as an existing DVA of this BP, then try to allocate it
4471  * on a different metaslab than existing DVAs (i.e. a unique metaslab).
4472  */
4473 static boolean_t
4474 metaslab_is_unique(metaslab_t *msp, dva_t *dva)
4475 {
4476 	uint64_t dva_ms_id;
4477 
4478 	if (DVA_GET_ASIZE(dva) == 0)
4479 		return (B_TRUE);
4480 
4481 	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
4482 		return (B_TRUE);
4483 
4484 	dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
4485 
4486 	return (msp->ms_id != dva_ms_id);
4487 }
4488 
4489 /*
4490  * ==========================================================================
4491  * Metaslab allocation tracing facility
4492  * ==========================================================================
4493  */
4494 
4495 /*
4496  * Add an allocation trace element to the allocation tracing list.
4497  */
4498 static void
4499 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
4500     metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
4501     int allocator)
4502 {
4503 	metaslab_alloc_trace_t *mat;
4504 
4505 	if (!metaslab_trace_enabled)
4506 		return;
4507 
4508 	/*
4509 	 * When the tracing list reaches its maximum we remove
4510 	 * the second element in the list before adding a new one.
4511 	 * By removing the second element we preserve the original
4512 	 * entry as a clue to what allocations steps have already been
4513 	 * performed.
4514 	 */
4515 	if (zal->zal_size == metaslab_trace_max_entries) {
4516 		metaslab_alloc_trace_t *mat_next;
4517 #ifdef ZFS_DEBUG
4518 		panic("too many entries in allocation list");
4519 #endif
4520 		METASLABSTAT_BUMP(metaslabstat_trace_over_limit);
4521 		zal->zal_size--;
4522 		mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
4523 		list_remove(&zal->zal_list, mat_next);
4524 		kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
4525 	}
4526 
4527 	mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
4528 	list_link_init(&mat->mat_list_node);
4529 	mat->mat_mg = mg;
4530 	mat->mat_msp = msp;
4531 	mat->mat_size = psize;
4532 	mat->mat_dva_id = dva_id;
4533 	mat->mat_offset = offset;
4534 	mat->mat_weight = 0;
4535 	mat->mat_allocator = allocator;
4536 
4537 	if (msp != NULL)
4538 		mat->mat_weight = msp->ms_weight;
4539 
4540 	/*
4541 	 * The list is part of the zio so locking is not required. Only
4542 	 * a single thread will perform allocations for a given zio.
4543 	 */
4544 	list_insert_tail(&zal->zal_list, mat);
4545 	zal->zal_size++;
4546 
4547 	ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
4548 }
4549 
4550 void
4551 metaslab_trace_init(zio_alloc_list_t *zal)
4552 {
4553 	list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
4554 	    offsetof(metaslab_alloc_trace_t, mat_list_node));
4555 	zal->zal_size = 0;
4556 }
4557 
4558 void
4559 metaslab_trace_fini(zio_alloc_list_t *zal)
4560 {
4561 	metaslab_alloc_trace_t *mat;
4562 
4563 	while ((mat = list_remove_head(&zal->zal_list)) != NULL)
4564 		kmem_cache_free(metaslab_alloc_trace_cache, mat);
4565 	list_destroy(&zal->zal_list);
4566 	zal->zal_size = 0;
4567 }
4568 
4569 /*
4570  * ==========================================================================
4571  * Metaslab block operations
4572  * ==========================================================================
4573  */
4574 
4575 static void
4576 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, const void *tag,
4577     int flags, int allocator)
4578 {
4579 	if (!(flags & METASLAB_ASYNC_ALLOC) ||
4580 	    (flags & METASLAB_DONT_THROTTLE))
4581 		return;
4582 
4583 	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4584 	if (!mg->mg_class->mc_alloc_throttle_enabled)
4585 		return;
4586 
4587 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4588 	(void) zfs_refcount_add(&mga->mga_alloc_queue_depth, tag);
4589 }
4590 
4591 static void
4592 metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
4593 {
4594 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4595 	metaslab_class_allocator_t *mca =
4596 	    &mg->mg_class->mc_allocator[allocator];
4597 	uint64_t max = mg->mg_max_alloc_queue_depth;
4598 	uint64_t cur = mga->mga_cur_max_alloc_queue_depth;
4599 	while (cur < max) {
4600 		if (atomic_cas_64(&mga->mga_cur_max_alloc_queue_depth,
4601 		    cur, cur + 1) == cur) {
4602 			atomic_inc_64(&mca->mca_alloc_max_slots);
4603 			return;
4604 		}
4605 		cur = mga->mga_cur_max_alloc_queue_depth;
4606 	}
4607 }
4608 
4609 void
4610 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, const void *tag,
4611     int flags, int allocator, boolean_t io_complete)
4612 {
4613 	if (!(flags & METASLAB_ASYNC_ALLOC) ||
4614 	    (flags & METASLAB_DONT_THROTTLE))
4615 		return;
4616 
4617 	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4618 	if (!mg->mg_class->mc_alloc_throttle_enabled)
4619 		return;
4620 
4621 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4622 	(void) zfs_refcount_remove(&mga->mga_alloc_queue_depth, tag);
4623 	if (io_complete)
4624 		metaslab_group_increment_qdepth(mg, allocator);
4625 }
4626 
4627 void
4628 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, const void *tag,
4629     int allocator)
4630 {
4631 #ifdef ZFS_DEBUG
4632 	const dva_t *dva = bp->blk_dva;
4633 	int ndvas = BP_GET_NDVAS(bp);
4634 
4635 	for (int d = 0; d < ndvas; d++) {
4636 		uint64_t vdev = DVA_GET_VDEV(&dva[d]);
4637 		metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4638 		metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4639 		VERIFY(zfs_refcount_not_held(&mga->mga_alloc_queue_depth, tag));
4640 	}
4641 #endif
4642 }
4643 
4644 static uint64_t
4645 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
4646 {
4647 	uint64_t start;
4648 	range_tree_t *rt = msp->ms_allocatable;
4649 	metaslab_class_t *mc = msp->ms_group->mg_class;
4650 
4651 	ASSERT(MUTEX_HELD(&msp->ms_lock));
4652 	VERIFY(!msp->ms_condensing);
4653 	VERIFY0(msp->ms_disabled);
4654 
4655 	start = mc->mc_ops->msop_alloc(msp, size);
4656 	if (start != -1ULL) {
4657 		metaslab_group_t *mg = msp->ms_group;
4658 		vdev_t *vd = mg->mg_vd;
4659 
4660 		VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
4661 		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
4662 		VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
4663 		range_tree_remove(rt, start, size);
4664 		range_tree_clear(msp->ms_trim, start, size);
4665 
4666 		if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
4667 			vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
4668 
4669 		range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
4670 		msp->ms_allocating_total += size;
4671 
4672 		/* Track the last successful allocation */
4673 		msp->ms_alloc_txg = txg;
4674 		metaslab_verify_space(msp, txg);
4675 	}
4676 
4677 	/*
4678 	 * Now that we've attempted the allocation we need to update the
4679 	 * metaslab's maximum block size since it may have changed.
4680 	 */
4681 	msp->ms_max_size = metaslab_largest_allocatable(msp);
4682 	return (start);
4683 }
4684 
4685 /*
4686  * Find the metaslab with the highest weight that is less than what we've
4687  * already tried.  In the common case, this means that we will examine each
4688  * metaslab at most once. Note that concurrent callers could reorder metaslabs
4689  * by activation/passivation once we have dropped the mg_lock. If a metaslab is
4690  * activated by another thread, and we fail to allocate from the metaslab we
4691  * have selected, we may not try the newly-activated metaslab, and instead
4692  * activate another metaslab.  This is not optimal, but generally does not cause
4693  * any problems (a possible exception being if every metaslab is completely full
4694  * except for the newly-activated metaslab which we fail to examine).
4695  */
4696 static metaslab_t *
4697 find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
4698     dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
4699     boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search,
4700     boolean_t *was_active)
4701 {
4702 	avl_index_t idx;
4703 	avl_tree_t *t = &mg->mg_metaslab_tree;
4704 	metaslab_t *msp = avl_find(t, search, &idx);
4705 	if (msp == NULL)
4706 		msp = avl_nearest(t, idx, AVL_AFTER);
4707 
4708 	uint_t tries = 0;
4709 	for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
4710 		int i;
4711 
4712 		if (!try_hard && tries > zfs_metaslab_find_max_tries) {
4713 			METASLABSTAT_BUMP(metaslabstat_too_many_tries);
4714 			return (NULL);
4715 		}
4716 		tries++;
4717 
4718 		if (!metaslab_should_allocate(msp, asize, try_hard)) {
4719 			metaslab_trace_add(zal, mg, msp, asize, d,
4720 			    TRACE_TOO_SMALL, allocator);
4721 			continue;
4722 		}
4723 
4724 		/*
4725 		 * If the selected metaslab is condensing or disabled,
4726 		 * skip it.
4727 		 */
4728 		if (msp->ms_condensing || msp->ms_disabled > 0)
4729 			continue;
4730 
4731 		*was_active = msp->ms_allocator != -1;
4732 		/*
4733 		 * If we're activating as primary, this is our first allocation
4734 		 * from this disk, so we don't need to check how close we are.
4735 		 * If the metaslab under consideration was already active,
4736 		 * we're getting desperate enough to steal another allocator's
4737 		 * metaslab, so we still don't care about distances.
4738 		 */
4739 		if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
4740 			break;
4741 
4742 		for (i = 0; i < d; i++) {
4743 			if (want_unique &&
4744 			    !metaslab_is_unique(msp, &dva[i]))
4745 				break;  /* try another metaslab */
4746 		}
4747 		if (i == d)
4748 			break;
4749 	}
4750 
4751 	if (msp != NULL) {
4752 		search->ms_weight = msp->ms_weight;
4753 		search->ms_start = msp->ms_start + 1;
4754 		search->ms_allocator = msp->ms_allocator;
4755 		search->ms_primary = msp->ms_primary;
4756 	}
4757 	return (msp);
4758 }
4759 
4760 static void
4761 metaslab_active_mask_verify(metaslab_t *msp)
4762 {
4763 	ASSERT(MUTEX_HELD(&msp->ms_lock));
4764 
4765 	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
4766 		return;
4767 
4768 	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0)
4769 		return;
4770 
4771 	if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) {
4772 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4773 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4774 		VERIFY3S(msp->ms_allocator, !=, -1);
4775 		VERIFY(msp->ms_primary);
4776 		return;
4777 	}
4778 
4779 	if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) {
4780 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4781 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4782 		VERIFY3S(msp->ms_allocator, !=, -1);
4783 		VERIFY(!msp->ms_primary);
4784 		return;
4785 	}
4786 
4787 	if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
4788 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4789 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4790 		VERIFY3S(msp->ms_allocator, ==, -1);
4791 		return;
4792 	}
4793 }
4794 
4795 static uint64_t
4796 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
4797     uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
4798     int allocator, boolean_t try_hard)
4799 {
4800 	metaslab_t *msp = NULL;
4801 	uint64_t offset = -1ULL;
4802 
4803 	uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY;
4804 	for (int i = 0; i < d; i++) {
4805 		if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4806 		    DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4807 			activation_weight = METASLAB_WEIGHT_SECONDARY;
4808 		} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
4809 		    DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4810 			activation_weight = METASLAB_WEIGHT_CLAIM;
4811 			break;
4812 		}
4813 	}
4814 
4815 	/*
4816 	 * If we don't have enough metaslabs active to fill the entire array, we
4817 	 * just use the 0th slot.
4818 	 */
4819 	if (mg->mg_ms_ready < mg->mg_allocators * 3)
4820 		allocator = 0;
4821 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4822 
4823 	ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
4824 
4825 	metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
4826 	search->ms_weight = UINT64_MAX;
4827 	search->ms_start = 0;
4828 	/*
4829 	 * At the end of the metaslab tree are the already-active metaslabs,
4830 	 * first the primaries, then the secondaries. When we resume searching
4831 	 * through the tree, we need to consider ms_allocator and ms_primary so
4832 	 * we start in the location right after where we left off, and don't
4833 	 * accidentally loop forever considering the same metaslabs.
4834 	 */
4835 	search->ms_allocator = -1;
4836 	search->ms_primary = B_TRUE;
4837 	for (;;) {
4838 		boolean_t was_active = B_FALSE;
4839 
4840 		mutex_enter(&mg->mg_lock);
4841 
4842 		if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4843 		    mga->mga_primary != NULL) {
4844 			msp = mga->mga_primary;
4845 
4846 			/*
4847 			 * Even though we don't hold the ms_lock for the
4848 			 * primary metaslab, those fields should not
4849 			 * change while we hold the mg_lock. Thus it is
4850 			 * safe to make assertions on them.
4851 			 */
4852 			ASSERT(msp->ms_primary);
4853 			ASSERT3S(msp->ms_allocator, ==, allocator);
4854 			ASSERT(msp->ms_loaded);
4855 
4856 			was_active = B_TRUE;
4857 			ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
4858 		} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
4859 		    mga->mga_secondary != NULL) {
4860 			msp = mga->mga_secondary;
4861 
4862 			/*
4863 			 * See comment above about the similar assertions
4864 			 * for the primary metaslab.
4865 			 */
4866 			ASSERT(!msp->ms_primary);
4867 			ASSERT3S(msp->ms_allocator, ==, allocator);
4868 			ASSERT(msp->ms_loaded);
4869 
4870 			was_active = B_TRUE;
4871 			ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
4872 		} else {
4873 			msp = find_valid_metaslab(mg, activation_weight, dva, d,
4874 			    want_unique, asize, allocator, try_hard, zal,
4875 			    search, &was_active);
4876 		}
4877 
4878 		mutex_exit(&mg->mg_lock);
4879 		if (msp == NULL) {
4880 			kmem_free(search, sizeof (*search));
4881 			return (-1ULL);
4882 		}
4883 		mutex_enter(&msp->ms_lock);
4884 
4885 		metaslab_active_mask_verify(msp);
4886 
4887 		/*
4888 		 * This code is disabled out because of issues with
4889 		 * tracepoints in non-gpl kernel modules.
4890 		 */
4891 #if 0
4892 		DTRACE_PROBE3(ms__activation__attempt,
4893 		    metaslab_t *, msp, uint64_t, activation_weight,
4894 		    boolean_t, was_active);
4895 #endif
4896 
4897 		/*
4898 		 * Ensure that the metaslab we have selected is still
4899 		 * capable of handling our request. It's possible that
4900 		 * another thread may have changed the weight while we
4901 		 * were blocked on the metaslab lock. We check the
4902 		 * active status first to see if we need to set_selected_txg
4903 		 * a new metaslab.
4904 		 */
4905 		if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
4906 			ASSERT3S(msp->ms_allocator, ==, -1);
4907 			mutex_exit(&msp->ms_lock);
4908 			continue;
4909 		}
4910 
4911 		/*
4912 		 * If the metaslab was activated for another allocator
4913 		 * while we were waiting in the ms_lock above, or it's
4914 		 * a primary and we're seeking a secondary (or vice versa),
4915 		 * we go back and select a new metaslab.
4916 		 */
4917 		if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
4918 		    (msp->ms_allocator != -1) &&
4919 		    (msp->ms_allocator != allocator || ((activation_weight ==
4920 		    METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
4921 			ASSERT(msp->ms_loaded);
4922 			ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) ||
4923 			    msp->ms_allocator != -1);
4924 			mutex_exit(&msp->ms_lock);
4925 			continue;
4926 		}
4927 
4928 		/*
4929 		 * This metaslab was used for claiming regions allocated
4930 		 * by the ZIL during pool import. Once these regions are
4931 		 * claimed we don't need to keep the CLAIM bit set
4932 		 * anymore. Passivate this metaslab to zero its activation
4933 		 * mask.
4934 		 */
4935 		if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
4936 		    activation_weight != METASLAB_WEIGHT_CLAIM) {
4937 			ASSERT(msp->ms_loaded);
4938 			ASSERT3S(msp->ms_allocator, ==, -1);
4939 			metaslab_passivate(msp, msp->ms_weight &
4940 			    ~METASLAB_WEIGHT_CLAIM);
4941 			mutex_exit(&msp->ms_lock);
4942 			continue;
4943 		}
4944 
4945 		metaslab_set_selected_txg(msp, txg);
4946 
4947 		int activation_error =
4948 		    metaslab_activate(msp, allocator, activation_weight);
4949 		metaslab_active_mask_verify(msp);
4950 
4951 		/*
4952 		 * If the metaslab was activated by another thread for
4953 		 * another allocator or activation_weight (EBUSY), or it
4954 		 * failed because another metaslab was assigned as primary
4955 		 * for this allocator (EEXIST) we continue using this
4956 		 * metaslab for our allocation, rather than going on to a
4957 		 * worse metaslab (we waited for that metaslab to be loaded
4958 		 * after all).
4959 		 *
4960 		 * If the activation failed due to an I/O error or ENOSPC we
4961 		 * skip to the next metaslab.
4962 		 */
4963 		boolean_t activated;
4964 		if (activation_error == 0) {
4965 			activated = B_TRUE;
4966 		} else if (activation_error == EBUSY ||
4967 		    activation_error == EEXIST) {
4968 			activated = B_FALSE;
4969 		} else {
4970 			mutex_exit(&msp->ms_lock);
4971 			continue;
4972 		}
4973 		ASSERT(msp->ms_loaded);
4974 
4975 		/*
4976 		 * Now that we have the lock, recheck to see if we should
4977 		 * continue to use this metaslab for this allocation. The
4978 		 * the metaslab is now loaded so metaslab_should_allocate()
4979 		 * can accurately determine if the allocation attempt should
4980 		 * proceed.
4981 		 */
4982 		if (!metaslab_should_allocate(msp, asize, try_hard)) {
4983 			/* Passivate this metaslab and select a new one. */
4984 			metaslab_trace_add(zal, mg, msp, asize, d,
4985 			    TRACE_TOO_SMALL, allocator);
4986 			goto next;
4987 		}
4988 
4989 		/*
4990 		 * If this metaslab is currently condensing then pick again
4991 		 * as we can't manipulate this metaslab until it's committed
4992 		 * to disk. If this metaslab is being initialized, we shouldn't
4993 		 * allocate from it since the allocated region might be
4994 		 * overwritten after allocation.
4995 		 */
4996 		if (msp->ms_condensing) {
4997 			metaslab_trace_add(zal, mg, msp, asize, d,
4998 			    TRACE_CONDENSING, allocator);
4999 			if (activated) {
5000 				metaslab_passivate(msp, msp->ms_weight &
5001 				    ~METASLAB_ACTIVE_MASK);
5002 			}
5003 			mutex_exit(&msp->ms_lock);
5004 			continue;
5005 		} else if (msp->ms_disabled > 0) {
5006 			metaslab_trace_add(zal, mg, msp, asize, d,
5007 			    TRACE_DISABLED, allocator);
5008 			if (activated) {
5009 				metaslab_passivate(msp, msp->ms_weight &
5010 				    ~METASLAB_ACTIVE_MASK);
5011 			}
5012 			mutex_exit(&msp->ms_lock);
5013 			continue;
5014 		}
5015 
5016 		offset = metaslab_block_alloc(msp, asize, txg);
5017 		metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
5018 
5019 		if (offset != -1ULL) {
5020 			/* Proactively passivate the metaslab, if needed */
5021 			if (activated)
5022 				metaslab_segment_may_passivate(msp);
5023 			break;
5024 		}
5025 next:
5026 		ASSERT(msp->ms_loaded);
5027 
5028 		/*
5029 		 * This code is disabled out because of issues with
5030 		 * tracepoints in non-gpl kernel modules.
5031 		 */
5032 #if 0
5033 		DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp,
5034 		    uint64_t, asize);
5035 #endif
5036 
5037 		/*
5038 		 * We were unable to allocate from this metaslab so determine
5039 		 * a new weight for this metaslab. Now that we have loaded
5040 		 * the metaslab we can provide a better hint to the metaslab
5041 		 * selector.
5042 		 *
5043 		 * For space-based metaslabs, we use the maximum block size.
5044 		 * This information is only available when the metaslab
5045 		 * is loaded and is more accurate than the generic free
5046 		 * space weight that was calculated by metaslab_weight().
5047 		 * This information allows us to quickly compare the maximum
5048 		 * available allocation in the metaslab to the allocation
5049 		 * size being requested.
5050 		 *
5051 		 * For segment-based metaslabs, determine the new weight
5052 		 * based on the highest bucket in the range tree. We
5053 		 * explicitly use the loaded segment weight (i.e. the range
5054 		 * tree histogram) since it contains the space that is
5055 		 * currently available for allocation and is accurate
5056 		 * even within a sync pass.
5057 		 */
5058 		uint64_t weight;
5059 		if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
5060 			weight = metaslab_largest_allocatable(msp);
5061 			WEIGHT_SET_SPACEBASED(weight);
5062 		} else {
5063 			weight = metaslab_weight_from_range_tree(msp);
5064 		}
5065 
5066 		if (activated) {
5067 			metaslab_passivate(msp, weight);
5068 		} else {
5069 			/*
5070 			 * For the case where we use the metaslab that is
5071 			 * active for another allocator we want to make
5072 			 * sure that we retain the activation mask.
5073 			 *
5074 			 * Note that we could attempt to use something like
5075 			 * metaslab_recalculate_weight_and_sort() that
5076 			 * retains the activation mask here. That function
5077 			 * uses metaslab_weight() to set the weight though
5078 			 * which is not as accurate as the calculations
5079 			 * above.
5080 			 */
5081 			weight |= msp->ms_weight & METASLAB_ACTIVE_MASK;
5082 			metaslab_group_sort(mg, msp, weight);
5083 		}
5084 		metaslab_active_mask_verify(msp);
5085 
5086 		/*
5087 		 * We have just failed an allocation attempt, check
5088 		 * that metaslab_should_allocate() agrees. Otherwise,
5089 		 * we may end up in an infinite loop retrying the same
5090 		 * metaslab.
5091 		 */
5092 		ASSERT(!metaslab_should_allocate(msp, asize, try_hard));
5093 
5094 		mutex_exit(&msp->ms_lock);
5095 	}
5096 	mutex_exit(&msp->ms_lock);
5097 	kmem_free(search, sizeof (*search));
5098 	return (offset);
5099 }
5100 
5101 static uint64_t
5102 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
5103     uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
5104     int allocator, boolean_t try_hard)
5105 {
5106 	uint64_t offset;
5107 	ASSERT(mg->mg_initialized);
5108 
5109 	offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
5110 	    dva, d, allocator, try_hard);
5111 
5112 	mutex_enter(&mg->mg_lock);
5113 	if (offset == -1ULL) {
5114 		mg->mg_failed_allocations++;
5115 		metaslab_trace_add(zal, mg, NULL, asize, d,
5116 		    TRACE_GROUP_FAILURE, allocator);
5117 		if (asize == SPA_GANGBLOCKSIZE) {
5118 			/*
5119 			 * This metaslab group was unable to allocate
5120 			 * the minimum gang block size so it must be out of
5121 			 * space. We must notify the allocation throttle
5122 			 * to start skipping allocation attempts to this
5123 			 * metaslab group until more space becomes available.
5124 			 * Note: this failure cannot be caused by the
5125 			 * allocation throttle since the allocation throttle
5126 			 * is only responsible for skipping devices and
5127 			 * not failing block allocations.
5128 			 */
5129 			mg->mg_no_free_space = B_TRUE;
5130 		}
5131 	}
5132 	mg->mg_allocations++;
5133 	mutex_exit(&mg->mg_lock);
5134 	return (offset);
5135 }
5136 
5137 /*
5138  * Allocate a block for the specified i/o.
5139  */
5140 int
5141 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
5142     dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
5143     zio_alloc_list_t *zal, int allocator)
5144 {
5145 	metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
5146 	metaslab_group_t *mg, *rotor;
5147 	vdev_t *vd;
5148 	boolean_t try_hard = B_FALSE;
5149 
5150 	ASSERT(!DVA_IS_VALID(&dva[d]));
5151 
5152 	/*
5153 	 * For testing, make some blocks above a certain size be gang blocks.
5154 	 * This will result in more split blocks when using device removal,
5155 	 * and a large number of split blocks coupled with ztest-induced
5156 	 * damage can result in extremely long reconstruction times.  This
5157 	 * will also test spilling from special to normal.
5158 	 */
5159 	if (psize >= metaslab_force_ganging &&
5160 	    metaslab_force_ganging_pct > 0 &&
5161 	    (random_in_range(100) < MIN(metaslab_force_ganging_pct, 100))) {
5162 		metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
5163 		    allocator);
5164 		return (SET_ERROR(ENOSPC));
5165 	}
5166 
5167 	/*
5168 	 * Start at the rotor and loop through all mgs until we find something.
5169 	 * Note that there's no locking on mca_rotor or mca_aliquot because
5170 	 * nothing actually breaks if we miss a few updates -- we just won't
5171 	 * allocate quite as evenly.  It all balances out over time.
5172 	 *
5173 	 * If we are doing ditto or log blocks, try to spread them across
5174 	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
5175 	 * allocated all of our ditto blocks, then try and spread them out on
5176 	 * that vdev as much as possible.  If it turns out to not be possible,
5177 	 * gradually lower our standards until anything becomes acceptable.
5178 	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
5179 	 * gives us hope of containing our fault domains to something we're
5180 	 * able to reason about.  Otherwise, any two top-level vdev failures
5181 	 * will guarantee the loss of data.  With consecutive allocation,
5182 	 * only two adjacent top-level vdev failures will result in data loss.
5183 	 *
5184 	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
5185 	 * ourselves on the same vdev as our gang block header.  That
5186 	 * way, we can hope for locality in vdev_cache, plus it makes our
5187 	 * fault domains something tractable.
5188 	 */
5189 	if (hintdva) {
5190 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
5191 
5192 		/*
5193 		 * It's possible the vdev we're using as the hint no
5194 		 * longer exists or its mg has been closed (e.g. by
5195 		 * device removal).  Consult the rotor when
5196 		 * all else fails.
5197 		 */
5198 		if (vd != NULL && vd->vdev_mg != NULL) {
5199 			mg = vdev_get_mg(vd, mc);
5200 
5201 			if (flags & METASLAB_HINTBP_AVOID)
5202 				mg = mg->mg_next;
5203 		} else {
5204 			mg = mca->mca_rotor;
5205 		}
5206 	} else if (d != 0) {
5207 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
5208 		mg = vd->vdev_mg->mg_next;
5209 	} else {
5210 		ASSERT(mca->mca_rotor != NULL);
5211 		mg = mca->mca_rotor;
5212 	}
5213 
5214 	/*
5215 	 * If the hint put us into the wrong metaslab class, or into a
5216 	 * metaslab group that has been passivated, just follow the rotor.
5217 	 */
5218 	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
5219 		mg = mca->mca_rotor;
5220 
5221 	rotor = mg;
5222 top:
5223 	do {
5224 		boolean_t allocatable;
5225 
5226 		ASSERT(mg->mg_activation_count == 1);
5227 		vd = mg->mg_vd;
5228 
5229 		/*
5230 		 * Don't allocate from faulted devices.
5231 		 */
5232 		if (try_hard) {
5233 			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
5234 			allocatable = vdev_allocatable(vd);
5235 			spa_config_exit(spa, SCL_ZIO, FTAG);
5236 		} else {
5237 			allocatable = vdev_allocatable(vd);
5238 		}
5239 
5240 		/*
5241 		 * Determine if the selected metaslab group is eligible
5242 		 * for allocations. If we're ganging then don't allow
5243 		 * this metaslab group to skip allocations since that would
5244 		 * inadvertently return ENOSPC and suspend the pool
5245 		 * even though space is still available.
5246 		 */
5247 		if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
5248 			allocatable = metaslab_group_allocatable(mg, rotor,
5249 			    flags, psize, allocator, d);
5250 		}
5251 
5252 		if (!allocatable) {
5253 			metaslab_trace_add(zal, mg, NULL, psize, d,
5254 			    TRACE_NOT_ALLOCATABLE, allocator);
5255 			goto next;
5256 		}
5257 
5258 		ASSERT(mg->mg_initialized);
5259 
5260 		/*
5261 		 * Avoid writing single-copy data to an unhealthy,
5262 		 * non-redundant vdev, unless we've already tried all
5263 		 * other vdevs.
5264 		 */
5265 		if (vd->vdev_state < VDEV_STATE_HEALTHY &&
5266 		    d == 0 && !try_hard && vd->vdev_children == 0) {
5267 			metaslab_trace_add(zal, mg, NULL, psize, d,
5268 			    TRACE_VDEV_ERROR, allocator);
5269 			goto next;
5270 		}
5271 
5272 		ASSERT(mg->mg_class == mc);
5273 
5274 		uint64_t asize = vdev_psize_to_asize(vd, psize);
5275 		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
5276 
5277 		/*
5278 		 * If we don't need to try hard, then require that the
5279 		 * block be on a different metaslab from any other DVAs
5280 		 * in this BP (unique=true).  If we are trying hard, then
5281 		 * allow any metaslab to be used (unique=false).
5282 		 */
5283 		uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
5284 		    !try_hard, dva, d, allocator, try_hard);
5285 
5286 		if (offset != -1ULL) {
5287 			/*
5288 			 * If we've just selected this metaslab group,
5289 			 * figure out whether the corresponding vdev is
5290 			 * over- or under-used relative to the pool,
5291 			 * and set an allocation bias to even it out.
5292 			 *
5293 			 * Bias is also used to compensate for unequally
5294 			 * sized vdevs so that space is allocated fairly.
5295 			 */
5296 			if (mca->mca_aliquot == 0 && metaslab_bias_enabled) {
5297 				vdev_stat_t *vs = &vd->vdev_stat;
5298 				int64_t vs_free = vs->vs_space - vs->vs_alloc;
5299 				int64_t mc_free = mc->mc_space - mc->mc_alloc;
5300 				int64_t ratio;
5301 
5302 				/*
5303 				 * Calculate how much more or less we should
5304 				 * try to allocate from this device during
5305 				 * this iteration around the rotor.
5306 				 *
5307 				 * This basically introduces a zero-centered
5308 				 * bias towards the devices with the most
5309 				 * free space, while compensating for vdev
5310 				 * size differences.
5311 				 *
5312 				 * Examples:
5313 				 *  vdev V1 = 16M/128M
5314 				 *  vdev V2 = 16M/128M
5315 				 *  ratio(V1) = 100% ratio(V2) = 100%
5316 				 *
5317 				 *  vdev V1 = 16M/128M
5318 				 *  vdev V2 = 64M/128M
5319 				 *  ratio(V1) = 127% ratio(V2) =  72%
5320 				 *
5321 				 *  vdev V1 = 16M/128M
5322 				 *  vdev V2 = 64M/512M
5323 				 *  ratio(V1) =  40% ratio(V2) = 160%
5324 				 */
5325 				ratio = (vs_free * mc->mc_alloc_groups * 100) /
5326 				    (mc_free + 1);
5327 				mg->mg_bias = ((ratio - 100) *
5328 				    (int64_t)mg->mg_aliquot) / 100;
5329 			} else if (!metaslab_bias_enabled) {
5330 				mg->mg_bias = 0;
5331 			}
5332 
5333 			if ((flags & METASLAB_ZIL) ||
5334 			    atomic_add_64_nv(&mca->mca_aliquot, asize) >=
5335 			    mg->mg_aliquot + mg->mg_bias) {
5336 				mca->mca_rotor = mg->mg_next;
5337 				mca->mca_aliquot = 0;
5338 			}
5339 
5340 			DVA_SET_VDEV(&dva[d], vd->vdev_id);
5341 			DVA_SET_OFFSET(&dva[d], offset);
5342 			DVA_SET_GANG(&dva[d],
5343 			    ((flags & METASLAB_GANG_HEADER) ? 1 : 0));
5344 			DVA_SET_ASIZE(&dva[d], asize);
5345 
5346 			return (0);
5347 		}
5348 next:
5349 		mca->mca_rotor = mg->mg_next;
5350 		mca->mca_aliquot = 0;
5351 	} while ((mg = mg->mg_next) != rotor);
5352 
5353 	/*
5354 	 * If we haven't tried hard, perhaps do so now.
5355 	 */
5356 	if (!try_hard && (zfs_metaslab_try_hard_before_gang ||
5357 	    GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 ||
5358 	    psize <= 1 << spa->spa_min_ashift)) {
5359 		METASLABSTAT_BUMP(metaslabstat_try_hard);
5360 		try_hard = B_TRUE;
5361 		goto top;
5362 	}
5363 
5364 	memset(&dva[d], 0, sizeof (dva_t));
5365 
5366 	metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
5367 	return (SET_ERROR(ENOSPC));
5368 }
5369 
5370 void
5371 metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
5372     boolean_t checkpoint)
5373 {
5374 	metaslab_t *msp;
5375 	spa_t *spa = vd->vdev_spa;
5376 
5377 	ASSERT(vdev_is_concrete(vd));
5378 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5379 	ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
5380 
5381 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5382 
5383 	VERIFY(!msp->ms_condensing);
5384 	VERIFY3U(offset, >=, msp->ms_start);
5385 	VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
5386 	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5387 	VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
5388 
5389 	metaslab_check_free_impl(vd, offset, asize);
5390 
5391 	mutex_enter(&msp->ms_lock);
5392 	if (range_tree_is_empty(msp->ms_freeing) &&
5393 	    range_tree_is_empty(msp->ms_checkpointing)) {
5394 		vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
5395 	}
5396 
5397 	if (checkpoint) {
5398 		ASSERT(spa_has_checkpoint(spa));
5399 		range_tree_add(msp->ms_checkpointing, offset, asize);
5400 	} else {
5401 		range_tree_add(msp->ms_freeing, offset, asize);
5402 	}
5403 	mutex_exit(&msp->ms_lock);
5404 }
5405 
5406 void
5407 metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5408     uint64_t size, void *arg)
5409 {
5410 	(void) inner_offset;
5411 	boolean_t *checkpoint = arg;
5412 
5413 	ASSERT3P(checkpoint, !=, NULL);
5414 
5415 	if (vd->vdev_ops->vdev_op_remap != NULL)
5416 		vdev_indirect_mark_obsolete(vd, offset, size);
5417 	else
5418 		metaslab_free_impl(vd, offset, size, *checkpoint);
5419 }
5420 
5421 static void
5422 metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
5423     boolean_t checkpoint)
5424 {
5425 	spa_t *spa = vd->vdev_spa;
5426 
5427 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5428 
5429 	if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
5430 		return;
5431 
5432 	if (spa->spa_vdev_removal != NULL &&
5433 	    spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
5434 	    vdev_is_concrete(vd)) {
5435 		/*
5436 		 * Note: we check if the vdev is concrete because when
5437 		 * we complete the removal, we first change the vdev to be
5438 		 * an indirect vdev (in open context), and then (in syncing
5439 		 * context) clear spa_vdev_removal.
5440 		 */
5441 		free_from_removing_vdev(vd, offset, size);
5442 	} else if (vd->vdev_ops->vdev_op_remap != NULL) {
5443 		vdev_indirect_mark_obsolete(vd, offset, size);
5444 		vd->vdev_ops->vdev_op_remap(vd, offset, size,
5445 		    metaslab_free_impl_cb, &checkpoint);
5446 	} else {
5447 		metaslab_free_concrete(vd, offset, size, checkpoint);
5448 	}
5449 }
5450 
5451 typedef struct remap_blkptr_cb_arg {
5452 	blkptr_t *rbca_bp;
5453 	spa_remap_cb_t rbca_cb;
5454 	vdev_t *rbca_remap_vd;
5455 	uint64_t rbca_remap_offset;
5456 	void *rbca_cb_arg;
5457 } remap_blkptr_cb_arg_t;
5458 
5459 static void
5460 remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5461     uint64_t size, void *arg)
5462 {
5463 	remap_blkptr_cb_arg_t *rbca = arg;
5464 	blkptr_t *bp = rbca->rbca_bp;
5465 
5466 	/* We can not remap split blocks. */
5467 	if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
5468 		return;
5469 	ASSERT0(inner_offset);
5470 
5471 	if (rbca->rbca_cb != NULL) {
5472 		/*
5473 		 * At this point we know that we are not handling split
5474 		 * blocks and we invoke the callback on the previous
5475 		 * vdev which must be indirect.
5476 		 */
5477 		ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
5478 
5479 		rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
5480 		    rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
5481 
5482 		/* set up remap_blkptr_cb_arg for the next call */
5483 		rbca->rbca_remap_vd = vd;
5484 		rbca->rbca_remap_offset = offset;
5485 	}
5486 
5487 	/*
5488 	 * The phys birth time is that of dva[0].  This ensures that we know
5489 	 * when each dva was written, so that resilver can determine which
5490 	 * blocks need to be scrubbed (i.e. those written during the time
5491 	 * the vdev was offline).  It also ensures that the key used in
5492 	 * the ARC hash table is unique (i.e. dva[0] + phys_birth).  If
5493 	 * we didn't change the phys_birth, a lookup in the ARC for a
5494 	 * remapped BP could find the data that was previously stored at
5495 	 * this vdev + offset.
5496 	 */
5497 	vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
5498 	    DVA_GET_VDEV(&bp->blk_dva[0]));
5499 	vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
5500 	bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
5501 	    DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
5502 
5503 	DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
5504 	DVA_SET_OFFSET(&bp->blk_dva[0], offset);
5505 }
5506 
5507 /*
5508  * If the block pointer contains any indirect DVAs, modify them to refer to
5509  * concrete DVAs.  Note that this will sometimes not be possible, leaving
5510  * the indirect DVA in place.  This happens if the indirect DVA spans multiple
5511  * segments in the mapping (i.e. it is a "split block").
5512  *
5513  * If the BP was remapped, calls the callback on the original dva (note the
5514  * callback can be called multiple times if the original indirect DVA refers
5515  * to another indirect DVA, etc).
5516  *
5517  * Returns TRUE if the BP was remapped.
5518  */
5519 boolean_t
5520 spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
5521 {
5522 	remap_blkptr_cb_arg_t rbca;
5523 
5524 	if (!zfs_remap_blkptr_enable)
5525 		return (B_FALSE);
5526 
5527 	if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
5528 		return (B_FALSE);
5529 
5530 	/*
5531 	 * Dedup BP's can not be remapped, because ddt_phys_select() depends
5532 	 * on DVA[0] being the same in the BP as in the DDT (dedup table).
5533 	 */
5534 	if (BP_GET_DEDUP(bp))
5535 		return (B_FALSE);
5536 
5537 	/*
5538 	 * Gang blocks can not be remapped, because
5539 	 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
5540 	 * the BP used to read the gang block header (GBH) being the same
5541 	 * as the DVA[0] that we allocated for the GBH.
5542 	 */
5543 	if (BP_IS_GANG(bp))
5544 		return (B_FALSE);
5545 
5546 	/*
5547 	 * Embedded BP's have no DVA to remap.
5548 	 */
5549 	if (BP_GET_NDVAS(bp) < 1)
5550 		return (B_FALSE);
5551 
5552 	/*
5553 	 * Note: we only remap dva[0].  If we remapped other dvas, we
5554 	 * would no longer know what their phys birth txg is.
5555 	 */
5556 	dva_t *dva = &bp->blk_dva[0];
5557 
5558 	uint64_t offset = DVA_GET_OFFSET(dva);
5559 	uint64_t size = DVA_GET_ASIZE(dva);
5560 	vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
5561 
5562 	if (vd->vdev_ops->vdev_op_remap == NULL)
5563 		return (B_FALSE);
5564 
5565 	rbca.rbca_bp = bp;
5566 	rbca.rbca_cb = callback;
5567 	rbca.rbca_remap_vd = vd;
5568 	rbca.rbca_remap_offset = offset;
5569 	rbca.rbca_cb_arg = arg;
5570 
5571 	/*
5572 	 * remap_blkptr_cb() will be called in order for each level of
5573 	 * indirection, until a concrete vdev is reached or a split block is
5574 	 * encountered. old_vd and old_offset are updated within the callback
5575 	 * as we go from the one indirect vdev to the next one (either concrete
5576 	 * or indirect again) in that order.
5577 	 */
5578 	vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
5579 
5580 	/* Check if the DVA wasn't remapped because it is a split block */
5581 	if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
5582 		return (B_FALSE);
5583 
5584 	return (B_TRUE);
5585 }
5586 
5587 /*
5588  * Undo the allocation of a DVA which happened in the given transaction group.
5589  */
5590 void
5591 metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5592 {
5593 	metaslab_t *msp;
5594 	vdev_t *vd;
5595 	uint64_t vdev = DVA_GET_VDEV(dva);
5596 	uint64_t offset = DVA_GET_OFFSET(dva);
5597 	uint64_t size = DVA_GET_ASIZE(dva);
5598 
5599 	ASSERT(DVA_IS_VALID(dva));
5600 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5601 
5602 	if (txg > spa_freeze_txg(spa))
5603 		return;
5604 
5605 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
5606 	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
5607 		zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
5608 		    (u_longlong_t)vdev, (u_longlong_t)offset,
5609 		    (u_longlong_t)size);
5610 		return;
5611 	}
5612 
5613 	ASSERT(!vd->vdev_removing);
5614 	ASSERT(vdev_is_concrete(vd));
5615 	ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
5616 	ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
5617 
5618 	if (DVA_GET_GANG(dva))
5619 		size = vdev_gang_header_asize(vd);
5620 
5621 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5622 
5623 	mutex_enter(&msp->ms_lock);
5624 	range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
5625 	    offset, size);
5626 	msp->ms_allocating_total -= size;
5627 
5628 	VERIFY(!msp->ms_condensing);
5629 	VERIFY3U(offset, >=, msp->ms_start);
5630 	VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
5631 	VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
5632 	    msp->ms_size);
5633 	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5634 	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
5635 	range_tree_add(msp->ms_allocatable, offset, size);
5636 	mutex_exit(&msp->ms_lock);
5637 }
5638 
5639 /*
5640  * Free the block represented by the given DVA.
5641  */
5642 void
5643 metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
5644 {
5645 	uint64_t vdev = DVA_GET_VDEV(dva);
5646 	uint64_t offset = DVA_GET_OFFSET(dva);
5647 	uint64_t size = DVA_GET_ASIZE(dva);
5648 	vdev_t *vd = vdev_lookup_top(spa, vdev);
5649 
5650 	ASSERT(DVA_IS_VALID(dva));
5651 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5652 
5653 	if (DVA_GET_GANG(dva)) {
5654 		size = vdev_gang_header_asize(vd);
5655 	}
5656 
5657 	metaslab_free_impl(vd, offset, size, checkpoint);
5658 }
5659 
5660 /*
5661  * Reserve some allocation slots. The reservation system must be called
5662  * before we call into the allocator. If there aren't any available slots
5663  * then the I/O will be throttled until an I/O completes and its slots are
5664  * freed up. The function returns true if it was successful in placing
5665  * the reservation.
5666  */
5667 boolean_t
5668 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
5669     zio_t *zio, int flags)
5670 {
5671 	metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
5672 	uint64_t max = mca->mca_alloc_max_slots;
5673 
5674 	ASSERT(mc->mc_alloc_throttle_enabled);
5675 	if (GANG_ALLOCATION(flags) || (flags & METASLAB_MUST_RESERVE) ||
5676 	    zfs_refcount_count(&mca->mca_alloc_slots) + slots <= max) {
5677 		/*
5678 		 * The potential race between _count() and _add() is covered
5679 		 * by the allocator lock in most cases, or irrelevant due to
5680 		 * GANG_ALLOCATION() or METASLAB_MUST_RESERVE set in others.
5681 		 * But even if we assume some other non-existing scenario, the
5682 		 * worst that can happen is few more I/Os get to allocation
5683 		 * earlier, that is not a problem.
5684 		 *
5685 		 * We reserve the slots individually so that we can unreserve
5686 		 * them individually when an I/O completes.
5687 		 */
5688 		zfs_refcount_add_few(&mca->mca_alloc_slots, slots, zio);
5689 		zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
5690 		return (B_TRUE);
5691 	}
5692 	return (B_FALSE);
5693 }
5694 
5695 void
5696 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
5697     int allocator, zio_t *zio)
5698 {
5699 	metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
5700 
5701 	ASSERT(mc->mc_alloc_throttle_enabled);
5702 	zfs_refcount_remove_few(&mca->mca_alloc_slots, slots, zio);
5703 }
5704 
5705 static int
5706 metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
5707     uint64_t txg)
5708 {
5709 	metaslab_t *msp;
5710 	spa_t *spa = vd->vdev_spa;
5711 	int error = 0;
5712 
5713 	if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
5714 		return (SET_ERROR(ENXIO));
5715 
5716 	ASSERT3P(vd->vdev_ms, !=, NULL);
5717 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5718 
5719 	mutex_enter(&msp->ms_lock);
5720 
5721 	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
5722 		error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
5723 		if (error == EBUSY) {
5724 			ASSERT(msp->ms_loaded);
5725 			ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
5726 			error = 0;
5727 		}
5728 	}
5729 
5730 	if (error == 0 &&
5731 	    !range_tree_contains(msp->ms_allocatable, offset, size))
5732 		error = SET_ERROR(ENOENT);
5733 
5734 	if (error || txg == 0) {	/* txg == 0 indicates dry run */
5735 		mutex_exit(&msp->ms_lock);
5736 		return (error);
5737 	}
5738 
5739 	VERIFY(!msp->ms_condensing);
5740 	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5741 	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
5742 	VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
5743 	    msp->ms_size);
5744 	range_tree_remove(msp->ms_allocatable, offset, size);
5745 	range_tree_clear(msp->ms_trim, offset, size);
5746 
5747 	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(8) */
5748 		metaslab_class_t *mc = msp->ms_group->mg_class;
5749 		multilist_sublist_t *mls =
5750 		    multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
5751 		if (!multilist_link_active(&msp->ms_class_txg_node)) {
5752 			msp->ms_selected_txg = txg;
5753 			multilist_sublist_insert_head(mls, msp);
5754 		}
5755 		multilist_sublist_unlock(mls);
5756 
5757 		if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
5758 			vdev_dirty(vd, VDD_METASLAB, msp, txg);
5759 		range_tree_add(msp->ms_allocating[txg & TXG_MASK],
5760 		    offset, size);
5761 		msp->ms_allocating_total += size;
5762 	}
5763 
5764 	mutex_exit(&msp->ms_lock);
5765 
5766 	return (0);
5767 }
5768 
5769 typedef struct metaslab_claim_cb_arg_t {
5770 	uint64_t	mcca_txg;
5771 	int		mcca_error;
5772 } metaslab_claim_cb_arg_t;
5773 
5774 static void
5775 metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5776     uint64_t size, void *arg)
5777 {
5778 	(void) inner_offset;
5779 	metaslab_claim_cb_arg_t *mcca_arg = arg;
5780 
5781 	if (mcca_arg->mcca_error == 0) {
5782 		mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
5783 		    size, mcca_arg->mcca_txg);
5784 	}
5785 }
5786 
5787 int
5788 metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
5789 {
5790 	if (vd->vdev_ops->vdev_op_remap != NULL) {
5791 		metaslab_claim_cb_arg_t arg;
5792 
5793 		/*
5794 		 * Only zdb(8) can claim on indirect vdevs.  This is used
5795 		 * to detect leaks of mapped space (that are not accounted
5796 		 * for in the obsolete counts, spacemap, or bpobj).
5797 		 */
5798 		ASSERT(!spa_writeable(vd->vdev_spa));
5799 		arg.mcca_error = 0;
5800 		arg.mcca_txg = txg;
5801 
5802 		vd->vdev_ops->vdev_op_remap(vd, offset, size,
5803 		    metaslab_claim_impl_cb, &arg);
5804 
5805 		if (arg.mcca_error == 0) {
5806 			arg.mcca_error = metaslab_claim_concrete(vd,
5807 			    offset, size, txg);
5808 		}
5809 		return (arg.mcca_error);
5810 	} else {
5811 		return (metaslab_claim_concrete(vd, offset, size, txg));
5812 	}
5813 }
5814 
5815 /*
5816  * Intent log support: upon opening the pool after a crash, notify the SPA
5817  * of blocks that the intent log has allocated for immediate write, but
5818  * which are still considered free by the SPA because the last transaction
5819  * group didn't commit yet.
5820  */
5821 static int
5822 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5823 {
5824 	uint64_t vdev = DVA_GET_VDEV(dva);
5825 	uint64_t offset = DVA_GET_OFFSET(dva);
5826 	uint64_t size = DVA_GET_ASIZE(dva);
5827 	vdev_t *vd;
5828 
5829 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
5830 		return (SET_ERROR(ENXIO));
5831 	}
5832 
5833 	ASSERT(DVA_IS_VALID(dva));
5834 
5835 	if (DVA_GET_GANG(dva))
5836 		size = vdev_gang_header_asize(vd);
5837 
5838 	return (metaslab_claim_impl(vd, offset, size, txg));
5839 }
5840 
5841 int
5842 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
5843     int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
5844     zio_alloc_list_t *zal, zio_t *zio, int allocator)
5845 {
5846 	dva_t *dva = bp->blk_dva;
5847 	dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
5848 	int error = 0;
5849 
5850 	ASSERT(bp->blk_birth == 0);
5851 	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
5852 
5853 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
5854 
5855 	if (mc->mc_allocator[allocator].mca_rotor == NULL) {
5856 		/* no vdevs in this class */
5857 		spa_config_exit(spa, SCL_ALLOC, FTAG);
5858 		return (SET_ERROR(ENOSPC));
5859 	}
5860 
5861 	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
5862 	ASSERT(BP_GET_NDVAS(bp) == 0);
5863 	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
5864 	ASSERT3P(zal, !=, NULL);
5865 
5866 	for (int d = 0; d < ndvas; d++) {
5867 		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
5868 		    txg, flags, zal, allocator);
5869 		if (error != 0) {
5870 			for (d--; d >= 0; d--) {
5871 				metaslab_unalloc_dva(spa, &dva[d], txg);
5872 				metaslab_group_alloc_decrement(spa,
5873 				    DVA_GET_VDEV(&dva[d]), zio, flags,
5874 				    allocator, B_FALSE);
5875 				memset(&dva[d], 0, sizeof (dva_t));
5876 			}
5877 			spa_config_exit(spa, SCL_ALLOC, FTAG);
5878 			return (error);
5879 		} else {
5880 			/*
5881 			 * Update the metaslab group's queue depth
5882 			 * based on the newly allocated dva.
5883 			 */
5884 			metaslab_group_alloc_increment(spa,
5885 			    DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
5886 		}
5887 	}
5888 	ASSERT(error == 0);
5889 	ASSERT(BP_GET_NDVAS(bp) == ndvas);
5890 
5891 	spa_config_exit(spa, SCL_ALLOC, FTAG);
5892 
5893 	BP_SET_BIRTH(bp, txg, 0);
5894 
5895 	return (0);
5896 }
5897 
5898 void
5899 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
5900 {
5901 	const dva_t *dva = bp->blk_dva;
5902 	int ndvas = BP_GET_NDVAS(bp);
5903 
5904 	ASSERT(!BP_IS_HOLE(bp));
5905 	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
5906 
5907 	/*
5908 	 * If we have a checkpoint for the pool we need to make sure that
5909 	 * the blocks that we free that are part of the checkpoint won't be
5910 	 * reused until the checkpoint is discarded or we revert to it.
5911 	 *
5912 	 * The checkpoint flag is passed down the metaslab_free code path
5913 	 * and is set whenever we want to add a block to the checkpoint's
5914 	 * accounting. That is, we "checkpoint" blocks that existed at the
5915 	 * time the checkpoint was created and are therefore referenced by
5916 	 * the checkpointed uberblock.
5917 	 *
5918 	 * Note that, we don't checkpoint any blocks if the current
5919 	 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
5920 	 * normally as they will be referenced by the checkpointed uberblock.
5921 	 */
5922 	boolean_t checkpoint = B_FALSE;
5923 	if (bp->blk_birth <= spa->spa_checkpoint_txg &&
5924 	    spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
5925 		/*
5926 		 * At this point, if the block is part of the checkpoint
5927 		 * there is no way it was created in the current txg.
5928 		 */
5929 		ASSERT(!now);
5930 		ASSERT3U(spa_syncing_txg(spa), ==, txg);
5931 		checkpoint = B_TRUE;
5932 	}
5933 
5934 	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
5935 
5936 	for (int d = 0; d < ndvas; d++) {
5937 		if (now) {
5938 			metaslab_unalloc_dva(spa, &dva[d], txg);
5939 		} else {
5940 			ASSERT3U(txg, ==, spa_syncing_txg(spa));
5941 			metaslab_free_dva(spa, &dva[d], checkpoint);
5942 		}
5943 	}
5944 
5945 	spa_config_exit(spa, SCL_FREE, FTAG);
5946 }
5947 
5948 int
5949 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
5950 {
5951 	const dva_t *dva = bp->blk_dva;
5952 	int ndvas = BP_GET_NDVAS(bp);
5953 	int error = 0;
5954 
5955 	ASSERT(!BP_IS_HOLE(bp));
5956 
5957 	if (txg != 0) {
5958 		/*
5959 		 * First do a dry run to make sure all DVAs are claimable,
5960 		 * so we don't have to unwind from partial failures below.
5961 		 */
5962 		if ((error = metaslab_claim(spa, bp, 0)) != 0)
5963 			return (error);
5964 	}
5965 
5966 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
5967 
5968 	for (int d = 0; d < ndvas; d++) {
5969 		error = metaslab_claim_dva(spa, &dva[d], txg);
5970 		if (error != 0)
5971 			break;
5972 	}
5973 
5974 	spa_config_exit(spa, SCL_ALLOC, FTAG);
5975 
5976 	ASSERT(error == 0 || txg == 0);
5977 
5978 	return (error);
5979 }
5980 
5981 static void
5982 metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
5983     uint64_t size, void *arg)
5984 {
5985 	(void) inner, (void) arg;
5986 
5987 	if (vd->vdev_ops == &vdev_indirect_ops)
5988 		return;
5989 
5990 	metaslab_check_free_impl(vd, offset, size);
5991 }
5992 
5993 static void
5994 metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
5995 {
5996 	metaslab_t *msp;
5997 	spa_t *spa __maybe_unused = vd->vdev_spa;
5998 
5999 	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
6000 		return;
6001 
6002 	if (vd->vdev_ops->vdev_op_remap != NULL) {
6003 		vd->vdev_ops->vdev_op_remap(vd, offset, size,
6004 		    metaslab_check_free_impl_cb, NULL);
6005 		return;
6006 	}
6007 
6008 	ASSERT(vdev_is_concrete(vd));
6009 	ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
6010 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
6011 
6012 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
6013 
6014 	mutex_enter(&msp->ms_lock);
6015 	if (msp->ms_loaded) {
6016 		range_tree_verify_not_present(msp->ms_allocatable,
6017 		    offset, size);
6018 	}
6019 
6020 	/*
6021 	 * Check all segments that currently exist in the freeing pipeline.
6022 	 *
6023 	 * It would intuitively make sense to also check the current allocating
6024 	 * tree since metaslab_unalloc_dva() exists for extents that are
6025 	 * allocated and freed in the same sync pass within the same txg.
6026 	 * Unfortunately there are places (e.g. the ZIL) where we allocate a
6027 	 * segment but then we free part of it within the same txg
6028 	 * [see zil_sync()]. Thus, we don't call range_tree_verify() in the
6029 	 * current allocating tree.
6030 	 */
6031 	range_tree_verify_not_present(msp->ms_freeing, offset, size);
6032 	range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
6033 	range_tree_verify_not_present(msp->ms_freed, offset, size);
6034 	for (int j = 0; j < TXG_DEFER_SIZE; j++)
6035 		range_tree_verify_not_present(msp->ms_defer[j], offset, size);
6036 	range_tree_verify_not_present(msp->ms_trim, offset, size);
6037 	mutex_exit(&msp->ms_lock);
6038 }
6039 
6040 void
6041 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
6042 {
6043 	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
6044 		return;
6045 
6046 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
6047 	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
6048 		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
6049 		vdev_t *vd = vdev_lookup_top(spa, vdev);
6050 		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
6051 		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
6052 
6053 		if (DVA_GET_GANG(&bp->blk_dva[i]))
6054 			size = vdev_gang_header_asize(vd);
6055 
6056 		ASSERT3P(vd, !=, NULL);
6057 
6058 		metaslab_check_free_impl(vd, offset, size);
6059 	}
6060 	spa_config_exit(spa, SCL_VDEV, FTAG);
6061 }
6062 
6063 static void
6064 metaslab_group_disable_wait(metaslab_group_t *mg)
6065 {
6066 	ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
6067 	while (mg->mg_disabled_updating) {
6068 		cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
6069 	}
6070 }
6071 
6072 static void
6073 metaslab_group_disabled_increment(metaslab_group_t *mg)
6074 {
6075 	ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
6076 	ASSERT(mg->mg_disabled_updating);
6077 
6078 	while (mg->mg_ms_disabled >= max_disabled_ms) {
6079 		cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
6080 	}
6081 	mg->mg_ms_disabled++;
6082 	ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
6083 }
6084 
6085 /*
6086  * Mark the metaslab as disabled to prevent any allocations on this metaslab.
6087  * We must also track how many metaslabs are currently disabled within a
6088  * metaslab group and limit them to prevent allocation failures from
6089  * occurring because all metaslabs are disabled.
6090  */
6091 void
6092 metaslab_disable(metaslab_t *msp)
6093 {
6094 	ASSERT(!MUTEX_HELD(&msp->ms_lock));
6095 	metaslab_group_t *mg = msp->ms_group;
6096 
6097 	mutex_enter(&mg->mg_ms_disabled_lock);
6098 
6099 	/*
6100 	 * To keep an accurate count of how many threads have disabled
6101 	 * a specific metaslab group, we only allow one thread to mark
6102 	 * the metaslab group at a time. This ensures that the value of
6103 	 * ms_disabled will be accurate when we decide to mark a metaslab
6104 	 * group as disabled. To do this we force all other threads
6105 	 * to wait till the metaslab's mg_disabled_updating flag is no
6106 	 * longer set.
6107 	 */
6108 	metaslab_group_disable_wait(mg);
6109 	mg->mg_disabled_updating = B_TRUE;
6110 	if (msp->ms_disabled == 0) {
6111 		metaslab_group_disabled_increment(mg);
6112 	}
6113 	mutex_enter(&msp->ms_lock);
6114 	msp->ms_disabled++;
6115 	mutex_exit(&msp->ms_lock);
6116 
6117 	mg->mg_disabled_updating = B_FALSE;
6118 	cv_broadcast(&mg->mg_ms_disabled_cv);
6119 	mutex_exit(&mg->mg_ms_disabled_lock);
6120 }
6121 
6122 void
6123 metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload)
6124 {
6125 	metaslab_group_t *mg = msp->ms_group;
6126 	spa_t *spa = mg->mg_vd->vdev_spa;
6127 
6128 	/*
6129 	 * Wait for the outstanding IO to be synced to prevent newly
6130 	 * allocated blocks from being overwritten.  This used by
6131 	 * initialize and TRIM which are modifying unallocated space.
6132 	 */
6133 	if (sync)
6134 		txg_wait_synced(spa_get_dsl(spa), 0);
6135 
6136 	mutex_enter(&mg->mg_ms_disabled_lock);
6137 	mutex_enter(&msp->ms_lock);
6138 	if (--msp->ms_disabled == 0) {
6139 		mg->mg_ms_disabled--;
6140 		cv_broadcast(&mg->mg_ms_disabled_cv);
6141 		if (unload)
6142 			metaslab_unload(msp);
6143 	}
6144 	mutex_exit(&msp->ms_lock);
6145 	mutex_exit(&mg->mg_ms_disabled_lock);
6146 }
6147 
6148 void
6149 metaslab_set_unflushed_dirty(metaslab_t *ms, boolean_t dirty)
6150 {
6151 	ms->ms_unflushed_dirty = dirty;
6152 }
6153 
6154 static void
6155 metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
6156 {
6157 	vdev_t *vd = ms->ms_group->mg_vd;
6158 	spa_t *spa = vd->vdev_spa;
6159 	objset_t *mos = spa_meta_objset(spa);
6160 
6161 	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
6162 
6163 	metaslab_unflushed_phys_t entry = {
6164 		.msp_unflushed_txg = metaslab_unflushed_txg(ms),
6165 	};
6166 	uint64_t entry_size = sizeof (entry);
6167 	uint64_t entry_offset = ms->ms_id * entry_size;
6168 
6169 	uint64_t object = 0;
6170 	int err = zap_lookup(mos, vd->vdev_top_zap,
6171 	    VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
6172 	    &object);
6173 	if (err == ENOENT) {
6174 		object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA,
6175 		    SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
6176 		VERIFY0(zap_add(mos, vd->vdev_top_zap,
6177 		    VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
6178 		    &object, tx));
6179 	} else {
6180 		VERIFY0(err);
6181 	}
6182 
6183 	dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
6184 	    &entry, tx);
6185 }
6186 
6187 void
6188 metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
6189 {
6190 	ms->ms_unflushed_txg = txg;
6191 	metaslab_update_ondisk_flush_data(ms, tx);
6192 }
6193 
6194 boolean_t
6195 metaslab_unflushed_dirty(metaslab_t *ms)
6196 {
6197 	return (ms->ms_unflushed_dirty);
6198 }
6199 
6200 uint64_t
6201 metaslab_unflushed_txg(metaslab_t *ms)
6202 {
6203 	return (ms->ms_unflushed_txg);
6204 }
6205 
6206 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, U64, ZMOD_RW,
6207 	"Allocation granularity (a.k.a. stripe size)");
6208 
6209 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW,
6210 	"Load all metaslabs when pool is first opened");
6211 
6212 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW,
6213 	"Prevent metaslabs from being unloaded");
6214 
6215 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW,
6216 	"Preload potential metaslabs during reassessment");
6217 
6218 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, UINT, ZMOD_RW,
6219 	"Delay in txgs after metaslab was last used before unloading");
6220 
6221 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, UINT, ZMOD_RW,
6222 	"Delay in milliseconds after metaslab was last used before unloading");
6223 
6224 /* BEGIN CSTYLED */
6225 ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, UINT, ZMOD_RW,
6226 	"Percentage of metaslab group size that should be free to make it "
6227 	"eligible for allocation");
6228 
6229 ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, UINT, ZMOD_RW,
6230 	"Percentage of metaslab group size that should be considered eligible "
6231 	"for allocations unless all metaslab groups within the metaslab class "
6232 	"have also crossed this threshold");
6233 
6234 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT,
6235 	ZMOD_RW,
6236 	"Use the fragmentation metric to prefer less fragmented metaslabs");
6237 /* END CSTYLED */
6238 
6239 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, UINT,
6240 	ZMOD_RW, "Fragmentation for metaslab to allow allocation");
6241 
6242 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW,
6243 	"Prefer metaslabs with lower LBAs");
6244 
6245 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW,
6246 	"Enable metaslab group biasing");
6247 
6248 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT,
6249 	ZMOD_RW, "Enable segment-based metaslab selection");
6250 
6251 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW,
6252 	"Segment-based metaslab selection maximum buckets before switching");
6253 
6254 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, U64, ZMOD_RW,
6255 	"Blocks larger than this size are sometimes forced to be gang blocks");
6256 
6257 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging_pct, UINT, ZMOD_RW,
6258 	"Percentage of large blocks that will be forced to be gang blocks");
6259 
6260 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, UINT, ZMOD_RW,
6261 	"Max distance (bytes) to search forward before using size tree");
6262 
6263 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW,
6264 	"When looking in size tree, use largest segment instead of exact fit");
6265 
6266 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, U64,
6267 	ZMOD_RW, "How long to trust the cached max chunk size of a metaslab");
6268 
6269 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, UINT, ZMOD_RW,
6270 	"Percentage of memory that can be used to store metaslab range trees");
6271 
6272 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT,
6273 	ZMOD_RW, "Try hard to allocate before ganging");
6274 
6275 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, UINT, ZMOD_RW,
6276 	"Normally only consider this many of the best metaslabs in each vdev");
6277 
6278 /* BEGIN CSTYLED */
6279 ZFS_MODULE_PARAM_CALL(zfs, zfs_, active_allocator,
6280 	param_set_active_allocator, param_get_charp, ZMOD_RW,
6281 	"SPA active allocator");
6282 /* END CSTYLED */
6283