1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
24  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25  * Copyright (c) 2014 Integros [integros.com]
26  */
27 
28 #include <sys/zfs_context.h>
29 #include <sys/dmu.h>
30 #include <sys/dmu_tx.h>
31 #include <sys/space_map.h>
32 #include <sys/metaslab_impl.h>
33 #include <sys/vdev_impl.h>
34 #include <sys/zio.h>
35 #include <sys/spa_impl.h>
36 #include <sys/zfeature.h>
37 
38 SYSCTL_DECL(_vfs_zfs);
39 SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW, 0, "ZFS metaslab");
40 
41 #define	GANG_ALLOCATION(flags) \
42 	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
43 
44 uint64_t metaslab_aliquot = 512ULL << 10;
45 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */
46 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, gang_bang, CTLFLAG_RWTUN,
47     &metaslab_gang_bang, 0,
48     "Force gang block allocation for blocks larger than or equal to this value");
49 
50 /*
51  * The in-core space map representation is more compact than its on-disk form.
52  * The zfs_condense_pct determines how much more compact the in-core
53  * space map representation must be before we compact it on-disk.
54  * Values should be greater than or equal to 100.
55  */
56 int zfs_condense_pct = 200;
57 SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct, CTLFLAG_RWTUN,
58     &zfs_condense_pct, 0,
59     "Condense on-disk spacemap when it is more than this many percents"
60     " of in-memory counterpart");
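/*
 * Worked example of the tunable above (editor's illustration, not part of
 * the original code): with the default zfs_condense_pct of 200, the on-disk
 * space map must be at least twice the size of its in-core representation
 * before we consider condensing it, i.e. roughly
 *
 *	condense when (ondisk_size >= incore_size * zfs_condense_pct / 100)
 *
 * where ondisk_size and incore_size are purely illustrative names; the
 * actual check is made in metaslab_should_condense().
 */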
61 
62 /*
63  * Condensing a metaslab is not guaranteed to actually reduce the amount of
64  * space used on disk. In particular, a space map uses data in increments of
65  * MAX(1 << ashift, space_map_blksize), so a metaslab might use the
66  * same number of blocks after condensing. Since the goal of condensing is to
67  * reduce the number of IOPs required to read the space map, we only want to
68  * condense when we can be sure we will reduce the number of blocks used by the
69  * space map. Unfortunately, we cannot precisely compute whether or not this is
70  * the case in metaslab_should_condense since we are holding ms_lock. Instead,
71  * we apply the following heuristic: do not condense a spacemap unless the
72  * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
73  * blocks.
74  */
75 int zfs_metaslab_condense_block_threshold = 4;
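/*
 * Worked example of the block threshold above (editor's illustration):
 * assuming a 4KB space map block size, a space map currently occupying
 * 3 blocks (12KB) is never condensed under this heuristic, while one
 * occupying 5 blocks (20KB) exceeds the default threshold of 4 blocks and
 * may be condensed if the other condensing criteria are also met.
 */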
76 
77 /*
78  * The zfs_mg_noalloc_threshold defines which metaslab groups should
79  * be eligible for allocation. The value is defined as a percentage of
80  * free space. Metaslab groups that have more free space than
81  * zfs_mg_noalloc_threshold are always eligible for allocations. Once
82  * a metaslab group's free space is less than or equal to the
83  * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
84  * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
85  * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
86  * groups are allowed to accept allocations. Gang blocks are always
87  * eligible to allocate on any metaslab group. The default value of 0 means
88  * no metaslab group will be excluded based on this criterion.
89  */
90 int zfs_mg_noalloc_threshold = 0;
91 SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_noalloc_threshold, CTLFLAG_RWTUN,
92     &zfs_mg_noalloc_threshold, 0,
93     "Percentage of metaslab group size that should be free"
94     " to make it eligible for allocation");
95 
96 /*
97  * Metaslab groups are considered eligible for allocations if their
98  * fragmentation metric (measured as a percentage) is less than or equal to
99  * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
100  * then it will be skipped unless all metaslab groups within the metaslab
101  * class have also crossed this threshold.
102  */
103 int zfs_mg_fragmentation_threshold = 85;
104 SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_fragmentation_threshold, CTLFLAG_RWTUN,
105     &zfs_mg_fragmentation_threshold, 0,
106     "Maximum fragmentation percentage for a metaslab group to be considered "
107     "eligible for allocations unless all metaslab groups within the metaslab "
108     "class have also crossed this threshold");
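/*
 * Taken together, the two thresholds above yield an eligibility rule of
 * roughly the following shape (editor's sketch; the authoritative version
 * lives in metaslab_group_alloc_update()):
 *
 *	allocatable = free_capacity > zfs_mg_noalloc_threshold &&
 *	    (fragmentation == ZFS_FRAG_INVALID ||
 *	    fragmentation <= zfs_mg_fragmentation_threshold);
 *
 * where free_capacity and fragmentation are the per-group values computed
 * later in this file.
 */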
109 
110 /*
111  * Allow metaslabs to keep their active state as long as their fragmentation
112  * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
113  * active metaslab that exceeds this threshold will no longer keep its active
114  * status allowing better metaslabs to be selected.
115  */
116 int zfs_metaslab_fragmentation_threshold = 70;
117 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, fragmentation_threshold, CTLFLAG_RWTUN,
118     &zfs_metaslab_fragmentation_threshold, 0,
119     "Maximum fragmentation percentage at which a metaslab keeps its active state");
120 
121 /*
122  * When set will load all metaslabs when pool is first opened.
123  */
124 int metaslab_debug_load = 0;
125 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_load, CTLFLAG_RWTUN,
126     &metaslab_debug_load, 0,
127     "Load all metaslabs when pool is first opened");
128 
129 /*
130  * When set will prevent metaslabs from being unloaded.
131  */
132 int metaslab_debug_unload = 0;
133 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_unload, CTLFLAG_RWTUN,
134     &metaslab_debug_unload, 0,
135     "Prevent metaslabs from being unloaded");
136 
137 /*
138  * Minimum size which forces the dynamic allocator to change
139  * its allocation strategy.  Once the space map cannot satisfy
140  * an allocation of this size then it switches to using a more
141  * aggressive strategy (i.e. search by size rather than offset).
142  */
143 uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
144 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN,
145     &metaslab_df_alloc_threshold, 0,
146     "Minimum size which forces the dynamic allocator to change its allocation strategy");
147 
148 /*
149  * The minimum free space, in percent, which must be available
150  * in a space map to continue allocations in a first-fit fashion.
151  * Once the space map's free space drops below this level we dynamically
152  * switch to using best-fit allocations.
153  */
154 int metaslab_df_free_pct = 4;
155 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, CTLFLAG_RWTUN,
156     &metaslab_df_free_pct, 0,
157     "The minimum free space, in percent, which must be available in a "
158     "space map to continue allocations in a first-fit fashion");
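/*
 * Taken together, the two tunables above determine when metaslab_df_alloc()
 * abandons first-fit in favor of best-fit; the switch condition is
 * essentially (editor's sketch):
 *
 *	if (max_free_segment < metaslab_df_alloc_threshold ||
 *	    free_pct < metaslab_df_free_pct)
 *		use the size-sorted (best-fit) tree;
 *
 * where max_free_segment is the largest contiguous free segment in the
 * metaslab and free_pct is its free space expressed as a percentage.
 */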
159 
160 /*
161  * A metaslab is considered "free" if it contains a contiguous
162  * segment which is greater than metaslab_min_alloc_size.
163  */
164 uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
165 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, min_alloc_size, CTLFLAG_RWTUN,
166     &metaslab_min_alloc_size, 0,
167     "A metaslab is considered \"free\" if it contains a contiguous "
168     "segment which is greater than vfs.zfs.metaslab.min_alloc_size");
169 
170 /*
171  * Percentage of all cpus that can be used by the metaslab taskq.
172  */
173 int metaslab_load_pct = 50;
174 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct, CTLFLAG_RWTUN,
175     &metaslab_load_pct, 0,
176     "Percentage of cpus that can be used by the metaslab taskq");
177 
178 /*
179  * Determines how many txgs a metaslab may remain loaded without having any
180  * allocations from it. As long as a metaslab continues to be used we will
181  * keep it loaded.
182  */
183 int metaslab_unload_delay = TXG_SIZE * 2;
184 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, unload_delay, CTLFLAG_RWTUN,
185     &metaslab_unload_delay, 0,
186     "Number of TXGs that an unused metaslab can be kept in memory");
187 
188 /*
189  * Max number of metaslabs per group to preload.
190  */
191 int metaslab_preload_limit = SPA_DVAS_PER_BP;
192 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, CTLFLAG_RWTUN,
193     &metaslab_preload_limit, 0,
194     "Max number of metaslabs per group to preload");
195 
196 /*
197  * Enable/disable preloading of metaslabs.
198  */
199 boolean_t metaslab_preload_enabled = B_TRUE;
200 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_enabled, CTLFLAG_RWTUN,
201     &metaslab_preload_enabled, 0,
202     "Enable/disable preloading of metaslabs");
203 
204 /*
205  * Enable/disable fragmentation weighting on metaslabs.
206  */
207 boolean_t metaslab_fragmentation_factor_enabled = B_TRUE;
208 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, fragmentation_factor_enabled, CTLFLAG_RWTUN,
209     &metaslab_fragmentation_factor_enabled, 0,
210     "Enable fragmentation weighting on metaslabs");
211 
212 /*
213  * Enable/disable lba weighting (i.e. outer tracks are given preference).
214  */
215 boolean_t metaslab_lba_weighting_enabled = B_TRUE;
216 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, lba_weighting_enabled, CTLFLAG_RWTUN,
217     &metaslab_lba_weighting_enabled, 0,
218     "Enable LBA weighting (i.e. outer tracks are given preference)");
219 
220 /*
221  * Enable/disable metaslab group biasing.
222  */
223 boolean_t metaslab_bias_enabled = B_TRUE;
224 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, bias_enabled, CTLFLAG_RWTUN,
225     &metaslab_bias_enabled, 0,
226     "Enable metaslab group biasing");
227 
228 /*
229  * Enable/disable segment-based metaslab selection.
230  */
231 boolean_t zfs_metaslab_segment_weight_enabled = B_TRUE;
232 
233 /*
234  * When using segment-based metaslab selection, we will continue
235  * allocating from the active metaslab until we have exhausted
236  * zfs_metaslab_switch_threshold of its buckets.
237  */
238 int zfs_metaslab_switch_threshold = 2;
239 
240 /*
241  * Internal switch to enable/disable the metaslab allocation tracing
242  * facility.
243  */
244 boolean_t metaslab_trace_enabled = B_TRUE;
245 
246 /*
247  * Maximum entries that the metaslab allocation tracing facility will keep
248  * in a given list when running in non-debug mode. We limit the number
249  * of entries in non-debug mode to prevent us from using up too much memory.
250  * The limit should be sufficiently large that we don't expect any allocation
251  * to ever exceed this value. In debug mode, the system will panic if this
252  * limit is ever reached allowing for further investigation.
253  */
254 uint64_t metaslab_trace_max_entries = 5000;
255 
256 static uint64_t metaslab_weight(metaslab_t *);
257 static void metaslab_set_fragmentation(metaslab_t *);
258 
259 kmem_cache_t *metaslab_alloc_trace_cache;
260 
261 /*
262  * ==========================================================================
263  * Metaslab classes
264  * ==========================================================================
265  */
266 metaslab_class_t *
267 metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
268 {
269 	metaslab_class_t *mc;
270 
271 	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
272 
273 	mc->mc_spa = spa;
274 	mc->mc_rotor = NULL;
275 	mc->mc_ops = ops;
276 	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
277 	refcount_create_tracked(&mc->mc_alloc_slots);
278 
279 	return (mc);
280 }
281 
282 void
283 metaslab_class_destroy(metaslab_class_t *mc)
284 {
285 	ASSERT(mc->mc_rotor == NULL);
286 	ASSERT(mc->mc_alloc == 0);
287 	ASSERT(mc->mc_deferred == 0);
288 	ASSERT(mc->mc_space == 0);
289 	ASSERT(mc->mc_dspace == 0);
290 
291 	refcount_destroy(&mc->mc_alloc_slots);
292 	mutex_destroy(&mc->mc_lock);
293 	kmem_free(mc, sizeof (metaslab_class_t));
294 }
295 
296 int
297 metaslab_class_validate(metaslab_class_t *mc)
298 {
299 	metaslab_group_t *mg;
300 	vdev_t *vd;
301 
302 	/*
303 	 * Must hold one of the spa_config locks.
304 	 */
305 	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
306 	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
307 
308 	if ((mg = mc->mc_rotor) == NULL)
309 		return (0);
310 
311 	do {
312 		vd = mg->mg_vd;
313 		ASSERT(vd->vdev_mg != NULL);
314 		ASSERT3P(vd->vdev_top, ==, vd);
315 		ASSERT3P(mg->mg_class, ==, mc);
316 		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
317 	} while ((mg = mg->mg_next) != mc->mc_rotor);
318 
319 	return (0);
320 }
321 
322 void
323 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
324     int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
325 {
326 	atomic_add_64(&mc->mc_alloc, alloc_delta);
327 	atomic_add_64(&mc->mc_deferred, defer_delta);
328 	atomic_add_64(&mc->mc_space, space_delta);
329 	atomic_add_64(&mc->mc_dspace, dspace_delta);
330 }
331 
332 void
333 metaslab_class_minblocksize_update(metaslab_class_t *mc)
334 {
335 	metaslab_group_t *mg;
336 	vdev_t *vd;
337 	uint64_t minashift = UINT64_MAX;
338 
339 	if ((mg = mc->mc_rotor) == NULL) {
340 		mc->mc_minblocksize = SPA_MINBLOCKSIZE;
341 		return;
342 	}
343 
344 	do {
345 		vd = mg->mg_vd;
346 		if (vd->vdev_ashift < minashift)
347 			minashift = vd->vdev_ashift;
348 	} while ((mg = mg->mg_next) != mc->mc_rotor);
349 
350 	mc->mc_minblocksize = 1ULL << minashift;
351 }
352 
353 uint64_t
354 metaslab_class_get_alloc(metaslab_class_t *mc)
355 {
356 	return (mc->mc_alloc);
357 }
358 
359 uint64_t
360 metaslab_class_get_deferred(metaslab_class_t *mc)
361 {
362 	return (mc->mc_deferred);
363 }
364 
365 uint64_t
366 metaslab_class_get_space(metaslab_class_t *mc)
367 {
368 	return (mc->mc_space);
369 }
370 
371 uint64_t
372 metaslab_class_get_dspace(metaslab_class_t *mc)
373 {
374 	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
375 }
376 
377 uint64_t
378 metaslab_class_get_minblocksize(metaslab_class_t *mc)
379 {
380 	return (mc->mc_minblocksize);
381 }
382 
383 void
384 metaslab_class_histogram_verify(metaslab_class_t *mc)
385 {
386 	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
387 	uint64_t *mc_hist;
388 	int i;
389 
390 	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
391 		return;
392 
393 	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
394 	    KM_SLEEP);
395 
396 	for (int c = 0; c < rvd->vdev_children; c++) {
397 		vdev_t *tvd = rvd->vdev_child[c];
398 		metaslab_group_t *mg = tvd->vdev_mg;
399 
400 		/*
401 		 * Skip any holes, uninitialized top-levels, or
402 		 * vdevs that are not in this metaslab class.
403 		 */
404 		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
405 		    mg->mg_class != mc) {
406 			continue;
407 		}
408 
409 		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
410 			mc_hist[i] += mg->mg_histogram[i];
411 	}
412 
413 	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
414 		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
415 
416 	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
417 }
418 
419 /*
420  * Calculate the metaslab class's fragmentation metric. The metric
421  * is weighted based on the space contribution of each metaslab group.
422  * The return value will be a number between 0 and 100 (inclusive), or
423  * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
424  * zfs_frag_table for more information about the metric.
425  */
426 uint64_t
427 metaslab_class_fragmentation(metaslab_class_t *mc)
428 {
429 	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
430 	uint64_t fragmentation = 0;
431 
432 	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
433 
434 	for (int c = 0; c < rvd->vdev_children; c++) {
435 		vdev_t *tvd = rvd->vdev_child[c];
436 		metaslab_group_t *mg = tvd->vdev_mg;
437 
438 		/*
439 		 * Skip any holes, uninitialized top-levels, or
440 		 * vdevs that are not in this metaslab class.
441 		 */
442 		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
443 		    mg->mg_class != mc) {
444 			continue;
445 		}
446 
447 		/*
448 		 * If a metaslab group does not contain a fragmentation
449 		 * metric then just bail out.
450 		 */
451 		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
452 			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
453 			return (ZFS_FRAG_INVALID);
454 		}
455 
456 		/*
457 		 * Determine how much this metaslab_group is contributing
458 		 * to the overall pool fragmentation metric.
459 		 */
460 		fragmentation += mg->mg_fragmentation *
461 		    metaslab_group_get_space(mg);
462 	}
463 	fragmentation /= metaslab_class_get_space(mc);
464 
465 	ASSERT3U(fragmentation, <=, 100);
466 	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
467 	return (fragmentation);
468 }
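/*
 * Worked example of the weighting above (editor's illustration with made-up
 * numbers): a class containing a 1TB group at 10% fragmentation and a 3TB
 * group at 50% fragmentation reports
 *
 *	(10 * 1TB + 50 * 3TB) / 4TB = 40
 *
 * so the larger group dominates the class-wide metric.
 */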
469 
470 /*
471  * Calculate the amount of expandable space that is available in
472  * this metaslab class. If a device is expanded then its expandable
473  * space will be the amount of allocatable space that is currently not
474  * part of this metaslab class.
475  */
476 uint64_t
477 metaslab_class_expandable_space(metaslab_class_t *mc)
478 {
479 	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
480 	uint64_t space = 0;
481 
482 	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
483 	for (int c = 0; c < rvd->vdev_children; c++) {
484 		vdev_t *tvd = rvd->vdev_child[c];
485 		metaslab_group_t *mg = tvd->vdev_mg;
486 
487 		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
488 		    mg->mg_class != mc) {
489 			continue;
490 		}
491 
492 		/*
493 		 * Calculate if we have enough space to add additional
494 		 * metaslabs. We report the expandable space in terms
495 		 * of the metaslab size since that's the unit of expansion.
496 		 */
497 		space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
498 		    1ULL << tvd->vdev_ms_shift);
499 	}
500 	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
501 	return (space);
502 }
503 
504 static int
505 metaslab_compare(const void *x1, const void *x2)
506 {
507 	const metaslab_t *m1 = x1;
508 	const metaslab_t *m2 = x2;
509 
510 	if (m1->ms_weight < m2->ms_weight)
511 		return (1);
512 	if (m1->ms_weight > m2->ms_weight)
513 		return (-1);
514 
515 	/*
516 	 * If the weights are identical, use the offset to force uniqueness.
517 	 */
518 	if (m1->ms_start < m2->ms_start)
519 		return (-1);
520 	if (m1->ms_start > m2->ms_start)
521 		return (1);
522 
523 	ASSERT3P(m1, ==, m2);
524 
525 	return (0);
526 }
527 
528 /*
529  * Verify that the space accounting on disk matches the in-core range_trees.
530  */
531 void
532 metaslab_verify_space(metaslab_t *msp, uint64_t txg)
533 {
534 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
535 	uint64_t allocated = 0;
536 	uint64_t freed = 0;
537 	uint64_t sm_free_space, msp_free_space;
538 
539 	ASSERT(MUTEX_HELD(&msp->ms_lock));
540 
541 	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
542 		return;
543 
544 	/*
545 	 * We can only verify the metaslab space when we're called
546 	 * from syncing context with a loaded metaslab that has an allocated
547 	 * space map. Calling this in non-syncing context does not
548 	 * provide a consistent view of the metaslab since we're performing
549 	 * allocations in the future.
550 	 */
551 	if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
552 	    !msp->ms_loaded)
553 		return;
554 
555 	sm_free_space = msp->ms_size - space_map_allocated(msp->ms_sm) -
556 	    space_map_alloc_delta(msp->ms_sm);
557 
558 	/*
559 	 * Account for future allocations since we would have already
560 	 * deducted that space from the ms_freetree.
561 	 */
562 	for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
563 		allocated +=
564 		    range_tree_space(msp->ms_alloctree[(txg + t) & TXG_MASK]);
565 	}
566 	freed = range_tree_space(msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK]);
567 
568 	msp_free_space = range_tree_space(msp->ms_tree) + allocated +
569 	    msp->ms_deferspace + freed;
570 
571 	VERIFY3U(sm_free_space, ==, msp_free_space);
572 }
573 
574 /*
575  * ==========================================================================
576  * Metaslab groups
577  * ==========================================================================
578  */
579 /*
580  * Update the allocatable flag and the metaslab group's capacity.
581  * The allocatable flag is set to true if the capacity is above
582  * the zfs_mg_noalloc_threshold and the fragmentation value is less
583  * than or equal to zfs_mg_fragmentation_threshold. If a metaslab group
584  * transitions from allocatable to non-allocatable or vice versa then the
585  * metaslab group's class is updated to reflect the transition.
586  */
587 static void
588 metaslab_group_alloc_update(metaslab_group_t *mg)
589 {
590 	vdev_t *vd = mg->mg_vd;
591 	metaslab_class_t *mc = mg->mg_class;
592 	vdev_stat_t *vs = &vd->vdev_stat;
593 	boolean_t was_allocatable;
594 	boolean_t was_initialized;
595 
596 	ASSERT(vd == vd->vdev_top);
597 
598 	mutex_enter(&mg->mg_lock);
599 	was_allocatable = mg->mg_allocatable;
600 	was_initialized = mg->mg_initialized;
601 
602 	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
603 	    (vs->vs_space + 1);
604 
605 	mutex_enter(&mc->mc_lock);
606 
607 	/*
608 	 * If the metaslab group was just added then it won't
609 	 * have any space until we finish syncing out this txg.
610 	 * At that point we will consider it initialized and available
611 	 * for allocations.  We also don't consider non-activated
612 	 * metaslab groups (e.g. vdevs that are in the middle of being removed)
613 	 * to be initialized, because they can't be used for allocation.
614 	 */
615 	mg->mg_initialized = metaslab_group_initialized(mg);
616 	if (!was_initialized && mg->mg_initialized) {
617 		mc->mc_groups++;
618 	} else if (was_initialized && !mg->mg_initialized) {
619 		ASSERT3U(mc->mc_groups, >, 0);
620 		mc->mc_groups--;
621 	}
622 	if (mg->mg_initialized)
623 		mg->mg_no_free_space = B_FALSE;
624 
625 	/*
626 	 * A metaslab group is considered allocatable if it has plenty
627 	 * of free space or is not heavily fragmented. We only take
628 	 * fragmentation into account if the metaslab group has a valid
629 	 * fragmentation metric (i.e. a value between 0 and 100).
630 	 */
631 	mg->mg_allocatable = (mg->mg_activation_count > 0 &&
632 	    mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
633 	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
634 	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
635 
636 	/*
637 	 * The mc_alloc_groups maintains a count of the number of
638 	 * groups in this metaslab class that are still above the
639 	 * zfs_mg_noalloc_threshold. This is used by the allocating
640 	 * threads to determine if they should avoid allocations to
641 	 * a given group. The allocator will avoid allocations to a group
642 	 * if that group has reached or is below the zfs_mg_noalloc_threshold
643 	 * and there are still other groups that are above the threshold.
644 	 * When a group transitions from allocatable to non-allocatable or
645 	 * vice versa we update the metaslab class to reflect that change.
646 	 * When the mc_alloc_groups value drops to 0 that means that all
647 	 * groups have reached the zfs_mg_noalloc_threshold making all groups
648 	 * eligible for allocations. This effectively means that all devices
649 	 * are balanced again.
650 	 */
651 	if (was_allocatable && !mg->mg_allocatable)
652 		mc->mc_alloc_groups--;
653 	else if (!was_allocatable && mg->mg_allocatable)
654 		mc->mc_alloc_groups++;
655 	mutex_exit(&mc->mc_lock);
656 
657 	mutex_exit(&mg->mg_lock);
658 }
659 
660 metaslab_group_t *
661 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
662 {
663 	metaslab_group_t *mg;
664 
665 	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
666 	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
667 	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
668 	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
669 	mg->mg_vd = vd;
670 	mg->mg_class = mc;
671 	mg->mg_activation_count = 0;
672 	mg->mg_initialized = B_FALSE;
673 	mg->mg_no_free_space = B_TRUE;
674 	refcount_create_tracked(&mg->mg_alloc_queue_depth);
675 
676 	mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
677 	    minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);
678 
679 	return (mg);
680 }
681 
682 void
683 metaslab_group_destroy(metaslab_group_t *mg)
684 {
685 	ASSERT(mg->mg_prev == NULL);
686 	ASSERT(mg->mg_next == NULL);
687 	/*
688 	 * We may have gone below zero with the activation count
689 	 * either because we never activated in the first place or
690 	 * because we're done, and possibly removing the vdev.
691 	 */
692 	ASSERT(mg->mg_activation_count <= 0);
693 
694 	taskq_destroy(mg->mg_taskq);
695 	avl_destroy(&mg->mg_metaslab_tree);
696 	mutex_destroy(&mg->mg_lock);
697 	refcount_destroy(&mg->mg_alloc_queue_depth);
698 	kmem_free(mg, sizeof (metaslab_group_t));
699 }
700 
701 void
702 metaslab_group_activate(metaslab_group_t *mg)
703 {
704 	metaslab_class_t *mc = mg->mg_class;
705 	metaslab_group_t *mgprev, *mgnext;
706 
707 	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
708 
709 	ASSERT(mc->mc_rotor != mg);
710 	ASSERT(mg->mg_prev == NULL);
711 	ASSERT(mg->mg_next == NULL);
712 	ASSERT(mg->mg_activation_count <= 0);
713 
714 	if (++mg->mg_activation_count <= 0)
715 		return;
716 
717 	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
718 	metaslab_group_alloc_update(mg);
719 
720 	if ((mgprev = mc->mc_rotor) == NULL) {
721 		mg->mg_prev = mg;
722 		mg->mg_next = mg;
723 	} else {
724 		mgnext = mgprev->mg_next;
725 		mg->mg_prev = mgprev;
726 		mg->mg_next = mgnext;
727 		mgprev->mg_next = mg;
728 		mgnext->mg_prev = mg;
729 	}
730 	mc->mc_rotor = mg;
731 	metaslab_class_minblocksize_update(mc);
732 }
733 
734 void
735 metaslab_group_passivate(metaslab_group_t *mg)
736 {
737 	metaslab_class_t *mc = mg->mg_class;
738 	metaslab_group_t *mgprev, *mgnext;
739 
740 	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
741 
742 	if (--mg->mg_activation_count != 0) {
743 		ASSERT(mc->mc_rotor != mg);
744 		ASSERT(mg->mg_prev == NULL);
745 		ASSERT(mg->mg_next == NULL);
746 		ASSERT(mg->mg_activation_count < 0);
747 		return;
748 	}
749 
750 	taskq_wait(mg->mg_taskq);
751 	metaslab_group_alloc_update(mg);
752 
753 	mgprev = mg->mg_prev;
754 	mgnext = mg->mg_next;
755 
756 	if (mg == mgnext) {
757 		mc->mc_rotor = NULL;
758 	} else {
759 		mc->mc_rotor = mgnext;
760 		mgprev->mg_next = mgnext;
761 		mgnext->mg_prev = mgprev;
762 	}
763 
764 	mg->mg_prev = NULL;
765 	mg->mg_next = NULL;
766 	metaslab_class_minblocksize_update(mc);
767 }
768 
769 boolean_t
770 metaslab_group_initialized(metaslab_group_t *mg)
771 {
772 	vdev_t *vd = mg->mg_vd;
773 	vdev_stat_t *vs = &vd->vdev_stat;
774 
775 	return (vs->vs_space != 0 && mg->mg_activation_count > 0);
776 }
777 
778 uint64_t
779 metaslab_group_get_space(metaslab_group_t *mg)
780 {
781 	return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
782 }
783 
784 void
785 metaslab_group_histogram_verify(metaslab_group_t *mg)
786 {
787 	uint64_t *mg_hist;
788 	vdev_t *vd = mg->mg_vd;
789 	uint64_t ashift = vd->vdev_ashift;
790 	int i;
791 
792 	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
793 		return;
794 
795 	mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
796 	    KM_SLEEP);
797 
798 	ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
799 	    SPACE_MAP_HISTOGRAM_SIZE + ashift);
800 
801 	for (int m = 0; m < vd->vdev_ms_count; m++) {
802 		metaslab_t *msp = vd->vdev_ms[m];
803 
804 		if (msp->ms_sm == NULL)
805 			continue;
806 
807 		for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
808 			mg_hist[i + ashift] +=
809 			    msp->ms_sm->sm_phys->smp_histogram[i];
810 	}
811 
812 	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
813 		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
814 
815 	kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
816 }
817 
818 static void
819 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
820 {
821 	metaslab_class_t *mc = mg->mg_class;
822 	uint64_t ashift = mg->mg_vd->vdev_ashift;
823 
824 	ASSERT(MUTEX_HELD(&msp->ms_lock));
825 	if (msp->ms_sm == NULL)
826 		return;
827 
828 	mutex_enter(&mg->mg_lock);
829 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
830 		mg->mg_histogram[i + ashift] +=
831 		    msp->ms_sm->sm_phys->smp_histogram[i];
832 		mc->mc_histogram[i + ashift] +=
833 		    msp->ms_sm->sm_phys->smp_histogram[i];
834 	}
835 	mutex_exit(&mg->mg_lock);
836 }
837 
838 void
839 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
840 {
841 	metaslab_class_t *mc = mg->mg_class;
842 	uint64_t ashift = mg->mg_vd->vdev_ashift;
843 
844 	ASSERT(MUTEX_HELD(&msp->ms_lock));
845 	if (msp->ms_sm == NULL)
846 		return;
847 
848 	mutex_enter(&mg->mg_lock);
849 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
850 		ASSERT3U(mg->mg_histogram[i + ashift], >=,
851 		    msp->ms_sm->sm_phys->smp_histogram[i]);
852 		ASSERT3U(mc->mc_histogram[i + ashift], >=,
853 		    msp->ms_sm->sm_phys->smp_histogram[i]);
854 
855 		mg->mg_histogram[i + ashift] -=
856 		    msp->ms_sm->sm_phys->smp_histogram[i];
857 		mc->mc_histogram[i + ashift] -=
858 		    msp->ms_sm->sm_phys->smp_histogram[i];
859 	}
860 	mutex_exit(&mg->mg_lock);
861 }
862 
863 static void
864 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
865 {
866 	ASSERT(msp->ms_group == NULL);
867 	mutex_enter(&mg->mg_lock);
868 	msp->ms_group = mg;
869 	msp->ms_weight = 0;
870 	avl_add(&mg->mg_metaslab_tree, msp);
871 	mutex_exit(&mg->mg_lock);
872 
873 	mutex_enter(&msp->ms_lock);
874 	metaslab_group_histogram_add(mg, msp);
875 	mutex_exit(&msp->ms_lock);
876 }
877 
878 static void
879 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
880 {
881 	mutex_enter(&msp->ms_lock);
882 	metaslab_group_histogram_remove(mg, msp);
883 	mutex_exit(&msp->ms_lock);
884 
885 	mutex_enter(&mg->mg_lock);
886 	ASSERT(msp->ms_group == mg);
887 	avl_remove(&mg->mg_metaslab_tree, msp);
888 	msp->ms_group = NULL;
889 	mutex_exit(&mg->mg_lock);
890 }
891 
892 static void
893 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
894 {
895 	/*
896 	 * Although in principle the weight can be any value, in
897 	 * practice we do not use values in the range [1, 511].
898 	 */
899 	ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
900 	ASSERT(MUTEX_HELD(&msp->ms_lock));
901 
902 	mutex_enter(&mg->mg_lock);
903 	ASSERT(msp->ms_group == mg);
904 	avl_remove(&mg->mg_metaslab_tree, msp);
905 	msp->ms_weight = weight;
906 	avl_add(&mg->mg_metaslab_tree, msp);
907 	mutex_exit(&mg->mg_lock);
908 }
909 
910 /*
911  * Calculate the fragmentation for a given metaslab group. We can use
912  * a simple average here since all metaslabs within the group must have
913  * the same size. The return value will be a value between 0 and 100
914  * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
915  * group have a fragmentation metric.
916  */
917 uint64_t
918 metaslab_group_fragmentation(metaslab_group_t *mg)
919 {
920 	vdev_t *vd = mg->mg_vd;
921 	uint64_t fragmentation = 0;
922 	uint64_t valid_ms = 0;
923 
924 	for (int m = 0; m < vd->vdev_ms_count; m++) {
925 		metaslab_t *msp = vd->vdev_ms[m];
926 
927 		if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
928 			continue;
929 
930 		valid_ms++;
931 		fragmentation += msp->ms_fragmentation;
932 	}
933 
934 	if (valid_ms <= vd->vdev_ms_count / 2)
935 		return (ZFS_FRAG_INVALID);
936 
937 	fragmentation /= valid_ms;
938 	ASSERT3U(fragmentation, <=, 100);
939 	return (fragmentation);
940 }
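/*
 * Worked example of the averaging above (editor's illustration): a group
 * with four metaslabs whose fragmentation values are 10, 30, ZFS_FRAG_INVALID
 * and 20 has three valid metaslabs out of four (more than half), so it
 * reports (10 + 30 + 20) / 3 = 20. If only two of the four were valid, the
 * group would report ZFS_FRAG_INVALID instead.
 */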
941 
942 /*
943  * Determine if a given metaslab group should skip allocations. A metaslab
944  * group should avoid allocations if its free capacity is less than the
945  * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
946  * zfs_mg_fragmentation_threshold and there is at least one metaslab group
947  * that can still handle allocations. If the allocation throttle is enabled
948  * then we skip allocations to devices that have reached their maximum
949  * allocation queue depth unless the selected metaslab group is the only
950  * eligible group remaining.
951  */
952 static boolean_t
953 metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
954     uint64_t psize)
955 {
956 	spa_t *spa = mg->mg_vd->vdev_spa;
957 	metaslab_class_t *mc = mg->mg_class;
958 
959 	/*
960 	 * We can only consider skipping this metaslab group if it's
961 	 * in the normal metaslab class and there are other metaslab
962 	 * groups to select from. Otherwise, we always consider it eligible
963 	 * for allocations.
964 	 */
965 	if (mc != spa_normal_class(spa) || mc->mc_groups <= 1)
966 		return (B_TRUE);
967 
968 	/*
969 	 * If the metaslab group's mg_allocatable flag is set (see comments
970 	 * in metaslab_group_alloc_update() for more information) and
971 	 * the allocation throttle is disabled then allow allocations to this
972 	 * device. However, if the allocation throttle is enabled then
973 	 * check if we have reached our allocation limit (mg_alloc_queue_depth)
974 	 * to determine if we should allow allocations to this metaslab group.
975 	 * If all metaslab groups are no longer considered allocatable
976 	 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
977 	 * gang block size then we allow allocations on this metaslab group
978 	 * regardless of the mg_allocatable or throttle settings.
979 	 */
980 	if (mg->mg_allocatable) {
981 		metaslab_group_t *mgp;
982 		int64_t qdepth;
983 		uint64_t qmax = mg->mg_max_alloc_queue_depth;
984 
985 		if (!mc->mc_alloc_throttle_enabled)
986 			return (B_TRUE);
987 
988 		/*
989 		 * If this metaslab group does not have any free space, then
990 		 * there is no point in looking further.
991 		 */
992 		if (mg->mg_no_free_space)
993 			return (B_FALSE);
994 
995 		qdepth = refcount_count(&mg->mg_alloc_queue_depth);
996 
997 		/*
998 		 * If this metaslab group is below its qmax or it's
999 		 * the only allocatable metaslab group, then attempt
1000 		 * to allocate from it.
1001 		 */
1002 		if (qdepth < qmax || mc->mc_alloc_groups == 1)
1003 			return (B_TRUE);
1004 		ASSERT3U(mc->mc_alloc_groups, >, 1);
1005 
1006 		/*
1007 		 * Since this metaslab group is at or over its qmax, we
1008 		 * need to determine if there are metaslab groups after this
1009 		 * one that might be able to handle this allocation. This is
1010 		 * racy since we can't hold the locks for all metaslab
1011 		 * groups at the same time when we make this check.
1012 		 */
1013 		for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
1014 			qmax = mgp->mg_max_alloc_queue_depth;
1015 
1016 			qdepth = refcount_count(&mgp->mg_alloc_queue_depth);
1017 
1018 			/*
1019 			 * If there is another metaslab group that
1020 			 * might be able to handle the allocation, then
1021 			 * we return false so that we skip this group.
1022 			 */
1023 			if (qdepth < qmax && !mgp->mg_no_free_space)
1024 				return (B_FALSE);
1025 		}
1026 
1027 		/*
1028 		 * We didn't find another group to handle the allocation
1029 		 * so we can't skip this metaslab group even though
1030 		 * we are at or over our qmax.
1031 		 */
1032 		return (B_TRUE);
1033 
1034 	} else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
1035 		return (B_TRUE);
1036 	}
1037 	return (B_FALSE);
1038 }
1039 
1040 /*
1041  * ==========================================================================
1042  * Range tree callbacks
1043  * ==========================================================================
1044  */
1045 
1046 /*
1047  * Comparison function for the private size-ordered tree. Tree is sorted
1048  * by size, larger sizes at the end of the tree.
1049  */
1050 static int
1051 metaslab_rangesize_compare(const void *x1, const void *x2)
1052 {
1053 	const range_seg_t *r1 = x1;
1054 	const range_seg_t *r2 = x2;
1055 	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1056 	uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1057 
1058 	if (rs_size1 < rs_size2)
1059 		return (-1);
1060 	if (rs_size1 > rs_size2)
1061 		return (1);
1062 
1063 	if (r1->rs_start < r2->rs_start)
1064 		return (-1);
1065 
1066 	if (r1->rs_start > r2->rs_start)
1067 		return (1);
1068 
1069 	return (0);
1070 }
1071 
1072 /*
1073  * Create any block allocator specific components. The current allocators
1074  * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
1075  */
1076 static void
1077 metaslab_rt_create(range_tree_t *rt, void *arg)
1078 {
1079 	metaslab_t *msp = arg;
1080 
1081 	ASSERT3P(rt->rt_arg, ==, msp);
1082 	ASSERT(msp->ms_tree == NULL);
1083 
1084 	avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
1085 	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
1086 }
1087 
1088 /*
1089  * Destroy the block allocator specific components.
1090  */
1091 static void
1092 metaslab_rt_destroy(range_tree_t *rt, void *arg)
1093 {
1094 	metaslab_t *msp = arg;
1095 
1096 	ASSERT3P(rt->rt_arg, ==, msp);
1097 	ASSERT3P(msp->ms_tree, ==, rt);
1098 	ASSERT0(avl_numnodes(&msp->ms_size_tree));
1099 
1100 	avl_destroy(&msp->ms_size_tree);
1101 }
1102 
1103 static void
1104 metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
1105 {
1106 	metaslab_t *msp = arg;
1107 
1108 	ASSERT3P(rt->rt_arg, ==, msp);
1109 	ASSERT3P(msp->ms_tree, ==, rt);
1110 	VERIFY(!msp->ms_condensing);
1111 	avl_add(&msp->ms_size_tree, rs);
1112 }
1113 
1114 static void
1115 metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
1116 {
1117 	metaslab_t *msp = arg;
1118 
1119 	ASSERT3P(rt->rt_arg, ==, msp);
1120 	ASSERT3P(msp->ms_tree, ==, rt);
1121 	VERIFY(!msp->ms_condensing);
1122 	avl_remove(&msp->ms_size_tree, rs);
1123 }
1124 
1125 static void
1126 metaslab_rt_vacate(range_tree_t *rt, void *arg)
1127 {
1128 	metaslab_t *msp = arg;
1129 
1130 	ASSERT3P(rt->rt_arg, ==, msp);
1131 	ASSERT3P(msp->ms_tree, ==, rt);
1132 
1133 	/*
1134 	 * Normally one would walk the tree freeing nodes along the way.
1135 	 * Since the nodes are shared with the range trees we can avoid
1136 	 * walking all nodes and just reinitialize the avl tree. The nodes
1137 	 * will be freed by the range tree, so we don't want to free them here.
1138 	 */
1139 	avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
1140 	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
1141 }
1142 
1143 static range_tree_ops_t metaslab_rt_ops = {
1144 	metaslab_rt_create,
1145 	metaslab_rt_destroy,
1146 	metaslab_rt_add,
1147 	metaslab_rt_remove,
1148 	metaslab_rt_vacate
1149 };
1150 
1151 /*
1152  * ==========================================================================
1153  * Common allocator routines
1154  * ==========================================================================
1155  */
1156 
1157 /*
1158  * Return the maximum contiguous segment within the metaslab.
1159  */
1160 uint64_t
1161 metaslab_block_maxsize(metaslab_t *msp)
1162 {
1163 	avl_tree_t *t = &msp->ms_size_tree;
1164 	range_seg_t *rs;
1165 
1166 	if (t == NULL || (rs = avl_last(t)) == NULL)
1167 		return (0ULL);
1168 
1169 	return (rs->rs_end - rs->rs_start);
1170 }
1171 
1172 static range_seg_t *
1173 metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
1174 {
1175 	range_seg_t *rs, rsearch;
1176 	avl_index_t where;
1177 
1178 	rsearch.rs_start = start;
1179 	rsearch.rs_end = start + size;
1180 
1181 	rs = avl_find(t, &rsearch, &where);
1182 	if (rs == NULL) {
1183 		rs = avl_nearest(t, where, AVL_AFTER);
1184 	}
1185 
1186 	return (rs);
1187 }
1188 
1189 /*
1190  * This is a helper function that can be used by the allocator to find
1191  * a suitable block to allocate. This will search the specified AVL
1192  * tree looking for a block that matches the specified criteria.
1193  */
1194 static uint64_t
1195 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
1196     uint64_t align)
1197 {
1198 	range_seg_t *rs = metaslab_block_find(t, *cursor, size);
1199 
1200 	while (rs != NULL) {
1201 		uint64_t offset = P2ROUNDUP(rs->rs_start, align);
1202 
1203 		if (offset + size <= rs->rs_end) {
1204 			*cursor = offset + size;
1205 			return (offset);
1206 		}
1207 		rs = AVL_NEXT(t, rs);
1208 	}
1209 
1210 	/*
1211 	 * If we know we've searched the whole map (*cursor == 0), give up.
1212 	 * Otherwise, reset the cursor to the beginning and try again.
1213 	 */
1214 	if (*cursor == 0)
1215 		return (-1ULL);
1216 
1217 	*cursor = 0;
1218 	return (metaslab_block_picker(t, cursor, size, align));
1219 }
1220 
1221 /*
1222  * ==========================================================================
1223  * The first-fit block allocator
1224  * ==========================================================================
1225  */
1226 static uint64_t
1227 metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
1228 {
1229 	/*
1230 	 * Find the largest power of 2 block size that evenly divides the
1231 	 * requested size. This is used to try to allocate blocks with similar
1232 	 * alignment from the same area of the metaslab (i.e. same cursor
1233 	 * bucket) but it does not guarantee that other allocation sizes
1234 	 * may exist in the same region.
1235 	 */
1236 	uint64_t align = size & -size;
1237 	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1238 	avl_tree_t *t = &msp->ms_tree->rt_root;
1239 
1240 	return (metaslab_block_picker(t, cursor, size, align));
1241 }
1242 
1243 static metaslab_ops_t metaslab_ff_ops = {
1244 	metaslab_ff_alloc
1245 };
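/*
 * Worked example of the cursor selection above (editor's illustration): for
 * a 12KB request, size & -size is 4KB (the largest power of 2 that divides
 * 12KB), so the allocator uses the 4KB cursor bucket,
 * ms_lbas[highbit64(4KB) - 1], and asks metaslab_block_picker() for a
 * 4KB-aligned 12KB segment starting at that cursor.
 */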
1246 
1247 /*
1248  * ==========================================================================
1249  * Dynamic block allocator -
1250  * Uses the first-fit allocation scheme until space gets low and then
1251  * adjusts to a best-fit allocation method. Uses metaslab_df_alloc_threshold
1252  * and metaslab_df_free_pct to determine when to switch the allocation scheme.
1253  * ==========================================================================
1254  */
1255 static uint64_t
1256 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1257 {
1258 	/*
1259 	 * Find the largest power of 2 block size that evenly divides the
1260 	 * requested size. This is used to try to allocate blocks with similar
1261 	 * alignment from the same area of the metaslab (i.e. same cursor
1262 	 * bucket) but it does not guarantee that other allocation sizes
1263 	 * may exist in the same region.
1264 	 */
1265 	uint64_t align = size & -size;
1266 	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1267 	range_tree_t *rt = msp->ms_tree;
1268 	avl_tree_t *t = &rt->rt_root;
1269 	uint64_t max_size = metaslab_block_maxsize(msp);
1270 	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
1271 
1272 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1273 	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
1274 
1275 	if (max_size < size)
1276 		return (-1ULL);
1277 
1278 	/*
1279 	 * If we're running low on space switch to using the size
1280 	 * sorted AVL tree (best-fit).
1281 	 */
1282 	if (max_size < metaslab_df_alloc_threshold ||
1283 	    free_pct < metaslab_df_free_pct) {
1284 		t = &msp->ms_size_tree;
1285 		*cursor = 0;
1286 	}
1287 
1288 	return (metaslab_block_picker(t, cursor, size, 1ULL));
1289 }
1290 
1291 static metaslab_ops_t metaslab_df_ops = {
1292 	metaslab_df_alloc
1293 };
1294 
1295 /*
1296  * ==========================================================================
1297  * Cursor fit block allocator -
1298  * Select the largest region in the metaslab, set the cursor to the beginning
1299  * of the range and the cursor_end to the end of the range. As allocations
1300  * are made advance the cursor. Continue allocating from the cursor until
1301  * the range is exhausted and then find a new range.
1302  * ==========================================================================
1303  */
1304 static uint64_t
1305 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1306 {
1307 	range_tree_t *rt = msp->ms_tree;
1308 	avl_tree_t *t = &msp->ms_size_tree;
1309 	uint64_t *cursor = &msp->ms_lbas[0];
1310 	uint64_t *cursor_end = &msp->ms_lbas[1];
1311 	uint64_t offset = 0;
1312 
1313 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1314 	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
1315 
1316 	ASSERT3U(*cursor_end, >=, *cursor);
1317 
1318 	if ((*cursor + size) > *cursor_end) {
1319 		range_seg_t *rs;
1320 
1321 		rs = avl_last(&msp->ms_size_tree);
1322 		if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
1323 			return (-1ULL);
1324 
1325 		*cursor = rs->rs_start;
1326 		*cursor_end = rs->rs_end;
1327 	}
1328 
1329 	offset = *cursor;
1330 	*cursor += size;
1331 
1332 	return (offset);
1333 }
1334 
1335 static metaslab_ops_t metaslab_cf_ops = {
1336 	metaslab_cf_alloc
1337 };
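/*
 * Worked example of the cursor-fit behavior above (editor's illustration
 * with made-up offsets): if the largest free segment spans [1MB, 2MB), the
 * first 128KB allocation returns 1MB and advances the cursor to 1MB + 128KB;
 * later allocations continue from the cursor until less than the requested
 * size remains before cursor_end, at which point the (new) largest segment
 * is selected and the cursors are reset to it.
 */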
1338 
1339 /*
1340  * ==========================================================================
1341  * New dynamic fit allocator -
1342  * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1343  * contiguous blocks. If no region is found then just use the largest segment
1344  * that remains.
1345  * ==========================================================================
1346  */
1347 
1348 /*
1349  * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1350  * to request from the allocator.
1351  */
1352 uint64_t metaslab_ndf_clump_shift = 4;
1353 
1354 static uint64_t
1355 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1356 {
1357 	avl_tree_t *t = &msp->ms_tree->rt_root;
1358 	avl_index_t where;
1359 	range_seg_t *rs, rsearch;
1360 	uint64_t hbit = highbit64(size);
1361 	uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1362 	uint64_t max_size = metaslab_block_maxsize(msp);
1363 
1364 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1365 	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
1366 
1367 	if (max_size < size)
1368 		return (-1ULL);
1369 
1370 	rsearch.rs_start = *cursor;
1371 	rsearch.rs_end = *cursor + size;
1372 
1373 	rs = avl_find(t, &rsearch, &where);
1374 	if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
1375 		t = &msp->ms_size_tree;
1376 
1377 		rsearch.rs_start = 0;
1378 		rsearch.rs_end = MIN(max_size,
1379 		    1ULL << (hbit + metaslab_ndf_clump_shift));
1380 		rs = avl_find(t, &rsearch, &where);
1381 		if (rs == NULL)
1382 			rs = avl_nearest(t, where, AVL_AFTER);
1383 		ASSERT(rs != NULL);
1384 	}
1385 
1386 	if ((rs->rs_end - rs->rs_start) >= size) {
1387 		*cursor = rs->rs_start + size;
1388 		return (rs->rs_start);
1389 	}
1390 	return (-1ULL);
1391 }
1392 
1393 static metaslab_ops_t metaslab_ndf_ops = {
1394 	metaslab_ndf_alloc
1395 };
1396 
1397 metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
1398 
1399 /*
1400  * ==========================================================================
1401  * Metaslabs
1402  * ==========================================================================
1403  */
1404 
1405 /*
1406  * Wait for any in-progress metaslab loads to complete.
1407  */
1408 void
1409 metaslab_load_wait(metaslab_t *msp)
1410 {
1411 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1412 
1413 	while (msp->ms_loading) {
1414 		ASSERT(!msp->ms_loaded);
1415 		cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1416 	}
1417 }
1418 
1419 int
1420 metaslab_load(metaslab_t *msp)
1421 {
1422 	int error = 0;
1423 	boolean_t success = B_FALSE;
1424 
1425 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1426 	ASSERT(!msp->ms_loaded);
1427 	ASSERT(!msp->ms_loading);
1428 
1429 	msp->ms_loading = B_TRUE;
1430 
1431 	/*
1432 	 * If the space map has not been allocated yet, then treat
1433 	 * all the space in the metaslab as free and add it to the
1434 	 * ms_tree.
1435 	 */
1436 	if (msp->ms_sm != NULL)
1437 		error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
1438 	else
1439 		range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);
1440 
1441 	success = (error == 0);
1442 	msp->ms_loading = B_FALSE;
1443 
1444 	if (success) {
1445 		ASSERT3P(msp->ms_group, !=, NULL);
1446 		msp->ms_loaded = B_TRUE;
1447 
1448 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1449 			range_tree_walk(msp->ms_defertree[t],
1450 			    range_tree_remove, msp->ms_tree);
1451 		}
1452 		msp->ms_max_size = metaslab_block_maxsize(msp);
1453 	}
1454 	cv_broadcast(&msp->ms_load_cv);
1455 	return (error);
1456 }
1457 
1458 void
1459 metaslab_unload(metaslab_t *msp)
1460 {
1461 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1462 	range_tree_vacate(msp->ms_tree, NULL, NULL);
1463 	msp->ms_loaded = B_FALSE;
1464 	msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
1465 	msp->ms_max_size = 0;
1466 }
1467 
1468 int
1469 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
1470     metaslab_t **msp)
1471 {
1472 	vdev_t *vd = mg->mg_vd;
1473 	objset_t *mos = vd->vdev_spa->spa_meta_objset;
1474 	metaslab_t *ms;
1475 	int error;
1476 
1477 	ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
1478 	mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
1479 	cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
1480 	ms->ms_id = id;
1481 	ms->ms_start = id << vd->vdev_ms_shift;
1482 	ms->ms_size = 1ULL << vd->vdev_ms_shift;
1483 
1484 	/*
1485 	 * We only open space map objects that already exist. All others
1486 	 * will be opened when we finally allocate an object for them.
1487 	 */
1488 	if (object != 0) {
1489 		error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
1490 		    ms->ms_size, vd->vdev_ashift, &ms->ms_lock);
1491 
1492 		if (error != 0) {
1493 			kmem_free(ms, sizeof (metaslab_t));
1494 			return (error);
1495 		}
1496 
1497 		ASSERT(ms->ms_sm != NULL);
1498 	}
1499 
1500 	/*
1501 	 * We create the main range tree here, but we don't create the
1502 	 * alloctree and freetree until metaslab_sync_done().  This serves
1503 	 * two purposes: it allows metaslab_sync_done() to detect the
1504 	 * addition of new space; and for debugging, it ensures that we'd
1505 	 * data fault on any attempt to use this metaslab before it's ready.
1506 	 */
1507 	ms->ms_tree = range_tree_create(&metaslab_rt_ops, ms, &ms->ms_lock);
1508 	metaslab_group_add(mg, ms);
1509 
1510 	metaslab_set_fragmentation(ms);
1511 
1512 	/*
1513 	 * If we're opening an existing pool (txg == 0) or creating
1514 	 * a new one (txg == TXG_INITIAL), all space is available now.
1515 	 * If we're adding space to an existing pool, the new space
1516 	 * does not become available until after this txg has synced.
1517 	 * The metaslab's weight will also be initialized when we sync
1518 	 * out this txg. This ensures that we don't attempt to allocate
1519 	 * from it before we have initialized it completely.
1520 	 */
1521 	if (txg <= TXG_INITIAL)
1522 		metaslab_sync_done(ms, 0);
1523 
1524 	/*
1525 	 * If metaslab_debug_load is set and we're initializing a metaslab
1526 	 * that has an allocated space map object then load its space
1527 	 * map so that we can verify frees.
1528 	 */
1529 	if (metaslab_debug_load && ms->ms_sm != NULL) {
1530 		mutex_enter(&ms->ms_lock);
1531 		VERIFY0(metaslab_load(ms));
1532 		mutex_exit(&ms->ms_lock);
1533 	}
1534 
1535 	if (txg != 0) {
1536 		vdev_dirty(vd, 0, NULL, txg);
1537 		vdev_dirty(vd, VDD_METASLAB, ms, txg);
1538 	}
1539 
1540 	*msp = ms;
1541 
1542 	return (0);
1543 }
1544 
1545 void
1546 metaslab_fini(metaslab_t *msp)
1547 {
1548 	metaslab_group_t *mg = msp->ms_group;
1549 
1550 	metaslab_group_remove(mg, msp);
1551 
1552 	mutex_enter(&msp->ms_lock);
1553 	VERIFY(msp->ms_group == NULL);
1554 	vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
1555 	    0, -msp->ms_size);
1556 	space_map_close(msp->ms_sm);
1557 
1558 	metaslab_unload(msp);
1559 	range_tree_destroy(msp->ms_tree);
1560 
1561 	for (int t = 0; t < TXG_SIZE; t++) {
1562 		range_tree_destroy(msp->ms_alloctree[t]);
1563 		range_tree_destroy(msp->ms_freetree[t]);
1564 	}
1565 
1566 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1567 		range_tree_destroy(msp->ms_defertree[t]);
1568 	}
1569 
1570 	ASSERT0(msp->ms_deferspace);
1571 
1572 	mutex_exit(&msp->ms_lock);
1573 	cv_destroy(&msp->ms_load_cv);
1574 	mutex_destroy(&msp->ms_lock);
1575 
1576 	kmem_free(msp, sizeof (metaslab_t));
1577 }
1578 
1579 #define	FRAGMENTATION_TABLE_SIZE	17
1580 
1581 /*
1582  * This table defines a segment size based fragmentation metric that will
1583  * allow each metaslab to derive its own fragmentation value. This is done
1584  * by calculating the space in each bucket of the spacemap histogram and
1585  * multiplying that by the fragmentation metric in this table. Doing
1586  * this for all buckets and dividing it by the total amount of free
1587  * space in this metaslab (i.e. the total free space in all buckets) gives
1588  * us the fragmentation metric. This means that a high fragmentation metric
1589  * equates to most of the free space being comprised of small segments.
1590  * Conversely, if the metric is low, then most of the free space is in
1591  * large segments. A 10% change in fragmentation equates to approximately
1592  * double the number of segments.
1593  *
1594  * This table defines 0% fragmented space using 16MB segments. Testing has
1595  * shown that segments that are greater than or equal to 16MB do not suffer
1596  * from drastic performance problems. Using this value, we derive the rest
1597  * of the table. Since the fragmentation value is never stored on disk, it
1598  * is possible to change these calculations in the future.
1599  */
1600 int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
1601 	100,	/* 512B	*/
1602 	100,	/* 1K	*/
1603 	98,	/* 2K	*/
1604 	95,	/* 4K	*/
1605 	90,	/* 8K	*/
1606 	80,	/* 16K	*/
1607 	70,	/* 32K	*/
1608 	60,	/* 64K	*/
1609 	50,	/* 128K	*/
1610 	40,	/* 256K	*/
1611 	30,	/* 512K	*/
1612 	20,	/* 1M	*/
1613 	15,	/* 2M	*/
1614 	10,	/* 4M	*/
1615 	5,	/* 8M	*/
1616 	0	/* 16M	*/
1617 };
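/*
 * Worked example using the table above (editor's illustration): a metaslab
 * whose free space is 60% in 512K segments (factor 30) and 40% in 8M
 * segments (factor 5) gets a fragmentation value of
 *
 *	(60 * 30 + 40 * 5) / 100 = 20
 *
 * i.e. a space-weighted average of the per-bucket factors, as computed in
 * metaslab_set_fragmentation().
 */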
1618 
1619 /*
1620  * Calculate the metaslab's fragmentation metric. A value
1621  * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
1622  * not support this metric. Otherwise, the value should be in the
1623  * range [0, 100].
1624  */
1625 static void
1626 metaslab_set_fragmentation(metaslab_t *msp)
1627 {
1628 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1629 	uint64_t fragmentation = 0;
1630 	uint64_t total = 0;
1631 	boolean_t feature_enabled = spa_feature_is_enabled(spa,
1632 	    SPA_FEATURE_SPACEMAP_HISTOGRAM);
1633 
1634 	if (!feature_enabled) {
1635 		msp->ms_fragmentation = ZFS_FRAG_INVALID;
1636 		return;
1637 	}
1638 
1639 	/*
1640 	 * A null space map means that the entire metaslab is free
1641 	 * and thus is not fragmented.
1642 	 */
1643 	if (msp->ms_sm == NULL) {
1644 		msp->ms_fragmentation = 0;
1645 		return;
1646 	}
1647 
1648 	/*
1649 	 * If this metaslab's space map has not been upgraded, flag it
1650 	 * so that we upgrade next time we encounter it.
1651 	 */
1652 	if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
1653 		uint64_t txg = spa_syncing_txg(spa);
1654 		vdev_t *vd = msp->ms_group->mg_vd;
1655 
1656 		if (spa_writeable(spa)) {
1657 			msp->ms_condense_wanted = B_TRUE;
1658 			vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1659 			spa_dbgmsg(spa, "txg %llu, requesting force condense: "
1660 			    "msp %p, vd %p", txg, msp, vd);
1661 		}
1662 		msp->ms_fragmentation = ZFS_FRAG_INVALID;
1663 		return;
1664 	}
1665 
1666 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1667 		uint64_t space = 0;
1668 		uint8_t shift = msp->ms_sm->sm_shift;
1669 
1670 		int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
1671 		    FRAGMENTATION_TABLE_SIZE - 1);
1672 
1673 		if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
1674 			continue;
1675 
1676 		space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
1677 		total += space;
1678 
1679 		ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
1680 		fragmentation += space * zfs_frag_table[idx];
1681 	}
1682 
1683 	if (total > 0)
1684 		fragmentation /= total;
1685 	ASSERT3U(fragmentation, <=, 100);
1686 
1687 	msp->ms_fragmentation = fragmentation;
1688 }
1689 
1690 /*
1691  * Compute a weight -- a selection preference value -- for the given metaslab.
1692  * This is based on the amount of free space, the level of fragmentation,
1693  * the LBA range, and whether the metaslab is loaded.
1694  */
1695 static uint64_t
1696 metaslab_space_weight(metaslab_t *msp)
1697 {
1698 	metaslab_group_t *mg = msp->ms_group;
1699 	vdev_t *vd = mg->mg_vd;
1700 	uint64_t weight, space;
1701 
1702 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1703 	ASSERT(!vd->vdev_removing);
1704 
1705 	/*
1706 	 * The baseline weight is the metaslab's free space.
1707 	 */
1708 	space = msp->ms_size - space_map_allocated(msp->ms_sm);
1709 
1710 	if (metaslab_fragmentation_factor_enabled &&
1711 	    msp->ms_fragmentation != ZFS_FRAG_INVALID) {
1712 		/*
1713 		 * Use the fragmentation information to inversely scale
1714 		 * down the baseline weight. We need to ensure that we
1715 		 * don't exclude this metaslab completely when it's 100%
1716 		 * fragmented. To avoid this we reduce the fragmented value
1717 		 * by 1.
1718 		 */
1719 		space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
1720 
1721 		/*
1722 		 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
1723 		 * this metaslab again. The fragmentation metric may have
1724 		 * decreased the space to something smaller than
1725 		 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
1726 		 * so that we can consume any remaining space.
1727 		 */
1728 		if (space > 0 && space < SPA_MINBLOCKSIZE)
1729 			space = SPA_MINBLOCKSIZE;
1730 	}
1731 	weight = space;
1732 
1733 	/*
1734 	 * Modern disks have uniform bit density and constant angular velocity.
1735 	 * Therefore, the outer recording zones are faster (higher bandwidth)
1736 	 * than the inner zones by the ratio of outer to inner track diameter,
1737 	 * which is typically around 2:1.  We account for this by assigning
1738 	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1739 	 * In effect, this means that we'll select the metaslab with the most
1740 	 * free bandwidth rather than simply the one with the most free space.
1741 	 */
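	/*
	 * For illustration (hypothetical vdev with 200 metaslabs):
	 * metaslab 0 would be weighted at 2x its free space, metaslab 100
	 * at 1.5x, and metaslab 199 at just over 1x.
	 */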
1742 	if (metaslab_lba_weighting_enabled) {
1743 		weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
1744 		ASSERT(weight >= space && weight <= 2 * space);
1745 	}
1746 
1747 	/*
1748 	 * If this metaslab is one we're actively using, adjust its
1749 	 * weight to make it preferable to any inactive metaslab so
1750 	 * we'll polish it off. If the fragmentation on this metaslab
1751 	 * has exceeded our threshold, then don't mark it active.
1752 	 */
1753 	if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
1754 	    msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
1755 		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
1756 	}
1757 
1758 	WEIGHT_SET_SPACEBASED(weight);
1759 	return (weight);
1760 }
1761 
1762 /*
1763  * Return the weight of the specified metaslab, according to the segment-based
1764  * weighting algorithm. The metaslab must be loaded. This function can
1765  * be called within a sync pass since it relies only on the metaslab's
1766  * range tree which is always accurate when the metaslab is loaded.
1767  */
1768 static uint64_t
1769 metaslab_weight_from_range_tree(metaslab_t *msp)
1770 {
1771 	uint64_t weight = 0;
1772 	uint32_t segments = 0;
1773 
1774 	ASSERT(msp->ms_loaded);
1775 
1776 	for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
1777 	    i--) {
1778 		uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
1779 		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
1780 
1781 		segments <<= 1;
1782 		segments += msp->ms_tree->rt_histogram[i];
1783 
1784 		/*
1785 		 * The range tree provides more precision than the space map
1786 		 * and must be downgraded so that all values fit within the
1787 		 * space map's histogram. This allows us to compare loaded
1788 		 * vs. unloaded metaslabs to determine which metaslab is
1789 		 * considered "best".
1790 		 */
1791 		if (i > max_idx)
1792 			continue;
1793 
1794 		if (segments != 0) {
1795 			WEIGHT_SET_COUNT(weight, segments);
1796 			WEIGHT_SET_INDEX(weight, i);
1797 			WEIGHT_SET_ACTIVE(weight, 0);
1798 			break;
1799 		}
1800 	}
1801 	return (weight);
1802 }
1803 
1804 /*
1805  * Calculate the weight based on the on-disk histogram. This should only
1806  * be called after a sync pass has completely finished since the on-disk
1807  * information is updated in metaslab_sync().
1808  */
1809 static uint64_t
1810 metaslab_weight_from_spacemap(metaslab_t *msp)
1811 {
1812 	uint64_t weight = 0;
1813 
1814 	for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
1815 		if (msp->ms_sm->sm_phys->smp_histogram[i] != 0) {
1816 			WEIGHT_SET_COUNT(weight,
1817 			    msp->ms_sm->sm_phys->smp_histogram[i]);
1818 			WEIGHT_SET_INDEX(weight, i +
1819 			    msp->ms_sm->sm_shift);
1820 			WEIGHT_SET_ACTIVE(weight, 0);
1821 			break;
1822 		}
1823 	}
1824 	return (weight);
1825 }
1826 
1827 /*
1828  * Compute a segment-based weight for the specified metaslab. The weight
1829  * is determined by the highest bucket in the histogram. The information
1830  * for the highest bucket is encoded into the weight value.
1831  */
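/*
 * For example (hypothetical values): if the largest remaining free
 * segments fall in the 1MB histogram bucket (index 20) and there are 12
 * of them, the computed weight encodes count = 12 and index = 20.
 */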
1832 static uint64_t
1833 metaslab_segment_weight(metaslab_t *msp)
1834 {
1835 	metaslab_group_t *mg = msp->ms_group;
1836 	uint64_t weight = 0;
1837 	uint8_t shift = mg->mg_vd->vdev_ashift;
1838 
1839 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1840 
1841 	/*
1842 	 * The metaslab is completely free.
1843 	 */
1844 	if (space_map_allocated(msp->ms_sm) == 0) {
1845 		int idx = highbit64(msp->ms_size) - 1;
1846 		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
1847 
1848 		if (idx < max_idx) {
1849 			WEIGHT_SET_COUNT(weight, 1ULL);
1850 			WEIGHT_SET_INDEX(weight, idx);
1851 		} else {
1852 			WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
1853 			WEIGHT_SET_INDEX(weight, max_idx);
1854 		}
1855 		WEIGHT_SET_ACTIVE(weight, 0);
1856 		ASSERT(!WEIGHT_IS_SPACEBASED(weight));
1857 
1858 		return (weight);
1859 	}
1860 
1861 	ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
1862 
1863 	/*
1864 	 * If the metaslab is fully allocated then just make the weight 0.
1865 	 */
1866 	if (space_map_allocated(msp->ms_sm) == msp->ms_size)
1867 		return (0);
1868 	/*
1869 	 * If the metaslab is already loaded, then use the range tree to
1870 	 * determine the weight. Otherwise, we rely on the space map information
1871 	 * to generate the weight.
1872 	 */
1873 	if (msp->ms_loaded) {
1874 		weight = metaslab_weight_from_range_tree(msp);
1875 	} else {
1876 		weight = metaslab_weight_from_spacemap(msp);
1877 	}
1878 
1879 	/*
1880 	 * If the metaslab was active the last time we calculated its weight
1881 	 * then keep it active. We want to consume the entire region that
1882 	 * is associated with this weight.
1883 	 */
1884 	if (msp->ms_activation_weight != 0 && weight != 0)
1885 		WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
1886 	return (weight);
1887 }
1888 
1889 /*
1890  * Determine if we should attempt to allocate from this metaslab. If the
1891  * metaslab has a maximum size then we can quickly determine if the desired
1892  * allocation size can be satisfied. Otherwise, if we're using segment-based
1893  * weighting then we can determine the maximum allocation that this metaslab
1894  * can accommodate based on the index encoded in the weight. If we're using
1895  * space-based weights then rely on the entire weight (excluding the weight
1896  * space-based weights then we rely on the entire weight (excluding the weight
1897  */
1898 boolean_t
1899 metaslab_should_allocate(metaslab_t *msp, uint64_t asize)
1900 {
1901 	boolean_t should_allocate;
1902 
1903 	if (msp->ms_max_size != 0)
1904 		return (msp->ms_max_size >= asize);
1905 
1906 	if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
1907 		/*
1908 		 * The metaslab segment weight indicates segments in the
1909 		 * range [2^i, 2^(i+1)), where i is the index in the weight.
1910 		 * Since the asize might be in the middle of the range, we
1911 		 * should attempt the allocation if asize < 2^(i+1).
1912 		 */
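		/*
		 * For instance (hypothetical weight): an index of 17 means
		 * the largest known segments lie in [128K, 256K), so any
		 * asize below 256K is worth attempting.
		 */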
1913 		should_allocate = (asize <
1914 		    1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
1915 	} else {
1916 		should_allocate = (asize <=
1917 		    (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
1918 	}
1919 	return (should_allocate);
1920 }
1921 
1922 static uint64_t
1923 metaslab_weight(metaslab_t *msp)
1924 {
1925 	vdev_t *vd = msp->ms_group->mg_vd;
1926 	spa_t *spa = vd->vdev_spa;
1927 	uint64_t weight;
1928 
1929 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1930 
1931 	/*
1932 	 * This vdev is in the process of being removed so there is nothing
1933 	 * for us to do here.
1934 	 */
1935 	if (vd->vdev_removing) {
1936 		ASSERT0(space_map_allocated(msp->ms_sm));
1937 		ASSERT0(vd->vdev_ms_shift);
1938 		return (0);
1939 	}
1940 
1941 	metaslab_set_fragmentation(msp);
1942 
1943 	/*
1944 	 * Update the maximum size if the metaslab is loaded. This will
1945 	 * ensure that we get an accurate maximum size if newly freed space
1946 	 * has been added back into the free tree.
1947 	 */
1948 	if (msp->ms_loaded)
1949 		msp->ms_max_size = metaslab_block_maxsize(msp);
1950 
1951 	/*
1952 	 * Segment-based weighting requires space map histogram support.
1953 	 */
1954 	if (zfs_metaslab_segment_weight_enabled &&
1955 	    spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
1956 	    (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
1957 	    sizeof (space_map_phys_t))) {
1958 		weight = metaslab_segment_weight(msp);
1959 	} else {
1960 		weight = metaslab_space_weight(msp);
1961 	}
1962 	return (weight);
1963 }
1964 
1965 static int
1966 metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
1967 {
1968 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1969 
1970 	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
1971 		metaslab_load_wait(msp);
1972 		if (!msp->ms_loaded) {
1973 			int error = metaslab_load(msp);
1974 			if (error) {
1975 				metaslab_group_sort(msp->ms_group, msp, 0);
1976 				return (error);
1977 			}
1978 		}
1979 
1980 		msp->ms_activation_weight = msp->ms_weight;
1981 		metaslab_group_sort(msp->ms_group, msp,
1982 		    msp->ms_weight | activation_weight);
1983 	}
1984 	ASSERT(msp->ms_loaded);
1985 	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
1986 
1987 	return (0);
1988 }
1989 
1990 static void
1991 metaslab_passivate(metaslab_t *msp, uint64_t weight)
1992 {
1993 	uint64_t size = weight & ~METASLAB_WEIGHT_TYPE;
1994 
1995 	/*
1996 	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1997 	 * this metaslab again.  In that case, it had better be empty,
1998 	 * or we would be leaving space on the table.
1999 	 */
2000 	ASSERT(size >= SPA_MINBLOCKSIZE ||
2001 	    range_tree_space(msp->ms_tree) == 0);
2002 	ASSERT0(weight & METASLAB_ACTIVE_MASK);
2003 
2004 	msp->ms_activation_weight = 0;
2005 	metaslab_group_sort(msp->ms_group, msp, weight);
2006 	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
2007 }
2008 
2009 /*
2010  * Segment-based metaslabs are activated once and remain active until
2011  * we either fail an allocation attempt (similar to space-based metaslabs)
2012  * or have exhausted the free space in zfs_metaslab_switch_threshold
2013  * buckets since the metaslab was activated. This function checks to see
2014  * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
2015  * metaslab and passivates it proactively. This allows us to select a
2016  * metaslab with a larger contiguous region, if any remain within this
2017  * metaslab group. If we're in sync pass > 1, then we continue using this
2018  * metaslab so that we don't dirty more blocks and cause more sync passes.
2019  */
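/*
 * For example (hypothetical values): with zfs_metaslab_switch_threshold
 * set to 2, a metaslab activated at weight index 20 (1MB segments) is
 * passivated here once its largest remaining free segments drop to
 * index 18 (256K) or below.
 */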
2020 void
2021 metaslab_segment_may_passivate(metaslab_t *msp)
2022 {
2023 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2024 
2025 	if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
2026 		return;
2027 
2028 	/*
2029 	 * Since we are in the middle of a sync pass, the most accurate
2030 	 * information that is accessible to us is the in-core range tree
2031 	 * histogram; calculate the new weight based on that information.
2032 	 */
2033 	uint64_t weight = metaslab_weight_from_range_tree(msp);
2034 	int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
2035 	int current_idx = WEIGHT_GET_INDEX(weight);
2036 
2037 	if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
2038 		metaslab_passivate(msp, weight);
2039 }
2040 
2041 static void
2042 metaslab_preload(void *arg)
2043 {
2044 	metaslab_t *msp = arg;
2045 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2046 
2047 	ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
2048 
2049 	mutex_enter(&msp->ms_lock);
2050 	metaslab_load_wait(msp);
2051 	if (!msp->ms_loaded)
2052 		(void) metaslab_load(msp);
2053 	msp->ms_selected_txg = spa_syncing_txg(spa);
2054 	mutex_exit(&msp->ms_lock);
2055 }
2056 
2057 static void
2058 metaslab_group_preload(metaslab_group_t *mg)
2059 {
2060 	spa_t *spa = mg->mg_vd->vdev_spa;
2061 	metaslab_t *msp;
2062 	avl_tree_t *t = &mg->mg_metaslab_tree;
2063 	int m = 0;
2064 
2065 	if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
2066 		taskq_wait(mg->mg_taskq);
2067 		return;
2068 	}
2069 
2070 	mutex_enter(&mg->mg_lock);
2071 	/*
2072 	 * Load the next potential metaslabs
2073 	 */
2074 	for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
2075 		/*
2076 		 * We preload only the maximum number of metaslabs specified
2077 		 * by metaslab_preload_limit. If a metaslab is being forced
2078 		 * to condense then we preload it too. This will ensure
2079 		 * that force condensing happens in the next txg.
2080 		 */
2081 		if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
2082 			continue;
2083 		}
2084 
2085 		VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
2086 		    msp, TQ_SLEEP) != 0);
2087 	}
2088 	mutex_exit(&mg->mg_lock);
2089 }
2090 
2091 /*
2092  * Determine if the space map's on-disk footprint is past our tolerance
2093  * for inefficiency. We would like to use the following criteria to make
2094  * our decision:
2095  *
2096  * 1. The size of the space map object should not dramatically increase as a
2097  * result of writing out the free space range tree.
2098  *
2099  * 2. The minimal on-disk space map representation is zfs_condense_pct/100
2100  * times the size of the free space range tree representation
2101  * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
2102  *
2103  * 3. The on-disk size of the space map should actually decrease.
2104  *
2105  * Checking the first condition is tricky since we don't want to walk
2106  * the entire AVL tree calculating the estimated on-disk size. Instead we
2107  * use the size-ordered range tree in the metaslab and calculate the
2108  * size required to write out the largest segment in our free tree. If the
2109  * size required to represent that segment on disk is larger than the space
2110  * map object then we avoid condensing this map.
2111  *
2112  * To determine the second criterion we use a best-case estimate and assume
2113  * each segment can be represented on-disk as a single 64-bit entry. We refer
2114  * to this best-case estimate as the space map's minimal form.
2115  *
2116  * Unfortunately, we cannot compute the on-disk size of the space map in this
2117  * context because we cannot accurately compute the effects of compression, etc.
2118  * Instead, we apply the heuristic described in the block comment for
2119  * zfs_metaslab_condense_block_threshold - we only condense if the space used
2120  * is greater than a threshold number of blocks.
2121  */
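/*
 * As a rough numeric sketch (hypothetical sizes): with a 4K space map
 * block size and zfs_metaslab_condense_block_threshold = 4, only space
 * maps larger than 16K on disk are candidates, and with
 * zfs_condense_pct = 200 the on-disk size must also be at least twice
 * the minimal one-entry-per-segment representation.
 */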
2122 static boolean_t
2123 metaslab_should_condense(metaslab_t *msp)
2124 {
2125 	space_map_t *sm = msp->ms_sm;
2126 	range_seg_t *rs;
2127 	uint64_t size, entries, segsz, object_size, optimal_size, record_size;
2128 	dmu_object_info_t doi;
2129 	uint64_t vdev_blocksize = 1 << msp->ms_group->mg_vd->vdev_ashift;
2130 
2131 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2132 	ASSERT(msp->ms_loaded);
2133 
2134 	/*
2135 	 * Use the ms_size_tree range tree, which is ordered by size, to
2136 	 * obtain the largest segment in the free tree. We always condense
2137 	 * metaslabs that are empty and metaslabs for which a condense
2138 	 * request has been made.
2139 	 */
2140 	rs = avl_last(&msp->ms_size_tree);
2141 	if (rs == NULL || msp->ms_condense_wanted)
2142 		return (B_TRUE);
2143 
2144 	/*
2145 	 * Calculate the number of 64-bit entries this segment would
2146 	 * require when written to disk. If this single segment would be
2147 	 * larger on-disk than the entire current on-disk structure, then
2148 	 * clearly condensing will increase the on-disk structure size.
2149 	 */
2150 	size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
2151 	entries = size / (MIN(size, SM_RUN_MAX));
2152 	segsz = entries * sizeof (uint64_t);
2153 
2154 	optimal_size = sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root);
2155 	object_size = space_map_length(msp->ms_sm);
2156 
2157 	dmu_object_info_from_db(sm->sm_dbuf, &doi);
2158 	record_size = MAX(doi.doi_data_block_size, vdev_blocksize);
2159 
2160 	return (segsz <= object_size &&
2161 	    object_size >= (optimal_size * zfs_condense_pct / 100) &&
2162 	    object_size > zfs_metaslab_condense_block_threshold * record_size);
2163 }
2164 
2165 /*
2166  * Condense the on-disk space map representation to its minimized form.
2167  * The minimized form consists of a small number of allocations followed by
2168  * the entries of the free range tree.
2169  */
2170 static void
2171 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
2172 {
2173 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2174 	range_tree_t *freetree = msp->ms_freetree[txg & TXG_MASK];
2175 	range_tree_t *condense_tree;
2176 	space_map_t *sm = msp->ms_sm;
2177 
2178 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2179 	ASSERT3U(spa_sync_pass(spa), ==, 1);
2180 	ASSERT(msp->ms_loaded);
2181 
2182 
2183 	spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, vdev id %llu, "
2184 	    "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
2185 	    msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
2186 	    msp->ms_group->mg_vd->vdev_spa->spa_name,
2187 	    space_map_length(msp->ms_sm), avl_numnodes(&msp->ms_tree->rt_root),
2188 	    msp->ms_condense_wanted ? "TRUE" : "FALSE");
2189 
2190 	msp->ms_condense_wanted = B_FALSE;
2191 
2192 	/*
2193 	 * Create a range tree that is 100% allocated. We remove segments
2194 	 * that have been freed in this txg, any deferred frees that exist,
2195 	 * and any allocation in the future. Removing segments should be
2196 	 * a relatively inexpensive operation since we expect these trees to
2197 	 * have a small number of nodes.
2198 	 */
2199 	condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock);
2200 	range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
2201 
2202 	/*
2203 	 * Remove what's been freed in this txg from the condense_tree.
2204 	 * Since we're in sync_pass 1, we know that all the frees from
2205 	 * this txg are in the freetree.
2206 	 */
2207 	range_tree_walk(freetree, range_tree_remove, condense_tree);
2208 
2209 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2210 		range_tree_walk(msp->ms_defertree[t],
2211 		    range_tree_remove, condense_tree);
2212 	}
2213 
2214 	for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
2215 		range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK],
2216 		    range_tree_remove, condense_tree);
2217 	}
2218 
2219 	/*
2220 	 * We're about to drop the metaslab's lock thus allowing
2221 	 * other consumers to change its content. Set the
2222 	 * metaslab's ms_condensing flag to ensure that
2223 	 * allocations on this metaslab do not occur while we're
2224 	 * in the middle of committing it to disk. This is only critical
2225 	 * for the ms_tree as all other range trees use per txg
2226 	 * views of their content.
2227 	 */
2228 	msp->ms_condensing = B_TRUE;
2229 
2230 	mutex_exit(&msp->ms_lock);
2231 	space_map_truncate(sm, tx);
2232 	mutex_enter(&msp->ms_lock);
2233 
2234 	/*
2235 	 * While we would ideally like to create a space map representation
2236 	 * that consists only of allocation records, doing so can be
2237 	 * prohibitively expensive because the in-core free tree can be
2238 	 * large, and therefore computationally expensive to subtract
2239 	 * from the condense_tree. Instead we sync out two trees, a cheap
2240 	 * allocation only tree followed by the in-core free tree. While not
2241 	 * optimal, this is typically close to optimal, and much cheaper to
2242 	 * compute.
2243 	 */
2244 	space_map_write(sm, condense_tree, SM_ALLOC, tx);
2245 	range_tree_vacate(condense_tree, NULL, NULL);
2246 	range_tree_destroy(condense_tree);
2247 
2248 	space_map_write(sm, msp->ms_tree, SM_FREE, tx);
2249 	msp->ms_condensing = B_FALSE;
2250 }
2251 
2252 /*
2253  * Write a metaslab to disk in the context of the specified transaction group.
2254  */
2255 void
2256 metaslab_sync(metaslab_t *msp, uint64_t txg)
2257 {
2258 	metaslab_group_t *mg = msp->ms_group;
2259 	vdev_t *vd = mg->mg_vd;
2260 	spa_t *spa = vd->vdev_spa;
2261 	objset_t *mos = spa_meta_objset(spa);
2262 	range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK];
2263 	range_tree_t **freetree = &msp->ms_freetree[txg & TXG_MASK];
2264 	range_tree_t **freed_tree =
2265 	    &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
2266 	dmu_tx_t *tx;
2267 	uint64_t object = space_map_object(msp->ms_sm);
2268 
2269 	ASSERT(!vd->vdev_ishole);
2270 
2271 	/*
2272 	 * This metaslab has just been added so there's no work to do now.
2273 	 */
2274 	if (*freetree == NULL) {
2275 		ASSERT3P(alloctree, ==, NULL);
2276 		return;
2277 	}
2278 
2279 	ASSERT3P(alloctree, !=, NULL);
2280 	ASSERT3P(*freetree, !=, NULL);
2281 	ASSERT3P(*freed_tree, !=, NULL);
2282 
2283 	/*
2284 	 * Normally, we don't want to process a metaslab if there
2285 	 * are no allocations or frees to perform. However, if the metaslab
2286 	 * is being forced to condense we need to let it through.
2287 	 */
2288 	if (range_tree_space(alloctree) == 0 &&
2289 	    range_tree_space(*freetree) == 0 &&
2290 	    !msp->ms_condense_wanted)
2291 		return;
2292 
2293 	/*
2294 	 * The only state that can actually be changing concurrently with
2295 	 * metaslab_sync() is the metaslab's ms_tree.  No other thread can
2296 	 * be modifying this txg's alloctree, freetree, freed_tree, or
2297 	 * space_map_phys_t. Therefore, we only hold ms_lock to satisfy
2298 	 * space map ASSERTs. We drop it whenever we call into the DMU,
2299 	 * because the DMU can call down to us (e.g. via zio_free()) at
2300 	 * any time.
2301 	 */
2302 
2303 	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
2304 
2305 	if (msp->ms_sm == NULL) {
2306 		uint64_t new_object;
2307 
2308 		new_object = space_map_alloc(mos, tx);
2309 		VERIFY3U(new_object, !=, 0);
2310 
2311 		VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
2312 		    msp->ms_start, msp->ms_size, vd->vdev_ashift,
2313 		    &msp->ms_lock));
2314 		ASSERT(msp->ms_sm != NULL);
2315 	}
2316 
2317 	mutex_enter(&msp->ms_lock);
2318 
2319 	/*
2320 	 * Note: metaslab_condense() clears the space map's histogram.
2321 	 * Therefore we must verify and remove this histogram before
2322 	 * condensing.
2323 	 */
2324 	metaslab_group_histogram_verify(mg);
2325 	metaslab_class_histogram_verify(mg->mg_class);
2326 	metaslab_group_histogram_remove(mg, msp);
2327 
2328 	if (msp->ms_loaded && spa_sync_pass(spa) == 1 &&
2329 	    metaslab_should_condense(msp)) {
2330 		metaslab_condense(msp, txg, tx);
2331 	} else {
2332 		space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx);
2333 		space_map_write(msp->ms_sm, *freetree, SM_FREE, tx);
2334 	}
2335 
2336 	if (msp->ms_loaded) {
2337 		/*
2338 		 * When the space map is loaded, we have an accurate
2339 		 * histogram in the range tree. This gives us an opportunity
2340 		 * to bring the space map's histogram up-to-date so we clear
2341 		 * it first before updating it.
2342 		 */
2343 		space_map_histogram_clear(msp->ms_sm);
2344 		space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx);
2345 
2346 		/*
2347 		 * Since we've cleared the histogram we need to add back
2348 		 * any free space that has already been processed, plus
2349 		 * any deferred space. This allows the on-disk histogram
2350 		 * to accurately reflect all free space even if some space
2351 		 * is not yet available for allocation (i.e. deferred).
2352 		 */
2353 		space_map_histogram_add(msp->ms_sm, *freed_tree, tx);
2354 
2355 		/*
2356 		 * Add back any deferred free space that has not been
2357 		 * added back into the in-core free tree yet. This will
2358 		 * ensure that we don't end up with a space map histogram
2359 		 * that is completely empty unless the metaslab is fully
2360 		 * allocated.
2361 		 */
2362 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2363 			space_map_histogram_add(msp->ms_sm,
2364 			    msp->ms_defertree[t], tx);
2365 		}
2366 	}
2367 
2368 	/*
2369 	 * Always add the free space from this sync pass to the space
2370 	 * map histogram. We want to make sure that the on-disk histogram
2371 	 * accounts for all free space. If the space map is not loaded,
2372 	 * then we will lose some accuracy but will correct it the next
2373 	 * time we load the space map.
2374 	 */
2375 	space_map_histogram_add(msp->ms_sm, *freetree, tx);
2376 
2377 	metaslab_group_histogram_add(mg, msp);
2378 	metaslab_group_histogram_verify(mg);
2379 	metaslab_class_histogram_verify(mg->mg_class);
2380 
2381 	/*
2382 	 * For sync pass 1, we avoid traversing this txg's free range tree
2383 	 * and instead will just swap the pointers for freetree and
2384 	 * freed_tree. We can safely do this since the freed_tree is
2385 	 * guaranteed to be empty on the initial pass.
2386 	 */
2387 	if (spa_sync_pass(spa) == 1) {
2388 		range_tree_swap(freetree, freed_tree);
2389 	} else {
2390 		range_tree_vacate(*freetree, range_tree_add, *freed_tree);
2391 	}
2392 	range_tree_vacate(alloctree, NULL, NULL);
2393 
2394 	ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
2395 	ASSERT0(range_tree_space(msp->ms_alloctree[TXG_CLEAN(txg) & TXG_MASK]));
2396 	ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
2397 
2398 	mutex_exit(&msp->ms_lock);
2399 
2400 	if (object != space_map_object(msp->ms_sm)) {
2401 		object = space_map_object(msp->ms_sm);
2402 		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
2403 		    msp->ms_id, sizeof (uint64_t), &object, tx);
2404 	}
2405 	dmu_tx_commit(tx);
2406 }
2407 
2408 /*
2409  * Called after a transaction group has completely synced to mark
2410  * all of the metaslab's free space as usable.
2411  */
2412 void
2413 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
2414 {
2415 	metaslab_group_t *mg = msp->ms_group;
2416 	vdev_t *vd = mg->mg_vd;
2417 	spa_t *spa = vd->vdev_spa;
2418 	range_tree_t **freed_tree;
2419 	range_tree_t **defer_tree;
2420 	int64_t alloc_delta, defer_delta;
2421 	boolean_t defer_allowed = B_TRUE;
2422 
2423 	ASSERT(!vd->vdev_ishole);
2424 
2425 	mutex_enter(&msp->ms_lock);
2426 
2427 	/*
2428 	 * If this metaslab is just becoming available, initialize its
2429 	 * alloctrees, freetrees, and defertree and add its capacity to
2430 	 * the vdev.
2431 	 */
2432 	if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) {
2433 		for (int t = 0; t < TXG_SIZE; t++) {
2434 			ASSERT(msp->ms_alloctree[t] == NULL);
2435 			ASSERT(msp->ms_freetree[t] == NULL);
2436 
2437 			msp->ms_alloctree[t] = range_tree_create(NULL, msp,
2438 			    &msp->ms_lock);
2439 			msp->ms_freetree[t] = range_tree_create(NULL, msp,
2440 			    &msp->ms_lock);
2441 		}
2442 
2443 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2444 			ASSERT(msp->ms_defertree[t] == NULL);
2445 
2446 			msp->ms_defertree[t] = range_tree_create(NULL, msp,
2447 			    &msp->ms_lock);
2448 		}
2449 
2450 		vdev_space_update(vd, 0, 0, msp->ms_size);
2451 	}
2452 
2453 	freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
2454 	defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];
2455 
2456 	uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
2457 	    metaslab_class_get_alloc(spa_normal_class(spa));
2458 	if (free_space <= spa_get_slop_space(spa)) {
2459 		defer_allowed = B_FALSE;
2460 	}
2461 
2462 	defer_delta = 0;
2463 	alloc_delta = space_map_alloc_delta(msp->ms_sm);
2464 	if (defer_allowed) {
2465 		defer_delta = range_tree_space(*freed_tree) -
2466 		    range_tree_space(*defer_tree);
2467 	} else {
2468 		defer_delta -= range_tree_space(*defer_tree);
2469 	}
2470 
2471 	vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
2472 
2473 	ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
2474 	ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
2475 
2476 	/*
2477 	 * If there's a metaslab_load() in progress, wait for it to complete
2478 	 * so that we have a consistent view of the in-core space map.
2479 	 */
2480 	metaslab_load_wait(msp);
2481 
2482 	/*
2483 	 * Move the frees from the defer_tree back to the free
2484 	 * range tree (if it's loaded). Swap the freed_tree and the
2485 	 * defer_tree -- this is safe to do because we've just emptied out
2486 	 * the defer_tree.
2487 	 */
2488 	range_tree_vacate(*defer_tree,
2489 	    msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
2490 	if (defer_allowed) {
2491 		range_tree_swap(freed_tree, defer_tree);
2492 	} else {
2493 		range_tree_vacate(*freed_tree,
2494 		    msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
2495 	}
2496 
2497 	space_map_update(msp->ms_sm);
2498 
2499 	msp->ms_deferspace += defer_delta;
2500 	ASSERT3S(msp->ms_deferspace, >=, 0);
2501 	ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
2502 	if (msp->ms_deferspace != 0) {
2503 		/*
2504 		 * Keep syncing this metaslab until all deferred frees
2505 		 * are back in circulation.
2506 		 */
2507 		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2508 	}
2509 
2510 	/*
2511 	 * Calculate the new weights before unloading any metaslabs.
2512 	 * This will give us the most accurate weighting.
2513 	 */
2514 	metaslab_group_sort(mg, msp, metaslab_weight(msp));
2515 
2516 	/*
2517 	 * If the metaslab is loaded and we've not tried to load or allocate
2518 	 * from it in 'metaslab_unload_delay' txgs, then unload it.
2519 	 */
2520 	if (msp->ms_loaded &&
2521 	    msp->ms_selected_txg + metaslab_unload_delay < txg) {
2522 		for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
2523 			VERIFY0(range_tree_space(
2524 			    msp->ms_alloctree[(txg + t) & TXG_MASK]));
2525 		}
2526 
2527 		if (!metaslab_debug_unload)
2528 			metaslab_unload(msp);
2529 	}
2530 
2531 	mutex_exit(&msp->ms_lock);
2532 }
2533 
2534 void
2535 metaslab_sync_reassess(metaslab_group_t *mg)
2536 {
2537 	metaslab_group_alloc_update(mg);
2538 	mg->mg_fragmentation = metaslab_group_fragmentation(mg);
2539 
2540 	/*
2541 	 * Preload the next potential metaslabs
2542 	 */
2543 	metaslab_group_preload(mg);
2544 }
2545 
2546 static uint64_t
2547 metaslab_distance(metaslab_t *msp, dva_t *dva)
2548 {
2549 	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
2550 	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
2551 	uint64_t start = msp->ms_id;
2552 
2553 	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
2554 		return (1ULL << 63);
2555 
2556 	if (offset < start)
2557 		return ((start - offset) << ms_shift);
2558 	if (offset > start)
2559 		return ((offset - start) << ms_shift);
2560 	return (0);
2561 }
2562 
2563 /*
2564  * ==========================================================================
2565  * Metaslab allocation tracing facility
2566  * ==========================================================================
2567  */
2568 kstat_t *metaslab_trace_ksp;
2569 kstat_named_t metaslab_trace_over_limit;
2570 
2571 void
2572 metaslab_alloc_trace_init(void)
2573 {
2574 	ASSERT(metaslab_alloc_trace_cache == NULL);
2575 	metaslab_alloc_trace_cache = kmem_cache_create(
2576 	    "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
2577 	    0, NULL, NULL, NULL, NULL, NULL, 0);
2578 	metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats",
2579 	    "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
2580 	if (metaslab_trace_ksp != NULL) {
2581 		metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit;
2582 		kstat_named_init(&metaslab_trace_over_limit,
2583 		    "metaslab_trace_over_limit", KSTAT_DATA_UINT64);
2584 		kstat_install(metaslab_trace_ksp);
2585 	}
2586 }
2587 
2588 void
2589 metaslab_alloc_trace_fini(void)
2590 {
2591 	if (metaslab_trace_ksp != NULL) {
2592 		kstat_delete(metaslab_trace_ksp);
2593 		metaslab_trace_ksp = NULL;
2594 	}
2595 	kmem_cache_destroy(metaslab_alloc_trace_cache);
2596 	metaslab_alloc_trace_cache = NULL;
2597 }
2598 
2599 /*
2600  * Add an allocation trace element to the allocation tracing list.
2601  */
2602 static void
2603 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
2604     metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset)
2605 {
2606 	if (!metaslab_trace_enabled)
2607 		return;
2608 
2609 	/*
2610 	 * When the tracing list reaches its maximum we remove
2611 	 * the second element in the list before adding a new one.
2612 	 * By removing the second element we preserve the original
2613 	 * entry as a clue to what allocation steps have already been
2614 	 * performed.
2615 	 */
2616 	if (zal->zal_size == metaslab_trace_max_entries) {
2617 		metaslab_alloc_trace_t *mat_next;
2618 #ifdef DEBUG
2619 		panic("too many entries in allocation list");
2620 #endif
2621 		atomic_inc_64(&metaslab_trace_over_limit.value.ui64);
2622 		zal->zal_size--;
2623 		mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
2624 		list_remove(&zal->zal_list, mat_next);
2625 		kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
2626 	}
2627 
2628 	metaslab_alloc_trace_t *mat =
2629 	    kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
2630 	list_link_init(&mat->mat_list_node);
2631 	mat->mat_mg = mg;
2632 	mat->mat_msp = msp;
2633 	mat->mat_size = psize;
2634 	mat->mat_dva_id = dva_id;
2635 	mat->mat_offset = offset;
2636 	mat->mat_weight = 0;
2637 
2638 	if (msp != NULL)
2639 		mat->mat_weight = msp->ms_weight;
2640 
2641 	/*
2642 	 * The list is part of the zio so locking is not required. Only
2643 	 * a single thread will perform allocations for a given zio.
2644 	 */
2645 	list_insert_tail(&zal->zal_list, mat);
2646 	zal->zal_size++;
2647 
2648 	ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
2649 }
2650 
2651 void
2652 metaslab_trace_init(zio_alloc_list_t *zal)
2653 {
2654 	list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
2655 	    offsetof(metaslab_alloc_trace_t, mat_list_node));
2656 	zal->zal_size = 0;
2657 }
2658 
2659 void
2660 metaslab_trace_fini(zio_alloc_list_t *zal)
2661 {
2662 	metaslab_alloc_trace_t *mat;
2663 
2664 	while ((mat = list_remove_head(&zal->zal_list)) != NULL)
2665 		kmem_cache_free(metaslab_alloc_trace_cache, mat);
2666 	list_destroy(&zal->zal_list);
2667 	zal->zal_size = 0;
2668 }
2669 
2670 /*
2671  * ==========================================================================
2672  * Metaslab block operations
2673  * ==========================================================================
2674  */
2675 
2676 static void
2677 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags)
2678 {
2679 	if (!(flags & METASLAB_ASYNC_ALLOC) ||
2680 	    flags & METASLAB_DONT_THROTTLE)
2681 		return;
2682 
2683 	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2684 	if (!mg->mg_class->mc_alloc_throttle_enabled)
2685 		return;
2686 
2687 	(void) refcount_add(&mg->mg_alloc_queue_depth, tag);
2688 }
2689 
2690 void
2691 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags)
2692 {
2693 	if (!(flags & METASLAB_ASYNC_ALLOC) ||
2694 	    flags & METASLAB_DONT_THROTTLE)
2695 		return;
2696 
2697 	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2698 	if (!mg->mg_class->mc_alloc_throttle_enabled)
2699 		return;
2700 
2701 	(void) refcount_remove(&mg->mg_alloc_queue_depth, tag);
2702 }
2703 
2704 void
2705 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag)
2706 {
2707 #ifdef ZFS_DEBUG
2708 	const dva_t *dva = bp->blk_dva;
2709 	int ndvas = BP_GET_NDVAS(bp);
2710 
2711 	for (int d = 0; d < ndvas; d++) {
2712 		uint64_t vdev = DVA_GET_VDEV(&dva[d]);
2713 		metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2714 		VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth, tag));
2715 	}
2716 #endif
2717 }
2718 
2719 static uint64_t
2720 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
2721 {
2722 	uint64_t start;
2723 	range_tree_t *rt = msp->ms_tree;
2724 	metaslab_class_t *mc = msp->ms_group->mg_class;
2725 
2726 	VERIFY(!msp->ms_condensing);
2727 
2728 	start = mc->mc_ops->msop_alloc(msp, size);
2729 	if (start != -1ULL) {
2730 		metaslab_group_t *mg = msp->ms_group;
2731 		vdev_t *vd = mg->mg_vd;
2732 
2733 		VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
2734 		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2735 		VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
2736 		range_tree_remove(rt, start, size);
2737 
2738 		if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
2739 			vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
2740 
2741 		range_tree_add(msp->ms_alloctree[txg & TXG_MASK], start, size);
2742 
2743 		/* Track the last successful allocation */
2744 		msp->ms_alloc_txg = txg;
2745 		metaslab_verify_space(msp, txg);
2746 	}
2747 
2748 	/*
2749 	 * Now that we've attempted the allocation we need to update the
2750 	 * metaslab's maximum block size since it may have changed.
2751 	 */
2752 	msp->ms_max_size = metaslab_block_maxsize(msp);
2753 	return (start);
2754 }
2755 
2756 static uint64_t
2757 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
2758     uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
2759 {
2760 	metaslab_t *msp = NULL;
2761 	uint64_t offset = -1ULL;
2762 	uint64_t activation_weight;
2763 	uint64_t target_distance;
2764 	int i;
2765 
2766 	activation_weight = METASLAB_WEIGHT_PRIMARY;
2767 	for (i = 0; i < d; i++) {
2768 		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
2769 			activation_weight = METASLAB_WEIGHT_SECONDARY;
2770 			break;
2771 		}
2772 	}
2773 
2774 	metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
2775 	search->ms_weight = UINT64_MAX;
2776 	search->ms_start = 0;
2777 	for (;;) {
2778 		boolean_t was_active;
2779 		avl_tree_t *t = &mg->mg_metaslab_tree;
2780 		avl_index_t idx;
2781 
2782 		mutex_enter(&mg->mg_lock);
2783 
2784 		/*
2785 		 * Find the metaslab with the highest weight that is less
2786 		 * than what we've already tried.  In the common case, this
2787 		 * means that we will examine each metaslab at most once.
2788 		 * Note that concurrent callers could reorder metaslabs
2789 		 * by activation/passivation once we have dropped the mg_lock.
2790 		 * If a metaslab is activated by another thread, and we fail
2791 		 * to allocate from the metaslab we have selected, we may
2792 		 * not try the newly-activated metaslab, and instead activate
2793 		 * another metaslab.  This is not optimal, but generally
2794 		 * does not cause any problems (a possible exception being
2795 		 * if every metaslab is completely full except for the
2796 		 * newly-activated metaslab which we fail to examine).
2797 		 */
2798 		msp = avl_find(t, search, &idx);
2799 		if (msp == NULL)
2800 			msp = avl_nearest(t, idx, AVL_AFTER);
2801 		for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
2802 
2803 			if (!metaslab_should_allocate(msp, asize)) {
2804 				metaslab_trace_add(zal, mg, msp, asize, d,
2805 				    TRACE_TOO_SMALL);
2806 				continue;
2807 			}
2808 
2809 			/*
2810 			 * If the selected metaslab is condensing, skip it.
2811 			 */
2812 			if (msp->ms_condensing)
2813 				continue;
2814 
2815 			was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2816 			if (activation_weight == METASLAB_WEIGHT_PRIMARY)
2817 				break;
2818 
2819 			target_distance = min_distance +
2820 			    (space_map_allocated(msp->ms_sm) != 0 ? 0 :
2821 			    min_distance >> 1);
2822 
2823 			for (i = 0; i < d; i++) {
2824 				if (metaslab_distance(msp, &dva[i]) <
2825 				    target_distance)
2826 					break;
2827 			}
2828 			if (i == d)
2829 				break;
2830 		}
2831 		mutex_exit(&mg->mg_lock);
2832 		if (msp == NULL) {
2833 			kmem_free(search, sizeof (*search));
2834 			return (-1ULL);
2835 		}
2836 		search->ms_weight = msp->ms_weight;
2837 		search->ms_start = msp->ms_start + 1;
2838 
2839 		mutex_enter(&msp->ms_lock);
2840 
2841 		/*
2842 		 * Ensure that the metaslab we have selected is still
2843 		 * capable of handling our request. It's possible that
2844 		 * another thread may have changed the weight while we
2845 		 * were blocked on the metaslab lock. We check the
2846 		 * active status first to see if we need to reselect
2847 		 * a new metaslab.
2848 		 */
2849 		if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
2850 			mutex_exit(&msp->ms_lock);
2851 			continue;
2852 		}
2853 
2854 		if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
2855 		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
2856 			metaslab_passivate(msp,
2857 			    msp->ms_weight & ~METASLAB_ACTIVE_MASK);
2858 			mutex_exit(&msp->ms_lock);
2859 			continue;
2860 		}
2861 
2862 		if (metaslab_activate(msp, activation_weight) != 0) {
2863 			mutex_exit(&msp->ms_lock);
2864 			continue;
2865 		}
2866 		msp->ms_selected_txg = txg;
2867 
2868 		/*
2869 		 * Now that we have the lock, recheck to see if we should
2870 		 * continue to use this metaslab for this allocation. The
2871 		 * metaslab is now loaded so metaslab_should_allocate() can
2872 		 * accurately determine if the allocation attempt should
2873 		 * proceed.
2874 		 */
2875 		if (!metaslab_should_allocate(msp, asize)) {
2876 			/* Passivate this metaslab and select a new one. */
2877 			metaslab_trace_add(zal, mg, msp, asize, d,
2878 			    TRACE_TOO_SMALL);
2879 			goto next;
2880 		}
2881 
2882 		/*
2883 		 * If this metaslab is currently condensing then pick again as
2884 		 * we can't manipulate this metaslab until it's committed
2885 		 * to disk.
2886 		 */
2887 		if (msp->ms_condensing) {
2888 			metaslab_trace_add(zal, mg, msp, asize, d,
2889 			    TRACE_CONDENSING);
2890 			mutex_exit(&msp->ms_lock);
2891 			continue;
2892 		}
2893 
2894 		offset = metaslab_block_alloc(msp, asize, txg);
2895 		metaslab_trace_add(zal, mg, msp, asize, d, offset);
2896 
2897 		if (offset != -1ULL) {
2898 			/* Proactively passivate the metaslab, if needed */
2899 			metaslab_segment_may_passivate(msp);
2900 			break;
2901 		}
2902 next:
2903 		ASSERT(msp->ms_loaded);
2904 
2905 		/*
2906 		 * We were unable to allocate from this metaslab so determine
2907 		 * a new weight for this metaslab. Now that we have loaded
2908 		 * the metaslab we can provide a better hint to the metaslab
2909 		 * selector.
2910 		 *
2911 		 * For space-based metaslabs, we use the maximum block size.
2912 		 * This information is only available when the metaslab
2913 		 * is loaded and is more accurate than the generic free
2914 		 * space weight that was calculated by metaslab_weight().
2915 		 * This information allows us to quickly compare the maximum
2916 		 * available allocation in the metaslab to the allocation
2917 		 * size being requested.
2918 		 *
2919 		 * For segment-based metaslabs, determine the new weight
2920 		 * based on the highest bucket in the range tree. We
2921 		 * explicitly use the loaded segment weight (i.e. the range
2922 		 * tree histogram) since it contains the space that is
2923 		 * currently available for allocation and is accurate
2924 		 * even within a sync pass.
2925 		 */
2926 		if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
2927 			uint64_t weight = metaslab_block_maxsize(msp);
2928 			WEIGHT_SET_SPACEBASED(weight);
2929 			metaslab_passivate(msp, weight);
2930 		} else {
2931 			metaslab_passivate(msp,
2932 			    metaslab_weight_from_range_tree(msp));
2933 		}
2934 
2935 		/*
2936 		 * We have just failed an allocation attempt, check
2937 		 * that metaslab_should_allocate() agrees. Otherwise,
2938 		 * we may end up in an infinite loop retrying the same
2939 		 * metaslab.
2940 		 */
2941 		ASSERT(!metaslab_should_allocate(msp, asize));
2942 		mutex_exit(&msp->ms_lock);
2943 	}
2944 	mutex_exit(&msp->ms_lock);
2945 	kmem_free(search, sizeof (*search));
2946 	return (offset);
2947 }
2948 
2949 static uint64_t
2950 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
2951     uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
2952 {
2953 	uint64_t offset;
2954 	ASSERT(mg->mg_initialized);
2955 
2956 	offset = metaslab_group_alloc_normal(mg, zal, asize, txg,
2957 	    min_distance, dva, d);
2958 
2959 	mutex_enter(&mg->mg_lock);
2960 	if (offset == -1ULL) {
2961 		mg->mg_failed_allocations++;
2962 		metaslab_trace_add(zal, mg, NULL, asize, d,
2963 		    TRACE_GROUP_FAILURE);
2964 		if (asize == SPA_GANGBLOCKSIZE) {
2965 			/*
2966 			 * This metaslab group was unable to allocate
2967 			 * the minimum gang block size so it must be out of
2968 			 * space. We must notify the allocation throttle
2969 			 * to start skipping allocation attempts to this
2970 			 * metaslab group until more space becomes available.
2971 			 * Note: this failure cannot be caused by the
2972 			 * allocation throttle since the allocation throttle
2973 			 * is only responsible for skipping devices and
2974 			 * not failing block allocations.
2975 			 */
2976 			mg->mg_no_free_space = B_TRUE;
2977 		}
2978 	}
2979 	mg->mg_allocations++;
2980 	mutex_exit(&mg->mg_lock);
2981 	return (offset);
2982 }
2983 
2984 /*
2985  * If we have to write a ditto block (i.e. more than one DVA for a given BP)
2986  * on the same vdev as an existing DVA of this BP, then try to allocate it
2987  * at least (vdev_asize / (2 ^ ditto_same_vdev_distance_shift)) away from the
2988  * existing DVAs.
2989  */
2990 int ditto_same_vdev_distance_shift = 3;
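
/*
 * For example (hypothetical vdev): with the shift of 3 above, ditto
 * copies placed on an 8TB top-level vdev are kept at least 1TB apart
 * whenever possible (the requirement is relaxed if we have to try hard).
 */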
2991 
2992 /*
2993  * Allocate a block for the specified i/o.
2994  */
2995 static int
2996 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
2997     dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
2998     zio_alloc_list_t *zal)
2999 {
3000 	metaslab_group_t *mg, *rotor;
3001 	vdev_t *vd;
3002 	boolean_t try_hard = B_FALSE;
3003 
3004 	ASSERT(!DVA_IS_VALID(&dva[d]));
3005 
3006 	/*
3007 	 * For testing, make some blocks above a certain size be gang blocks.
3008 	 */
3009 	if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0) {
3010 		metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG);
3011 		return (SET_ERROR(ENOSPC));
3012 	}
3013 
3014 	/*
3015 	 * Start at the rotor and loop through all mgs until we find something.
3016 	 * Note that there's no locking on mc_rotor or mc_aliquot because
3017 	 * nothing actually breaks if we miss a few updates -- we just won't
3018 	 * allocate quite as evenly.  It all balances out over time.
3019 	 *
3020 	 * If we are doing ditto or log blocks, try to spread them across
3021 	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
3022 	 * allocated all of our ditto blocks, then try and spread them out on
3023 	 * that vdev as much as possible.  If it turns out to not be possible,
3024 	 * gradually lower our standards until anything becomes acceptable.
3025 	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
3026 	 * gives us hope of containing our fault domains to something we're
3027 	 * able to reason about.  Otherwise, any two top-level vdev failures
3028 	 * will guarantee the loss of data.  With consecutive allocation,
3029 	 * only two adjacent top-level vdev failures will result in data loss.
3030 	 *
3031 	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
3032 	 * ourselves on the same vdev as our gang block header.  That
3033 	 * way, we can hope for locality in vdev_cache, plus it makes our
3034 	 * fault domains something tractable.
3035 	 */
3036 	if (hintdva) {
3037 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
3038 
3039 		/*
3040 		 * It's possible the vdev we're using as the hint no
3041 		 * longer exists (i.e. removed). Consult the rotor when
3042 		 * all else fails.
3043 		 */
3044 		if (vd != NULL) {
3045 			mg = vd->vdev_mg;
3046 
3047 			if (flags & METASLAB_HINTBP_AVOID &&
3048 			    mg->mg_next != NULL)
3049 				mg = mg->mg_next;
3050 		} else {
3051 			mg = mc->mc_rotor;
3052 		}
3053 	} else if (d != 0) {
3054 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
3055 		mg = vd->vdev_mg->mg_next;
3056 	} else {
3057 		mg = mc->mc_rotor;
3058 	}
3059 
3060 	/*
3061 	 * If the hint put us into the wrong metaslab class, or into a
3062 	 * metaslab group that has been passivated, just follow the rotor.
3063 	 */
3064 	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
3065 		mg = mc->mc_rotor;
3066 
3067 	rotor = mg;
3068 top:
3069 	do {
3070 		boolean_t allocatable;
3071 
3072 		ASSERT(mg->mg_activation_count == 1);
3073 		vd = mg->mg_vd;
3074 
3075 		/*
3076 		 * Don't allocate from faulted devices.
3077 		 */
3078 		if (try_hard) {
3079 			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
3080 			allocatable = vdev_allocatable(vd);
3081 			spa_config_exit(spa, SCL_ZIO, FTAG);
3082 		} else {
3083 			allocatable = vdev_allocatable(vd);
3084 		}
3085 
3086 		/*
3087 		 * Determine if the selected metaslab group is eligible
3088 		 * for allocations. If we're ganging then don't allow
3089 		 * this metaslab group to skip allocations since that would
3090 		 * inadvertently return ENOSPC and suspend the pool
3091 		 * even though space is still available.
3092 		 */
3093 		if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
3094 			allocatable = metaslab_group_allocatable(mg, rotor,
3095 			    psize);
3096 		}
3097 
3098 		if (!allocatable) {
3099 			metaslab_trace_add(zal, mg, NULL, psize, d,
3100 			    TRACE_NOT_ALLOCATABLE);
3101 			goto next;
3102 		}
3103 
3104 		ASSERT(mg->mg_initialized);
3105 
3106 		/*
3107 		 * Avoid writing single-copy data to a failing,
3108 		 * non-redundant vdev, unless we've already tried all
3109 		 * other vdevs.
3110 		 */
3111 		if ((vd->vdev_stat.vs_write_errors > 0 ||
3112 		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
3113 		    d == 0 && !try_hard && vd->vdev_children == 0) {
3114 			metaslab_trace_add(zal, mg, NULL, psize, d,
3115 			    TRACE_VDEV_ERROR);
3116 			goto next;
3117 		}
3118 
3119 		ASSERT(mg->mg_class == mc);
3120 
3121 		/*
3122 		 * If we don't need to try hard, then require that the
3123 		 * block be 1/8th of the device away from any other DVAs
3124 		 * in this BP.  If we are trying hard, allow any offset
3125 		 * to be used (distance=0).
3126 		 */
3127 		uint64_t distance = 0;
3128 		if (!try_hard) {
3129 			distance = vd->vdev_asize >>
3130 			    ditto_same_vdev_distance_shift;
3131 			if (distance <= (1ULL << vd->vdev_ms_shift))
3132 				distance = 0;
3133 		}
3134 
3135 		uint64_t asize = vdev_psize_to_asize(vd, psize);
3136 		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
3137 
3138 		uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
3139 		    distance, dva, d);
3140 
3141 		if (offset != -1ULL) {
3142 			/*
3143 			 * If we've just selected this metaslab group,
3144 			 * figure out whether the corresponding vdev is
3145 			 * over- or under-used relative to the pool,
3146 			 * and set an allocation bias to even it out.
3147 			 */
3148 			if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
3149 				vdev_stat_t *vs = &vd->vdev_stat;
3150 				int64_t vu, cu;
3151 
3152 				vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
3153 				cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);
3154 
3155 				/*
3156 				 * Calculate how much more or less we should
3157 				 * try to allocate from this device during
3158 				 * this iteration around the rotor.
3159 				 * For example, if a device is 80% full
3160 				 * and the pool is 20% full then we should
3161 				 * reduce allocations by 60% on this device.
3162 				 *
3163 				 * mg_bias = (20 - 80) * 512K / 100 = -307K
3164 				 *
3165 				 * This reduces allocations by 307K for this
3166 				 * iteration.
3167 				 */
3168 				mg->mg_bias = ((cu - vu) *
3169 				    (int64_t)mg->mg_aliquot) / 100;
3170 			} else if (!metaslab_bias_enabled) {
3171 				mg->mg_bias = 0;
3172 			}
3173 
3174 			if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
3175 			    mg->mg_aliquot + mg->mg_bias) {
3176 				mc->mc_rotor = mg->mg_next;
3177 				mc->mc_aliquot = 0;
3178 			}
3179 
3180 			DVA_SET_VDEV(&dva[d], vd->vdev_id);
3181 			DVA_SET_OFFSET(&dva[d], offset);
3182 			DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
3183 			DVA_SET_ASIZE(&dva[d], asize);
3184 
3185 			return (0);
3186 		}
3187 next:
3188 		mc->mc_rotor = mg->mg_next;
3189 		mc->mc_aliquot = 0;
3190 	} while ((mg = mg->mg_next) != rotor);
3191 
3192 	/*
3193 	 * If we haven't tried hard, do so now.
3194 	 */
3195 	if (!try_hard) {
3196 		try_hard = B_TRUE;
3197 		goto top;
3198 	}
3199 
3200 	bzero(&dva[d], sizeof (dva_t));
3201 
3202 	metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC);
3203 	return (SET_ERROR(ENOSPC));
3204 }
3205 
3206 /*
3207  * Free the block represented by DVA in the context of the specified
3208  * transaction group.
3209  */
3210 static void
3211 metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
3212 {
3213 	uint64_t vdev = DVA_GET_VDEV(dva);
3214 	uint64_t offset = DVA_GET_OFFSET(dva);
3215 	uint64_t size = DVA_GET_ASIZE(dva);
3216 	vdev_t *vd;
3217 	metaslab_t *msp;
3218 
3219 	ASSERT(DVA_IS_VALID(dva));
3220 
3221 	if (txg > spa_freeze_txg(spa))
3222 		return;
3223 
3224 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
3225 	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
3226 		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
3227 		    (u_longlong_t)vdev, (u_longlong_t)offset);
3228 		ASSERT(0);
3229 		return;
3230 	}
3231 
3232 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3233 
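	/*
	 * A gang DVA points at the gang header, which occupies only
	 * SPA_GANGBLOCKSIZE on this vdev regardless of the asize recorded
	 * in the block pointer, so recompute the size being freed.
	 */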
3234 	if (DVA_GET_GANG(dva))
3235 		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3236 
3237 	mutex_enter(&msp->ms_lock);
3238 
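	/*
	 * A "now" free undoes an allocation made in this same txg (e.g.
	 * when unwinding a partially failed multi-DVA allocation), so the
	 * space goes straight back into ms_tree.  Otherwise the free is
	 * recorded in the per-txg freetree and applied when the metaslab
	 * syncs.
	 */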
3239 	if (now) {
3240 		range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
3241 		    offset, size);
3242 
3243 		VERIFY(!msp->ms_condensing);
3244 		VERIFY3U(offset, >=, msp->ms_start);
3245 		VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
3246 		VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
3247 		    msp->ms_size);
3248 		VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3249 		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
3250 		range_tree_add(msp->ms_tree, offset, size);
3251 		msp->ms_max_size = metaslab_block_maxsize(msp);
3252 	} else {
3253 		if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0)
3254 			vdev_dirty(vd, VDD_METASLAB, msp, txg);
3255 		range_tree_add(msp->ms_freetree[txg & TXG_MASK],
3256 		    offset, size);
3257 	}
3258 
3259 	mutex_exit(&msp->ms_lock);
3260 }
3261 
3262 /*
3263  * Intent log support: upon opening the pool after a crash, notify the SPA
3264  * of blocks that the intent log has allocated for immediate write, but
3265  * which are still considered free by the SPA because the last transaction
3266  * group didn't commit yet.
3267  */
3268 static int
3269 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
3270 {
3271 	uint64_t vdev = DVA_GET_VDEV(dva);
3272 	uint64_t offset = DVA_GET_OFFSET(dva);
3273 	uint64_t size = DVA_GET_ASIZE(dva);
3274 	vdev_t *vd;
3275 	metaslab_t *msp;
3276 	int error = 0;
3277 
3278 	ASSERT(DVA_IS_VALID(dva));
3279 
3280 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
3281 	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
3282 		return (SET_ERROR(ENXIO));
3283 
3284 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3285 
3286 	if (DVA_GET_GANG(dva))
3287 		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3288 
3289 	mutex_enter(&msp->ms_lock);
3290 
3291 	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
3292 		error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
3293 
3294 	if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
3295 		error = SET_ERROR(ENOENT);
3296 
3297 	if (error || txg == 0) {	/* txg == 0 indicates dry run */
3298 		mutex_exit(&msp->ms_lock);
3299 		return (error);
3300 	}
3301 
3302 	VERIFY(!msp->ms_condensing);
3303 	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3304 	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
3305 	VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
3306 	range_tree_remove(msp->ms_tree, offset, size);
3307 
3308 	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
3309 		if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
3310 			vdev_dirty(vd, VDD_METASLAB, msp, txg);
3311 		range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
3312 	}
3313 
3314 	mutex_exit(&msp->ms_lock);
3315 
3316 	return (0);
3317 }
3318 
3319 /*
3320  * Reserve some allocation slots. The reservation system must be called
3321  * before we call into the allocator. If there aren't any available slots
3322  * then the I/O will be throttled until an I/O completes and its slots are
3323  * freed up. The function returns true if it was successful in placing
3324  * the reservation.
3325  */
3326 boolean_t
3327 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
3328     int flags)
3329 {
3330 	uint64_t available_slots = 0;
3331 	boolean_t slot_reserved = B_FALSE;
3332 
3333 	ASSERT(mc->mc_alloc_throttle_enabled);
3334 	mutex_enter(&mc->mc_lock);
3335 
3336 	uint64_t reserved_slots = refcount_count(&mc->mc_alloc_slots);
3337 	if (reserved_slots < mc->mc_alloc_max_slots)
3338 		available_slots = mc->mc_alloc_max_slots - reserved_slots;
3339 
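	/*
	 * Gang allocations always succeed in reserving, even past the
	 * limit, presumably so that ganging -- itself a response to
	 * allocation pressure -- is not in turn blocked by the throttle.
	 */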
3340 	if (slots <= available_slots || GANG_ALLOCATION(flags)) {
3341 		/*
3342 		 * We reserve the slots individually so that we can unreserve
3343 		 * them individually when an I/O completes.
3344 		 */
3345 		for (int d = 0; d < slots; d++) {
3346 			reserved_slots = refcount_add(&mc->mc_alloc_slots, zio);
3347 		}
3348 		zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
3349 		slot_reserved = B_TRUE;
3350 	}
3351 
3352 	mutex_exit(&mc->mc_lock);
3353 	return (slot_reserved);
3354 }
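
/*
 * Illustrative caller pattern (a sketch only; the real throttling logic
 * lives in zio.c and differs in detail, and the field and argument names
 * below are assumed rather than quoted from it):
 *
 *	if (metaslab_class_throttle_reserve(mc, ndvas, zio, flags)) {
 *		error = metaslab_alloc(spa, mc, psize, bp, ndvas, txg,
 *		    NULL, flags, &zio->io_alloc_list, zio);
 *		...
 *		metaslab_class_throttle_unreserve(mc, ndvas, zio);
 *	} else {
 *		...	wait for an in-flight allocation to complete,
 *			then retry the reservation
 *	}
 */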
3355 
3356 void
3357 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, zio_t *zio)
3358 {
3359 	ASSERT(mc->mc_alloc_throttle_enabled);
3360 	mutex_enter(&mc->mc_lock);
3361 	for (int d = 0; d < slots; d++) {
3362 		(void) refcount_remove(&mc->mc_alloc_slots, zio);
3363 	}
3364 	mutex_exit(&mc->mc_lock);
3365 }
3366 
3367 int
3368 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
3369     int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
3370     zio_alloc_list_t *zal, zio_t *zio)
3371 {
3372 	dva_t *dva = bp->blk_dva;
3373 	dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
3374 	int error = 0;
3375 
3376 	ASSERT(bp->blk_birth == 0);
3377 	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
3378 
3379 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
3380 
3381 	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
3382 		spa_config_exit(spa, SCL_ALLOC, FTAG);
3383 		return (SET_ERROR(ENOSPC));
3384 	}
3385 
3386 	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
3387 	ASSERT(BP_GET_NDVAS(bp) == 0);
3388 	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
3389 	ASSERT3P(zal, !=, NULL);
3390 
3391 	for (int d = 0; d < ndvas; d++) {
3392 		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
3393 		    txg, flags, zal);
3394 		if (error != 0) {
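			/*
			 * Back out any DVAs already placed in this block
			 * pointer so the caller never sees a partially
			 * filled BP, and undo the per-group queue-depth
			 * accounting for those DVAs.
			 */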
3395 			for (d--; d >= 0; d--) {
3396 				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
3397 				metaslab_group_alloc_decrement(spa,
3398 				    DVA_GET_VDEV(&dva[d]), zio, flags);
3399 				bzero(&dva[d], sizeof (dva_t));
3400 			}
3401 			spa_config_exit(spa, SCL_ALLOC, FTAG);
3402 			return (error);
3403 		} else {
3404 			/*
3405 			 * Update the metaslab group's queue depth
3406 			 * based on the newly allocated dva.
3407 			 */
3408 			metaslab_group_alloc_increment(spa,
3409 			    DVA_GET_VDEV(&dva[d]), zio, flags);
3410 		}
3411 
3412 	}
3413 	ASSERT(error == 0);
3414 	ASSERT(BP_GET_NDVAS(bp) == ndvas);
3415 
3416 	spa_config_exit(spa, SCL_ALLOC, FTAG);
3417 
3418 	BP_SET_BIRTH(bp, txg, txg);
3419 
3420 	return (0);
3421 }
3422 
3423 void
3424 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
3425 {
3426 	const dva_t *dva = bp->blk_dva;
3427 	int ndvas = BP_GET_NDVAS(bp);
3428 
3429 	ASSERT(!BP_IS_HOLE(bp));
3430 	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
3431 
3432 	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
3433 
3434 	for (int d = 0; d < ndvas; d++)
3435 		metaslab_free_dva(spa, &dva[d], txg, now);
3436 
3437 	spa_config_exit(spa, SCL_FREE, FTAG);
3438 }
3439 
3440 int
3441 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
3442 {
3443 	const dva_t *dva = bp->blk_dva;
3444 	int ndvas = BP_GET_NDVAS(bp);
3445 	int error = 0;
3446 
3447 	ASSERT(!BP_IS_HOLE(bp));
3448 
3449 	if (txg != 0) {
3450 		/*
3451 		 * First do a dry run to make sure all DVAs are claimable,
3452 		 * so we don't have to unwind from partial failures below.
3453 		 */
3454 		if ((error = metaslab_claim(spa, bp, 0)) != 0)
3455 			return (error);
3456 	}
3457 
3458 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
3459 
3460 	for (int d = 0; d < ndvas; d++)
3461 		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
3462 			break;
3463 
3464 	spa_config_exit(spa, SCL_ALLOC, FTAG);
3465 
3466 	ASSERT(error == 0 || txg == 0);
3467 
3468 	return (error);
3469 }
3470 
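/*
 * Debug-only sanity check (enabled via ZFS_DEBUG_ZIO_FREE): verify that a
 * block about to be freed is not already marked free in its metaslab's
 * ms_tree, freetrees, or defertrees, which would indicate a double free.
 */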
3471 void
3472 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
3473 {
3474 	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
3475 		return;
3476 
3477 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
3478 	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
3479 		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
3480 		vdev_t *vd = vdev_lookup_top(spa, vdev);
3481 		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
3482 		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
3483 		metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3484 
3485 		if (msp->ms_loaded)
3486 			range_tree_verify(msp->ms_tree, offset, size);
3487 
3488 		for (int j = 0; j < TXG_SIZE; j++)
3489 			range_tree_verify(msp->ms_freetree[j], offset, size);
3490 		for (int j = 0; j < TXG_DEFER_SIZE; j++)
3491 			range_tree_verify(msp->ms_defertree[j], offset, size);
3492 	}
3493 	spa_config_exit(spa, SCL_VDEV, FTAG);
3494 }
3495