xref: /freebsd/contrib/jemalloc/src/extent.c (revision 2f513db7)
1 #define JEMALLOC_EXTENT_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
4 
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/extent_dss.h"
7 #include "jemalloc/internal/extent_mmap.h"
8 #include "jemalloc/internal/ph.h"
9 #include "jemalloc/internal/rtree.h"
10 #include "jemalloc/internal/mutex.h"
11 #include "jemalloc/internal/mutex_pool.h"
12 
13 /******************************************************************************/
14 /* Data. */
15 
16 rtree_t		extents_rtree;
17 /* Keyed by the address of the extent_t being protected. */
18 mutex_pool_t	extent_mutex_pool;
19 
20 size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
21 
22 static const bitmap_info_t extents_bitmap_info =
23     BITMAP_INFO_INITIALIZER(NPSIZES+1);
24 
25 static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
26     size_t size, size_t alignment, bool *zero, bool *commit,
27     unsigned arena_ind);
28 static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
29     size_t size, bool committed, unsigned arena_ind);
30 static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
31     size_t size, bool committed, unsigned arena_ind);
32 static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
33     size_t size, size_t offset, size_t length, unsigned arena_ind);
34 static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
35     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
36     size_t length, bool growing_retained);
37 static bool extent_decommit_default(extent_hooks_t *extent_hooks,
38     void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
39 #ifdef PAGES_CAN_PURGE_LAZY
40 static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
41     size_t size, size_t offset, size_t length, unsigned arena_ind);
42 #endif
43 static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
44     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
45     size_t length, bool growing_retained);
46 #ifdef PAGES_CAN_PURGE_FORCED
47 static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
48     void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
49 #endif
50 static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
51     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
52     size_t length, bool growing_retained);
53 #ifdef JEMALLOC_MAPS_COALESCE
54 static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
55     size_t size, size_t size_a, size_t size_b, bool committed,
56     unsigned arena_ind);
57 #endif
58 static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
59     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
60     szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
61     bool growing_retained);
62 #ifdef JEMALLOC_MAPS_COALESCE
63 static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
64     size_t size_a, void *addr_b, size_t size_b, bool committed,
65     unsigned arena_ind);
66 #endif
67 static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
68     extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
69     bool growing_retained);
70 
71 const extent_hooks_t	extent_hooks_default = {
72 	extent_alloc_default,
73 	extent_dalloc_default,
74 	extent_destroy_default,
75 	extent_commit_default,
76 	extent_decommit_default
77 #ifdef PAGES_CAN_PURGE_LAZY
78 	,
79 	extent_purge_lazy_default
80 #else
81 	,
82 	NULL
83 #endif
84 #ifdef PAGES_CAN_PURGE_FORCED
85 	,
86 	extent_purge_forced_default
87 #else
88 	,
89 	NULL
90 #endif
91 #ifdef JEMALLOC_MAPS_COALESCE
92 	,
93 	extent_split_default,
94 	extent_merge_default
95 #endif
96 };
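
/*
 * A minimal illustrative sketch (assuming the public mallctl() API, an
 * already-initialized arena index arena_ind, and a user-supplied my_alloc
 * callback): applications normally install custom hooks per arena through the
 * "arena.<i>.extent_hooks" mallctl rather than by editing this table, e.g.:
 *
 *	static extent_hooks_t my_hooks;
 *	extent_hooks_t *old_hooks, *new_hooks = &my_hooks;
 *	size_t sz = sizeof(old_hooks);
 *	char cmd[64];
 *	snprintf(cmd, sizeof(cmd), "arena.%u.extent_hooks", arena_ind);
 *	mallctl(cmd, &old_hooks, &sz, NULL, 0);     - read the current table
 *	my_hooks = *old_hooks;                      - start from it
 *	my_hooks.alloc = my_alloc;                  - override allocation only
 *	mallctl(cmd, NULL, NULL, &new_hooks, sizeof(new_hooks));
 *
 * Hooks that are NULL in the installed table (as the purge hooks are here on
 * platforms that cannot purge lazily/forcibly) cause the corresponding
 * operation to be treated as unsupported.
 */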
97 
98 /* Used exclusively for gdump triggering. */
99 static atomic_zu_t curpages;
100 static atomic_zu_t highpages;
101 
102 /******************************************************************************/
103 /*
104  * Function prototypes for static functions that are referenced prior to
105  * definition.
106  */
107 
108 static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
109 static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
110     extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
111     size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
112     bool *zero, bool *commit, bool growing_retained);
113 static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
114     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
115     extent_t *extent, bool *coalesced, bool growing_retained);
116 static void extent_record(tsdn_t *tsdn, arena_t *arena,
117     extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
118     bool growing_retained);
119 
120 /******************************************************************************/
121 
122 ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link,
123     extent_esnead_comp)
124 
125 typedef enum {
126 	lock_result_success,
127 	lock_result_failure,
128 	lock_result_no_extent
129 } lock_result_t;
130 
131 static lock_result_t
132 extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
133     extent_t **result) {
134 	extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
135 	    elm, true);
136 
137 	if (extent1 == NULL) {
138 		return lock_result_no_extent;
139 	}
140 	/*
141 	 * It's possible that the extent changed out from under us, and with it
142 	 * the leaf->extent mapping.  We have to recheck while holding the lock.
143 	 */
144 	extent_lock(tsdn, extent1);
145 	extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
146 	    &extents_rtree, elm, true);
147 
148 	if (extent1 == extent2) {
149 		*result = extent1;
150 		return lock_result_success;
151 	} else {
152 		extent_unlock(tsdn, extent1);
153 		return lock_result_failure;
154 	}
155 }
156 
157 /*
158  * Returns a pool-locked extent_t * if there's one associated with the given
159  * address, and NULL otherwise.
160  */
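/*
 * The lookup below is retried on lock_result_failure because the elm->extent
 * mapping can change between the unlocked read and acquiring the extent's pool
 * mutex; looping until success or lock_result_no_extent yields a consistent
 * extent/lock pairing.
 */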
161 static extent_t *
162 extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) {
163 	extent_t *ret = NULL;
164 	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
165 	    rtree_ctx, (uintptr_t)addr, false, false);
166 	if (elm == NULL) {
167 		return NULL;
168 	}
169 	lock_result_t lock_result;
170 	do {
171 		lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret);
172 	} while (lock_result == lock_result_failure);
173 	return ret;
174 }
175 
176 extent_t *
177 extent_alloc(tsdn_t *tsdn, arena_t *arena) {
178 	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
179 	extent_t *extent = extent_avail_first(&arena->extent_avail);
180 	if (extent == NULL) {
181 		malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
182 		return base_alloc_extent(tsdn, arena->base);
183 	}
184 	extent_avail_remove(&arena->extent_avail, extent);
185 	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
186 	return extent;
187 }
188 
189 void
190 extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
191 	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
192 	extent_avail_insert(&arena->extent_avail, extent);
193 	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
194 }
195 
196 extent_hooks_t *
197 extent_hooks_get(arena_t *arena) {
198 	return base_extent_hooks_get(arena->base);
199 }
200 
201 extent_hooks_t *
202 extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
203 	background_thread_info_t *info;
204 	if (have_background_thread) {
205 		info = arena_background_thread_info_get(arena);
206 		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
207 	}
208 	extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
209 	if (have_background_thread) {
210 		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
211 	}
212 
213 	return ret;
214 }
215 
216 static void
217 extent_hooks_assure_initialized(arena_t *arena,
218     extent_hooks_t **r_extent_hooks) {
219 	if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
220 		*r_extent_hooks = extent_hooks_get(arena);
221 	}
222 }
223 
224 #ifndef JEMALLOC_JET
225 static
226 #endif
227 size_t
228 extent_size_quantize_floor(size_t size) {
229 	size_t ret;
230 	pszind_t pind;
231 
232 	assert(size > 0);
233 	assert((size & PAGE_MASK) == 0);
234 
235 	pind = sz_psz2ind(size - sz_large_pad + 1);
236 	if (pind == 0) {
237 		/*
238 		 * Avoid underflow.  This short-circuit would also do the right
239 		 * thing for all sizes in the range for which there are
240 		 * PAGE-spaced size classes, but it's simplest to just handle
241 		 * the one case that would cause erroneous results.
242 		 */
243 		return size;
244 	}
245 	ret = sz_pind2sz(pind - 1) + sz_large_pad;
246 	assert(ret <= size);
247 	return ret;
248 }
249 
250 #ifndef JEMALLOC_JET
251 static
252 #endif
253 size_t
254 extent_size_quantize_ceil(size_t size) {
255 	size_t ret;
256 
257 	assert(size > 0);
258 	assert(size - sz_large_pad <= LARGE_MAXCLASS);
259 	assert((size & PAGE_MASK) == 0);
260 
261 	ret = extent_size_quantize_floor(size);
262 	if (ret < size) {
263 		/*
264 		 * Skip a quantization that may have an adequately large extent,
265 		 * because under-sized extents may be mixed in.  This only
266 		 * happens when an unusual size is requested, i.e. for aligned
267 		 * allocation, and is just one of several places where linear
268 		 * search would potentially find sufficiently aligned available
269 		 * memory somewhere lower.
270 		 */
271 		ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
272 		    sz_large_pad;
273 	}
274 	return ret;
275 }
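
/*
 * Illustrative example, assuming 4 KiB pages and sz_large_pad == 0: a 36 KiB
 * extent falls between the 32 KiB and 40 KiB page-size classes, so it
 * quantizes down to 32 KiB and up to 40 KiB, whereas an exact class size such
 * as 32 KiB maps to itself in both directions.
 */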
276 
277 /* Generate pairing heap functions. */
278 ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
279 
280 bool
281 extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
282     bool delay_coalesce) {
283 	if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
284 	    malloc_mutex_rank_exclusive)) {
285 		return true;
286 	}
287 	for (unsigned i = 0; i < NPSIZES+1; i++) {
288 		extent_heap_new(&extents->heaps[i]);
289 	}
290 	bitmap_init(extents->bitmap, &extents_bitmap_info, true);
291 	extent_list_init(&extents->lru);
292 	atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
293 	extents->state = state;
294 	extents->delay_coalesce = delay_coalesce;
295 	return false;
296 }
297 
298 extent_state_t
299 extents_state_get(const extents_t *extents) {
300 	return extents->state;
301 }
302 
303 size_t
304 extents_npages_get(extents_t *extents) {
305 	return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
306 }
307 
308 static void
309 extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
310 	malloc_mutex_assert_owner(tsdn, &extents->mtx);
311 	assert(extent_state_get(extent) == extents->state);
312 
313 	size_t size = extent_size_get(extent);
314 	size_t psz = extent_size_quantize_floor(size);
315 	pszind_t pind = sz_psz2ind(psz);
316 	if (extent_heap_empty(&extents->heaps[pind])) {
317 		bitmap_unset(extents->bitmap, &extents_bitmap_info,
318 		    (size_t)pind);
319 	}
320 	extent_heap_insert(&extents->heaps[pind], extent);
321 	extent_list_append(&extents->lru, extent);
322 	size_t npages = size >> LG_PAGE;
323 	/*
324 	 * All modifications to npages hold the mutex (as asserted above), so we
325 	 * don't need an atomic fetch-add; we can get by with a load followed by
326 	 * a store.
327 	 */
328 	size_t cur_extents_npages =
329 	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
330 	atomic_store_zu(&extents->npages, cur_extents_npages + npages,
331 	    ATOMIC_RELAXED);
332 }
333 
334 static void
335 extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
336 	malloc_mutex_assert_owner(tsdn, &extents->mtx);
337 	assert(extent_state_get(extent) == extents->state);
338 
339 	size_t size = extent_size_get(extent);
340 	size_t psz = extent_size_quantize_floor(size);
341 	pszind_t pind = sz_psz2ind(psz);
342 	extent_heap_remove(&extents->heaps[pind], extent);
343 	if (extent_heap_empty(&extents->heaps[pind])) {
344 		bitmap_set(extents->bitmap, &extents_bitmap_info,
345 		    (size_t)pind);
346 	}
347 	extent_list_remove(&extents->lru, extent);
348 	size_t npages = size >> LG_PAGE;
349 	/*
350 	 * As in extents_insert_locked, we hold extents->mtx and so don't need
351 	 * atomic operations for updating extents->npages.
352 	 */
353 	size_t cur_extents_npages =
354 	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
355 	assert(cur_extents_npages >= npages);
356 	atomic_store_zu(&extents->npages, cur_extents_npages - npages,
357 	    ATOMIC_RELAXED);
358 }
359 
360 /*
361  * Find an extent with size [min_size, max_size) to satisfy the alignment
362  * requirement.  For each size, try only the first extent in the heap.
363  */
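/*
 * For example (assuming 4 KiB pages), to place a 16 KiB allocation at 64 KiB
 * alignment, a 20 KiB extent whose base lies 4 KiB below a 64 KiB boundary
 * works (4 KiB of lead, 16 KiB remaining), whereas a 20 KiB extent starting
 * just past a boundary is skipped because it never reaches the next aligned
 * address.
 */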
364 static extent_t *
365 extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
366     size_t alignment) {
367 	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
368 	pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));
369 
370 	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
371 	    &extents_bitmap_info, (size_t)pind); i < pind_max; i =
372 	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
373 	    (size_t)i+1)) {
374 		assert(i < NPSIZES);
375 		assert(!extent_heap_empty(&extents->heaps[i]));
376 		extent_t *extent = extent_heap_first(&extents->heaps[i]);
377 		uintptr_t base = (uintptr_t)extent_base_get(extent);
378 		size_t candidate_size = extent_size_get(extent);
379 		assert(candidate_size >= min_size);
380 
381 		uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
382 		    PAGE_CEILING(alignment));
383 		if (base > next_align || base + candidate_size <= next_align) {
384 			/* Overflow or not crossing the next alignment. */
385 			continue;
386 		}
387 
388 		size_t leadsize = next_align - base;
389 		if (candidate_size - leadsize >= min_size) {
390 			return extent;
391 		}
392 	}
393 
394 	return NULL;
395 }
396 
397 /*
 * Do any-best-fit extent selection, i.e. select any extent from the size class
 * that best fits the request.
 */
398 static extent_t *
399 extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
400     size_t size) {
401 	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
402 	pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
403 	    (size_t)pind);
404 	if (i < NPSIZES+1) {
405 		/*
406 		 * In order to reduce fragmentation, avoid reusing and splitting
407 		 * large extents for much smaller sizes.
408 		 */
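		/*
		 * For example, with opt_lg_extent_max_active_fit == 6, an
		 * extent more than 64 times larger than the request is left
		 * in place rather than split here.
		 */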
409 		if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
410 			return NULL;
411 		}
412 		assert(!extent_heap_empty(&extents->heaps[i]));
413 		extent_t *extent = extent_heap_first(&extents->heaps[i]);
414 		assert(extent_size_get(extent) >= size);
415 		return extent;
416 	}
417 
418 	return NULL;
419 }
420 
421 /*
422  * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
423  * large enough.
424  */
425 static extent_t *
426 extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
427     size_t size) {
428 	extent_t *ret = NULL;
429 
430 	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
431 	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
432 	    &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i =
433 	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
434 	    (size_t)i+1)) {
435 		assert(!extent_heap_empty(&extents->heaps[i]));
436 		extent_t *extent = extent_heap_first(&extents->heaps[i]);
437 		assert(extent_size_get(extent) >= size);
438 		if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
439 			ret = extent;
440 		}
441 		if (i == NPSIZES) {
442 			break;
443 		}
444 		assert(i < NPSIZES);
445 	}
446 
447 	return ret;
448 }
449 
450 /*
451  * Do {best,first}-fit extent selection, where the selection policy choice is
452  * based on extents->delay_coalesce.  Best-fit selection requires less
453  * searching, but its layout policy is less stable and may cause higher virtual
454  * memory fragmentation as a side effect.
455  */
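/*
 * For example (assuming 4 KiB pages), a 16 KiB request with 64 KiB alignment
 * first searches for an extent of at least 16 KiB + 64 KiB - 4 KiB = 76 KiB;
 * any page-aligned extent that large necessarily contains a 64 KiB-aligned
 * 16 KiB range, whatever its base address.  Only if that fails does
 * extents_fit_alignment() scan the smaller size classes for an extent whose
 * base happens to be suitably placed.
 */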
456 static extent_t *
457 extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
458     size_t esize, size_t alignment) {
459 	malloc_mutex_assert_owner(tsdn, &extents->mtx);
460 
461 	size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
462 	/* Beware size_t wrap-around. */
463 	if (max_size < esize) {
464 		return NULL;
465 	}
466 
467 	extent_t *extent = extents->delay_coalesce ?
468 	    extents_best_fit_locked(tsdn, arena, extents, max_size) :
469 	    extents_first_fit_locked(tsdn, arena, extents, max_size);
470 
471 	if (alignment > PAGE && extent == NULL) {
472 		/*
473 		 * max_size guarantees the alignment requirement but is rather
474 		 * pessimistic.  Next we try to satisfy the aligned allocation
475 		 * with sizes in [esize, max_size).
476 		 */
477 		extent = extents_fit_alignment(extents, esize, max_size,
478 		    alignment);
479 	}
480 
481 	return extent;
482 }
483 
484 static bool
485 extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
486     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
487     extent_t *extent) {
488 	extent_state_set(extent, extent_state_active);
489 	bool coalesced;
490 	extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
491 	    extents, extent, &coalesced, false);
492 	extent_state_set(extent, extents_state_get(extents));
493 
494 	if (!coalesced) {
495 		return true;
496 	}
497 	extents_insert_locked(tsdn, extents, extent);
498 	return false;
499 }
500 
501 extent_t *
502 extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
503     extents_t *extents, void *new_addr, size_t size, size_t pad,
504     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
505 	assert(size + pad != 0);
506 	assert(alignment != 0);
507 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
508 	    WITNESS_RANK_CORE, 0);
509 
510 	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
511 	    new_addr, size, pad, alignment, slab, szind, zero, commit, false);
512 	assert(extent == NULL || extent_dumpable_get(extent));
513 	return extent;
514 }
515 
516 void
517 extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
518     extents_t *extents, extent_t *extent) {
519 	assert(extent_base_get(extent) != NULL);
520 	assert(extent_size_get(extent) != 0);
521 	assert(extent_dumpable_get(extent));
522 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
523 	    WITNESS_RANK_CORE, 0);
524 
525 	extent_addr_set(extent, extent_base_get(extent));
526 	extent_zeroed_set(extent, false);
527 
528 	extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
529 }
530 
531 extent_t *
532 extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
533     extents_t *extents, size_t npages_min) {
534 	rtree_ctx_t rtree_ctx_fallback;
535 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
536 
537 	malloc_mutex_lock(tsdn, &extents->mtx);
538 
539 	/*
540 	 * Get the LRU coalesced extent, if any.  If coalescing was delayed,
541 	 * the loop will iterate until the LRU extent is fully coalesced.
542 	 */
543 	extent_t *extent;
544 	while (true) {
545 		/* Get the LRU extent, if any. */
546 		extent = extent_list_first(&extents->lru);
547 		if (extent == NULL) {
548 			goto label_return;
549 		}
550 		/* Check the eviction limit. */
551 		size_t extents_npages = atomic_load_zu(&extents->npages,
552 		    ATOMIC_RELAXED);
553 		if (extents_npages <= npages_min) {
554 			extent = NULL;
555 			goto label_return;
556 		}
557 		extents_remove_locked(tsdn, extents, extent);
558 		if (!extents->delay_coalesce) {
559 			break;
560 		}
561 		/* Try to coalesce. */
562 		if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
563 		    rtree_ctx, extents, extent)) {
564 			break;
565 		}
566 		/*
567 		 * The LRU extent was just coalesced and the result placed in
568 		 * the LRU at its neighbor's position.  Start over.
569 		 */
570 	}
571 
572 	/*
573 	 * Either mark the extent active or deregister it to protect against
574 	 * concurrent operations.
575 	 */
576 	switch (extents_state_get(extents)) {
577 	case extent_state_active:
578 		not_reached();
579 	case extent_state_dirty:
580 	case extent_state_muzzy:
581 		extent_state_set(extent, extent_state_active);
582 		break;
583 	case extent_state_retained:
584 		extent_deregister(tsdn, extent);
585 		break;
586 	default:
587 		not_reached();
588 	}
589 
590 label_return:
591 	malloc_mutex_unlock(tsdn, &extents->mtx);
592 	return extent;
593 }
594 
595 static void
596 extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
597     extents_t *extents, extent_t *extent, bool growing_retained) {
598 	/*
599 	 * Leak extent after making sure its pages have already been purged, so
600 	 * that this is only a virtual memory leak.
601 	 */
602 	if (extents_state_get(extents) == extent_state_dirty) {
603 		if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
604 		    extent, 0, extent_size_get(extent), growing_retained)) {
605 			extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
606 			    extent, 0, extent_size_get(extent),
607 			    growing_retained);
608 		}
609 	}
610 	extent_dalloc(tsdn, arena, extent);
611 }
612 
613 void
614 extents_prefork(tsdn_t *tsdn, extents_t *extents) {
615 	malloc_mutex_prefork(tsdn, &extents->mtx);
616 }
617 
618 void
619 extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
620 	malloc_mutex_postfork_parent(tsdn, &extents->mtx);
621 }
622 
623 void
624 extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
625 	malloc_mutex_postfork_child(tsdn, &extents->mtx);
626 }
627 
628 static void
629 extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
630     extent_t *extent) {
631 	assert(extent_arena_get(extent) == arena);
632 	assert(extent_state_get(extent) == extent_state_active);
633 
634 	extent_state_set(extent, extents_state_get(extents));
635 	extents_insert_locked(tsdn, extents, extent);
636 }
637 
638 static void
639 extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
640     extent_t *extent) {
641 	malloc_mutex_lock(tsdn, &extents->mtx);
642 	extent_deactivate_locked(tsdn, arena, extents, extent);
643 	malloc_mutex_unlock(tsdn, &extents->mtx);
644 }
645 
646 static void
647 extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
648     extent_t *extent) {
649 	assert(extent_arena_get(extent) == arena);
650 	assert(extent_state_get(extent) == extents_state_get(extents));
651 
652 	extents_remove_locked(tsdn, extents, extent);
653 	extent_state_set(extent, extent_state_active);
654 }
655 
656 static bool
657 extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
658     const extent_t *extent, bool dependent, bool init_missing,
659     rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
660 	*r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
661 	    (uintptr_t)extent_base_get(extent), dependent, init_missing);
662 	if (!dependent && *r_elm_a == NULL) {
663 		return true;
664 	}
665 	assert(*r_elm_a != NULL);
666 
667 	*r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
668 	    (uintptr_t)extent_last_get(extent), dependent, init_missing);
669 	if (!dependent && *r_elm_b == NULL) {
670 		return true;
671 	}
672 	assert(*r_elm_b != NULL);
673 
674 	return false;
675 }
676 
677 static void
678 extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
679     rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
680 	rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
681 	if (elm_b != NULL) {
682 		rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
683 		    slab);
684 	}
685 }
686 
687 static void
688 extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
689     szind_t szind) {
690 	assert(extent_slab_get(extent));
691 
692 	/* Register interior. */
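	/*
	 * Only the strictly interior pages are written here (e.g. pages 1..6 of
	 * an 8-page slab); the first and last pages are covered by the boundary
	 * mappings maintained elsewhere (e.g. extent_rtree_write_acquired()).
	 */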
693 	for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
694 		rtree_write(tsdn, &extents_rtree, rtree_ctx,
695 		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
696 		    LG_PAGE), extent, szind, true);
697 	}
698 }
699 
700 static void
701 extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
702 	cassert(config_prof);
703 	/* prof_gdump() requirement. */
704 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
705 	    WITNESS_RANK_CORE, 0);
706 
707 	if (opt_prof && extent_state_get(extent) == extent_state_active) {
708 		size_t nadd = extent_size_get(extent) >> LG_PAGE;
709 		size_t cur = atomic_fetch_add_zu(&curpages, nadd,
710 		    ATOMIC_RELAXED) + nadd;
711 		size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
712 		while (cur > high && !atomic_compare_exchange_weak_zu(
713 		    &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
714 			/*
715 			 * Don't refresh cur, because it may have decreased
716 			 * since this thread lost the highpages update race.
717 			 * Note that high is updated in case of CAS failure.
718 			 */
719 		}
720 		if (cur > high && prof_gdump_get_unlocked()) {
721 			prof_gdump(tsdn);
722 		}
723 	}
724 }
725 
726 static void
727 extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
728 	cassert(config_prof);
729 
730 	if (opt_prof && extent_state_get(extent) == extent_state_active) {
731 		size_t nsub = extent_size_get(extent) >> LG_PAGE;
732 		assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
733 		atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
734 	}
735 }
736 
737 static bool
738 extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
739 	rtree_ctx_t rtree_ctx_fallback;
740 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
741 	rtree_leaf_elm_t *elm_a, *elm_b;
742 
743 	/*
744 	 * We need to hold the lock to protect against a concurrent coalesce
745 	 * operation that sees us in a partial state.
746 	 */
747 	extent_lock(tsdn, extent);
748 
749 	if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
750 	    &elm_a, &elm_b)) {
		/* Lookup failed; drop the pool lock before reporting the error. */
		extent_unlock(tsdn, extent);
751 		return true;
752 	}
753 
754 	szind_t szind = extent_szind_get_maybe_invalid(extent);
755 	bool slab = extent_slab_get(extent);
756 	extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
757 	if (slab) {
758 		extent_interior_register(tsdn, rtree_ctx, extent, szind);
759 	}
760 
761 	extent_unlock(tsdn, extent);
762 
763 	if (config_prof && gdump_add) {
764 		extent_gdump_add(tsdn, extent);
765 	}
766 
767 	return false;
768 }
769 
770 static bool
771 extent_register(tsdn_t *tsdn, extent_t *extent) {
772 	return extent_register_impl(tsdn, extent, true);
773 }
774 
775 static bool
776 extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
777 	return extent_register_impl(tsdn, extent, false);
778 }
779 
780 static void
781 extent_reregister(tsdn_t *tsdn, extent_t *extent) {
782 	bool err = extent_register(tsdn, extent);
783 	assert(!err);
784 }
785 
786 /*
787  * Removes all pointers to the given extent from the global rtree indices for
788  * its interior.  This is relevant for slab extents, for which we need to do
789  * metadata lookups at places other than the head of the extent.  We deregister
790  * on the interior, then, when an extent moves from being an active slab to an
791  * inactive state.
792  */
793 static void
794 extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
795     extent_t *extent) {
796 	size_t i;
797 
798 	assert(extent_slab_get(extent));
799 
800 	for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
801 		rtree_clear(tsdn, &extents_rtree, rtree_ctx,
802 		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
803 		    LG_PAGE));
804 	}
805 }
806 
807 /*
808  * Removes all pointers to the given extent from the global rtree.
809  */
810 static void
811 extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
812 	rtree_ctx_t rtree_ctx_fallback;
813 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
814 	rtree_leaf_elm_t *elm_a, *elm_b;
815 	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
816 	    &elm_a, &elm_b);
817 
818 	extent_lock(tsdn, extent);
819 
820 	extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false);
821 	if (extent_slab_get(extent)) {
822 		extent_interior_deregister(tsdn, rtree_ctx, extent);
823 		extent_slab_set(extent, false);
824 	}
825 
826 	extent_unlock(tsdn, extent);
827 
828 	if (config_prof && gdump) {
829 		extent_gdump_sub(tsdn, extent);
830 	}
831 }
832 
833 static void
834 extent_deregister(tsdn_t *tsdn, extent_t *extent) {
835 	extent_deregister_impl(tsdn, extent, true);
836 }
837 
838 static void
839 extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
840 	extent_deregister_impl(tsdn, extent, false);
841 }
842 
843 /*
844  * Tries to find and remove an extent from extents that can be used for the
845  * given allocation request.
846  */
847 static extent_t *
848 extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
849     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
850     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
851     bool growing_retained) {
852 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
853 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
854 	assert(alignment > 0);
855 	if (config_debug && new_addr != NULL) {
856 		/*
857 		 * Non-NULL new_addr has two use cases:
858 		 *
859 		 *   1) Recycle a known-extant extent, e.g. during purging.
860 		 *   2) Perform in-place expanding reallocation.
861 		 *
862 		 * Regardless of use case, new_addr must either refer to a
863 		 * non-existing extent, or to the base of an extant extent,
864 		 * since only active slabs support interior lookups (which of
865 		 * course cannot be recycled).
866 		 */
867 		assert(PAGE_ADDR2BASE(new_addr) == new_addr);
868 		assert(pad == 0);
869 		assert(alignment <= PAGE);
870 	}
871 
872 	size_t esize = size + pad;
873 	malloc_mutex_lock(tsdn, &extents->mtx);
874 	extent_hooks_assure_initialized(arena, r_extent_hooks);
875 	extent_t *extent;
876 	if (new_addr != NULL) {
877 		extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr);
878 		if (extent != NULL) {
879 			/*
880 			 * We might null-out extent to report an error, but we
881 			 * still need to unlock the associated mutex after.
882 			 */
883 			extent_t *unlock_extent = extent;
884 			assert(extent_base_get(extent) == new_addr);
885 			if (extent_arena_get(extent) != arena ||
886 			    extent_size_get(extent) < esize ||
887 			    extent_state_get(extent) !=
888 			    extents_state_get(extents)) {
889 				extent = NULL;
890 			}
891 			extent_unlock(tsdn, unlock_extent);
892 		}
893 	} else {
894 		extent = extents_fit_locked(tsdn, arena, extents, esize,
895 		    alignment);
896 	}
897 	if (extent == NULL) {
898 		malloc_mutex_unlock(tsdn, &extents->mtx);
899 		return NULL;
900 	}
901 
902 	extent_activate_locked(tsdn, arena, extents, extent);
903 	malloc_mutex_unlock(tsdn, &extents->mtx);
904 
905 	return extent;
906 }
907 
908 /*
909  * Given an allocation request and an extent guaranteed to be able to satisfy
910  * it, this splits off lead and trail extents, leaving extent pointing to an
911  * extent satisfying the allocation.
912  * This function doesn't put lead or trail into any extents_t; it's the caller's
913  * job to ensure that they can be reused.
914  */
915 typedef enum {
916 	/*
917 	 * Split successfully.  lead, extent, and trail are modified to extents
918 	 * describing the ranges before, in, and after the given allocation.
919 	 */
920 	extent_split_interior_ok,
921 	/*
922 	 * The extent can't satisfy the given allocation request.  None of the
923 	 * input extent_t *s are touched.
924 	 */
925 	extent_split_interior_cant_alloc,
926 	/*
927 	 * In a potentially invalid state.  Must leak (if *to_leak is non-NULL),
928 	 * and salvage what's still salvageable (if *to_salvage is non-NULL).
929 	 * None of lead, extent, or trail are valid.
930 	 */
931 	extent_split_interior_error
932 } extent_split_interior_result_t;
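
/*
 * For instance (assuming 4 KiB pages), carving a 16 KiB allocation at 64 KiB
 * alignment out of a 96 KiB extent whose base lies 16 KiB below an alignment
 * boundary yields a 16 KiB lead, a 16 KiB result extent, and a 64 KiB trail;
 * lead and trail are handed back to the caller for reuse or retention.
 */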
933 
934 static extent_split_interior_result_t
935 extent_split_interior(tsdn_t *tsdn, arena_t *arena,
936     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
937     /* The result of splitting, in case of success. */
938     extent_t **extent, extent_t **lead, extent_t **trail,
939     /* The mess to clean up, in case of error. */
940     extent_t **to_leak, extent_t **to_salvage,
941     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
942     szind_t szind, bool growing_retained) {
943 	size_t esize = size + pad;
944 	size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
945 	    PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
946 	assert(new_addr == NULL || leadsize == 0);
947 	if (extent_size_get(*extent) < leadsize + esize) {
948 		return extent_split_interior_cant_alloc;
949 	}
950 	size_t trailsize = extent_size_get(*extent) - leadsize - esize;
951 
952 	*lead = NULL;
953 	*trail = NULL;
954 	*to_leak = NULL;
955 	*to_salvage = NULL;
956 
957 	/* Split the lead. */
958 	if (leadsize != 0) {
959 		*lead = *extent;
960 		*extent = extent_split_impl(tsdn, arena, r_extent_hooks,
961 		    *lead, leadsize, NSIZES, false, esize + trailsize, szind,
962 		    slab, growing_retained);
963 		if (*extent == NULL) {
964 			*to_leak = *lead;
965 			*lead = NULL;
966 			return extent_split_interior_error;
967 		}
968 	}
969 
970 	/* Split the trail. */
971 	if (trailsize != 0) {
972 		*trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
973 		    esize, szind, slab, trailsize, NSIZES, false,
974 		    growing_retained);
975 		if (*trail == NULL) {
976 			*to_leak = *extent;
977 			*to_salvage = *lead;
978 			*lead = NULL;
979 			*extent = NULL;
980 			return extent_split_interior_error;
981 		}
982 	}
983 
984 	if (leadsize == 0 && trailsize == 0) {
985 		/*
986 		 * Splitting causes szind to be set as a side effect, but no
987 		 * splitting occurred.
988 		 */
989 		extent_szind_set(*extent, szind);
990 		if (szind != NSIZES) {
991 			rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
992 			    (uintptr_t)extent_addr_get(*extent), szind, slab);
993 			if (slab && extent_size_get(*extent) > PAGE) {
994 				rtree_szind_slab_update(tsdn, &extents_rtree,
995 				    rtree_ctx,
996 				    (uintptr_t)extent_past_get(*extent) -
997 				    (uintptr_t)PAGE, szind, slab);
998 			}
999 		}
1000 	}
1001 
1002 	return extent_split_interior_ok;
1003 }
1004 
1005 /*
1006  * This fulfills the indicated allocation request out of the given extent (which
1007  * the caller should have ensured was big enough).  If there's any unused space
1008  * before or after the resulting allocation, that space is given its own extent
1009  * and put back into extents.
1010  */
1011 static extent_t *
1012 extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
1013     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1014     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
1015     szind_t szind, extent_t *extent, bool growing_retained) {
1016 	extent_t *lead;
1017 	extent_t *trail;
1018 	extent_t *to_leak;
1019 	extent_t *to_salvage;
1020 
1021 	extent_split_interior_result_t result = extent_split_interior(
1022 	    tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
1023 	    &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
1024 	    growing_retained);
1025 
1026 	if (result == extent_split_interior_ok) {
1027 		if (lead != NULL) {
1028 			extent_deactivate(tsdn, arena, extents, lead);
1029 		}
1030 		if (trail != NULL) {
1031 			extent_deactivate(tsdn, arena, extents, trail);
1032 		}
1033 		return extent;
1034 	} else {
1035 		/*
1036 		 * We should have picked an extent that was large enough to
1037 		 * fulfill our allocation request.
1038 		 */
1039 		assert(result == extent_split_interior_error);
1040 		if (to_salvage != NULL) {
1041 			extent_deregister(tsdn, to_salvage);
1042 		}
1043 		if (to_leak != NULL) {
1044 			void *leak = extent_base_get(to_leak);
1045 			extent_deregister_no_gdump_sub(tsdn, to_leak);
1046 			extents_leak(tsdn, arena, r_extent_hooks, extents,
1047 			    to_leak, growing_retained);
1048 			assert(extent_lock_from_addr(tsdn, rtree_ctx, leak)
1049 			    == NULL);
1050 		}
1051 		return NULL;
1052 	}
1053 	unreachable();
1054 }
1055 
1056 /*
1057  * Tries to satisfy the given allocation request by reusing one of the extents
1058  * in the given extents_t.
1059  */
1060 static extent_t *
1061 extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1062     extents_t *extents, void *new_addr, size_t size, size_t pad,
1063     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
1064     bool growing_retained) {
1065 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1066 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1067 	assert(new_addr == NULL || !slab);
1068 	assert(pad == 0 || !slab);
1069 	assert(!*zero || !slab);
1070 
1071 	rtree_ctx_t rtree_ctx_fallback;
1072 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1073 
1074 	extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
1075 	    rtree_ctx, extents, new_addr, size, pad, alignment, slab,
1076 	    growing_retained);
1077 	if (extent == NULL) {
1078 		return NULL;
1079 	}
1080 
1081 	extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
1082 	    extents, new_addr, size, pad, alignment, slab, szind, extent,
1083 	    growing_retained);
1084 	if (extent == NULL) {
1085 		return NULL;
1086 	}
1087 
1088 	if (*commit && !extent_committed_get(extent)) {
1089 		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
1090 		    0, extent_size_get(extent), growing_retained)) {
1091 			extent_record(tsdn, arena, r_extent_hooks, extents,
1092 			    extent, growing_retained);
1093 			return NULL;
1094 		}
1095 		extent_zeroed_set(extent, true);
1096 	}
1097 
1098 	if (extent_committed_get(extent)) {
1099 		*commit = true;
1100 	}
1101 	if (extent_zeroed_get(extent)) {
1102 		*zero = true;
1103 	}
1104 
1105 	if (pad != 0) {
1106 		extent_addr_randomize(tsdn, extent, alignment);
1107 	}
1108 	assert(extent_state_get(extent) == extent_state_active);
1109 	if (slab) {
1110 		extent_slab_set(extent, slab);
1111 		extent_interior_register(tsdn, rtree_ctx, extent, szind);
1112 	}
1113 
1114 	if (*zero) {
1115 		void *addr = extent_base_get(extent);
1116 		if (!extent_zeroed_get(extent)) {
1117 			size_t size = extent_size_get(extent);
1118 			if (pages_purge_forced(addr, size)) {
1119 				memset(addr, 0, size);
1120 			}
1121 		} else if (config_debug) {
1122 			size_t *p = (size_t *)(uintptr_t)addr;
1123 			/* Check the first page only. */
1124 			for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
1125 				assert(p[i] == 0);
1126 			}
1127 		}
1128 	}
1129 	return extent;
1130 }
1131 
1132 /*
1133  * If the caller specifies (!*zero), it is still possible to receive zeroed
1134  * memory, in which case *zero is toggled to true.  arena_extent_alloc() takes
1135  * advantage of this to avoid demanding zeroed extents, but taking advantage of
1136  * them if they are returned.
1137  */
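/*
 * For example, memory obtained from a fresh mmap() is already zeroed, so even
 * a caller that passed *zero == false receives zeroed pages and sees *zero set
 * to true, allowing it to skip a redundant memset().
 */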
1138 static void *
1139 extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
1140     size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
1141 	void *ret;
1142 
1143 	assert(size != 0);
1144 	assert(alignment != 0);
1145 
1146 	/* "primary" dss. */
1147 	if (have_dss && dss_prec == dss_prec_primary && (ret =
1148 	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
1149 	    commit)) != NULL) {
1150 		return ret;
1151 	}
1152 	/* mmap. */
1153 	if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
1154 	    != NULL) {
1155 		return ret;
1156 	}
1157 	/* "secondary" dss. */
1158 	if (have_dss && dss_prec == dss_prec_secondary && (ret =
1159 	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
1160 	    commit)) != NULL) {
1161 		return ret;
1162 	}
1163 
1164 	/* All strategies for allocation failed. */
1165 	return NULL;
1166 }
1167 
1168 static void *
1169 extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
1170     size_t size, size_t alignment, bool *zero, bool *commit) {
1171 	void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
1172 	    commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
1173 	    ATOMIC_RELAXED));
1174 	if (have_madvise_huge && ret) {
1175 		pages_set_thp_state(ret, size);
1176 	}
1177 	return ret;
1178 }
1179 
1180 static void *
1181 extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
1182     size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
1183 	tsdn_t *tsdn;
1184 	arena_t *arena;
1185 
1186 	tsdn = tsdn_fetch();
1187 	arena = arena_get(tsdn, arena_ind, false);
1188 	/*
1189 	 * The arena we're allocating on behalf of must have been initialized
1190 	 * already.
1191 	 */
1192 	assert(arena != NULL);
1193 
1194 	return extent_alloc_default_impl(tsdn, arena, new_addr, size,
1195 	    alignment, zero, commit);
1196 }
1197 
1198 static void
1199 extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
1200 	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
1201 	if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
1202 		/*
1203 		 * The only legitimate case of customized extent hooks for a0 is
1204 		 * hooks with no allocation activities.  One such example is to
1205 		 * place metadata on pre-allocated resources such as huge pages.
1206 		 * In that case, rely on reentrancy_level checks to catch
1207 		 * infinite recursions.
1208 		 */
1209 		pre_reentrancy(tsd, NULL);
1210 	} else {
1211 		pre_reentrancy(tsd, arena);
1212 	}
1213 }
1214 
1215 static void
1216 extent_hook_post_reentrancy(tsdn_t *tsdn) {
1217 	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
1218 	post_reentrancy(tsd);
1219 }
1220 
1221 /*
1222  * If virtual memory is retained, create increasingly larger extents from which
1223  * to split requested extents in order to limit the total number of disjoint
1224  * virtual memory ranges retained by each arena.
1225  */
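/*
 * Concretely: extent_grow_next indexes the page-size-class series; each growth
 * allocation requests the first class at or above extent_grow_next that can
 * hold the (padded, alignment-adjusted) request, and a successful growth
 * advances extent_grow_next past that class, capped at retain_grow_limit.
 * Retained mappings therefore grow roughly geometrically rather than one
 * request at a time.
 */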
1226 static extent_t *
1227 extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
1228     extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
1229     bool slab, szind_t szind, bool *zero, bool *commit) {
1230 	malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
1231 	assert(pad == 0 || !slab);
1232 	assert(!*zero || !slab);
1233 
1234 	size_t esize = size + pad;
1235 	size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
1236 	/* Beware size_t wrap-around. */
1237 	if (alloc_size_min < esize) {
1238 		goto label_err;
1239 	}
1240 	/*
1241 	 * Find the next extent size in the series that would be large enough to
1242 	 * satisfy this request.
1243 	 */
1244 	pszind_t egn_skip = 0;
1245 	size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
1246 	while (alloc_size < alloc_size_min) {
1247 		egn_skip++;
1248 		if (arena->extent_grow_next + egn_skip == NPSIZES) {
1249 			/* Outside legal range. */
1250 			goto label_err;
1251 		}
1252 		assert(arena->extent_grow_next + egn_skip < NPSIZES);
1253 		alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
1254 	}
1255 
1256 	extent_t *extent = extent_alloc(tsdn, arena);
1257 	if (extent == NULL) {
1258 		goto label_err;
1259 	}
1260 	bool zeroed = false;
1261 	bool committed = false;
1262 
1263 	void *ptr;
1264 	if (*r_extent_hooks == &extent_hooks_default) {
1265 		ptr = extent_alloc_default_impl(tsdn, arena, NULL,
1266 		    alloc_size, PAGE, &zeroed, &committed);
1267 	} else {
1268 		extent_hook_pre_reentrancy(tsdn, arena);
1269 		ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
1270 		    alloc_size, PAGE, &zeroed, &committed,
1271 		    arena_ind_get(arena));
1272 		extent_hook_post_reentrancy(tsdn);
1273 	}
1274 
1275 	extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
1276 	    arena_extent_sn_next(arena), extent_state_active, zeroed,
1277 	    committed, true);
1278 	if (ptr == NULL) {
1279 		extent_dalloc(tsdn, arena, extent);
1280 		goto label_err;
1281 	}
1282 
1283 	if (extent_register_no_gdump_add(tsdn, extent)) {
1284 		extents_leak(tsdn, arena, r_extent_hooks,
1285 		    &arena->extents_retained, extent, true);
1286 		goto label_err;
1287 	}
1288 
1289 	if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
1290 		*zero = true;
1291 	}
1292 	if (extent_committed_get(extent)) {
1293 		*commit = true;
1294 	}
1295 
1296 	rtree_ctx_t rtree_ctx_fallback;
1297 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1298 
1299 	extent_t *lead;
1300 	extent_t *trail;
1301 	extent_t *to_leak;
1302 	extent_t *to_salvage;
1303 	extent_split_interior_result_t result = extent_split_interior(
1304 	    tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
1305 	    &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
1306 	    true);
1307 
1308 	if (result == extent_split_interior_ok) {
1309 		if (lead != NULL) {
1310 			extent_record(tsdn, arena, r_extent_hooks,
1311 			    &arena->extents_retained, lead, true);
1312 		}
1313 		if (trail != NULL) {
1314 			extent_record(tsdn, arena, r_extent_hooks,
1315 			    &arena->extents_retained, trail, true);
1316 		}
1317 	} else {
1318 		/*
1319 		 * We should have allocated a sufficiently large extent; the
1320 		 * cant_alloc case should not occur.
1321 		 */
1322 		assert(result == extent_split_interior_error);
1323 		if (to_salvage != NULL) {
1324 			if (config_prof) {
1325 				extent_gdump_add(tsdn, to_salvage);
1326 			}
1327 			extent_record(tsdn, arena, r_extent_hooks,
1328 			    &arena->extents_retained, to_salvage, true);
1329 		}
1330 		if (to_leak != NULL) {
1331 			extent_deregister_no_gdump_sub(tsdn, to_leak);
1332 			extents_leak(tsdn, arena, r_extent_hooks,
1333 			    &arena->extents_retained, to_leak, true);
1334 		}
1335 		goto label_err;
1336 	}
1337 
1338 	if (*commit && !extent_committed_get(extent)) {
1339 		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
1340 		    extent_size_get(extent), true)) {
1341 			extent_record(tsdn, arena, r_extent_hooks,
1342 			    &arena->extents_retained, extent, true);
1343 			goto label_err;
1344 		}
1345 		extent_zeroed_set(extent, true);
1346 	}
1347 
1348 	/*
1349 	 * Increment extent_grow_next if doing so wouldn't exceed the allowed
1350 	 * range.
1351 	 */
1352 	if (arena->extent_grow_next + egn_skip + 1 <=
1353 	    arena->retain_grow_limit) {
1354 		arena->extent_grow_next += egn_skip + 1;
1355 	} else {
1356 		arena->extent_grow_next = arena->retain_grow_limit;
1357 	}
1358 	/* All opportunities for failure are past. */
1359 	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1360 
1361 	if (config_prof) {
1362 		/* Adjust gdump stats now that extent is final size. */
1363 		extent_gdump_add(tsdn, extent);
1364 	}
1365 	if (pad != 0) {
1366 		extent_addr_randomize(tsdn, extent, alignment);
1367 	}
1368 	if (slab) {
1369 		rtree_ctx_t rtree_ctx_fallback;
1370 		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
1371 		    &rtree_ctx_fallback);
1372 
1373 		extent_slab_set(extent, true);
1374 		extent_interior_register(tsdn, rtree_ctx, extent, szind);
1375 	}
1376 	if (*zero && !extent_zeroed_get(extent)) {
1377 		void *addr = extent_base_get(extent);
1378 		size_t size = extent_size_get(extent);
1379 		if (pages_purge_forced(addr, size)) {
1380 			memset(addr, 0, size);
1381 		}
1382 	}
1383 
1384 	return extent;
1385 label_err:
1386 	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1387 	return NULL;
1388 }
1389 
1390 static extent_t *
1391 extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
1392     extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1393     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1394 	assert(size != 0);
1395 	assert(alignment != 0);
1396 
1397 	malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
1398 
1399 	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
1400 	    &arena->extents_retained, new_addr, size, pad, alignment, slab,
1401 	    szind, zero, commit, true);
1402 	if (extent != NULL) {
1403 		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1404 		if (config_prof) {
1405 			extent_gdump_add(tsdn, extent);
1406 		}
1407 	} else if (opt_retain && new_addr == NULL) {
1408 		extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
1409 		    pad, alignment, slab, szind, zero, commit);
1410 		/* extent_grow_retained() always releases extent_grow_mtx. */
1411 	} else {
1412 		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1413 	}
1414 	malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
1415 
1416 	return extent;
1417 }
1418 
1419 static extent_t *
1420 extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
1421     extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1422     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1423 	size_t esize = size + pad;
1424 	extent_t *extent = extent_alloc(tsdn, arena);
1425 	if (extent == NULL) {
1426 		return NULL;
1427 	}
1428 	void *addr;
1429 	if (*r_extent_hooks == &extent_hooks_default) {
1430 		/* Call directly to propagate tsdn. */
1431 		addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
1432 		    alignment, zero, commit);
1433 	} else {
1434 		extent_hook_pre_reentrancy(tsdn, arena);
1435 		addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
1436 		    esize, alignment, zero, commit, arena_ind_get(arena));
1437 		extent_hook_post_reentrancy(tsdn);
1438 	}
1439 	if (addr == NULL) {
1440 		extent_dalloc(tsdn, arena, extent);
1441 		return NULL;
1442 	}
1443 	extent_init(extent, arena, addr, esize, slab, szind,
1444 	    arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
1445 	    true);
1446 	if (pad != 0) {
1447 		extent_addr_randomize(tsdn, extent, alignment);
1448 	}
1449 	if (extent_register(tsdn, extent)) {
1450 		extents_leak(tsdn, arena, r_extent_hooks,
1451 		    &arena->extents_retained, extent, false);
1452 		return NULL;
1453 	}
1454 
1455 	return extent;
1456 }
1457 
1458 extent_t *
1459 extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
1460     extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1461     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1462 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1463 	    WITNESS_RANK_CORE, 0);
1464 
1465 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1466 
1467 	extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
1468 	    new_addr, size, pad, alignment, slab, szind, zero, commit);
1469 	if (extent == NULL) {
1470 		if (opt_retain && new_addr != NULL) {
1471 			/*
1472 			 * When retain is enabled and new_addr is set, we do not
1473 			 * attempt extent_alloc_wrapper_hard; an mmap at a fixed
1474 			 * new_addr is very unlikely to succeed (unless new_addr
1475 			 * is at the end of the existing mappings).
1476 			 */
1477 			return NULL;
1478 		}
1479 		extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
1480 		    new_addr, size, pad, alignment, slab, szind, zero, commit);
1481 	}
1482 
1483 	assert(extent == NULL || extent_dumpable_get(extent));
1484 	return extent;
1485 }
1486 
1487 static bool
1488 extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
1489     const extent_t *outer) {
1490 	assert(extent_arena_get(inner) == arena);
1491 	if (extent_arena_get(outer) != arena) {
1492 		return false;
1493 	}
1494 
1495 	assert(extent_state_get(inner) == extent_state_active);
1496 	if (extent_state_get(outer) != extents->state) {
1497 		return false;
1498 	}
1499 
1500 	if (extent_committed_get(inner) != extent_committed_get(outer)) {
1501 		return false;
1502 	}
1503 
1504 	return true;
1505 }
1506 
1507 static bool
1508 extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1509     extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
1510     bool growing_retained) {
1511 	assert(extent_can_coalesce(arena, extents, inner, outer));
1512 
1513 	extent_activate_locked(tsdn, arena, extents, outer);
1514 
1515 	malloc_mutex_unlock(tsdn, &extents->mtx);
1516 	bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
1517 	    forward ? inner : outer, forward ? outer : inner, growing_retained);
1518 	malloc_mutex_lock(tsdn, &extents->mtx);
1519 
1520 	if (err) {
1521 		extent_deactivate_locked(tsdn, arena, extents, outer);
1522 	}
1523 
1524 	return err;
1525 }
1526 
1527 static extent_t *
1528 extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
1529     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1530     extent_t *extent, bool *coalesced, bool growing_retained) {
1531 	/*
1532 	 * Continue attempting to coalesce until failure, to protect against
1533 	 * races with other threads that are thwarted by this one.
1534 	 */
1535 	bool again;
1536 	do {
1537 		again = false;
1538 
1539 		/* Try to coalesce forward. */
1540 		extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
1541 		    extent_past_get(extent));
1542 		if (next != NULL) {
1543 			/*
1544 			 * extents->mtx only protects against races for
1545 			 * like-state extents, so call extent_can_coalesce()
1546 			 * before releasing next's pool lock.
1547 			 */
1548 			bool can_coalesce = extent_can_coalesce(arena, extents,
1549 			    extent, next);
1550 
1551 			extent_unlock(tsdn, next);
1552 
1553 			if (can_coalesce && !extent_coalesce(tsdn, arena,
1554 			    r_extent_hooks, extents, extent, next, true,
1555 			    growing_retained)) {
1556 				if (extents->delay_coalesce) {
1557 					/* Do minimal coalescing. */
1558 					*coalesced = true;
1559 					return extent;
1560 				}
1561 				again = true;
1562 			}
1563 		}
1564 
1565 		/* Try to coalesce backward. */
1566 		extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
1567 		    extent_before_get(extent));
1568 		if (prev != NULL) {
1569 			bool can_coalesce = extent_can_coalesce(arena, extents,
1570 			    extent, prev);
1571 			extent_unlock(tsdn, prev);
1572 
1573 			if (can_coalesce && !extent_coalesce(tsdn, arena,
1574 			    r_extent_hooks, extents, extent, prev, false,
1575 			    growing_retained)) {
1576 				extent = prev;
1577 				if (extents->delay_coalesce) {
1578 					/* Do minimal coalescing. */
1579 					*coalesced = true;
1580 					return extent;
1581 				}
1582 				again = true;
1583 			}
1584 		}
1585 	} while (again);
1586 
1587 	if (extents->delay_coalesce) {
1588 		*coalesced = false;
1589 	}
1590 	return extent;
1591 }
1592 
1593 /*
1594  * Handles the metadata-management part of putting an unused extent into the
1595  * given extents_t (coalescing, deregistering slab interiors, heap operations).
1596  */
1597 static void
1598 extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1599     extents_t *extents, extent_t *extent, bool growing_retained) {
1600 	rtree_ctx_t rtree_ctx_fallback;
1601 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1602 
1603 	assert((extents_state_get(extents) != extent_state_dirty &&
1604 	    extents_state_get(extents) != extent_state_muzzy) ||
1605 	    !extent_zeroed_get(extent));
1606 
1607 	malloc_mutex_lock(tsdn, &extents->mtx);
1608 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1609 
1610 	extent_szind_set(extent, NSIZES);
1611 	if (extent_slab_get(extent)) {
1612 		extent_interior_deregister(tsdn, rtree_ctx, extent);
1613 		extent_slab_set(extent, false);
1614 	}
1615 
1616 	assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
1617 	    (uintptr_t)extent_base_get(extent), true) == extent);
1618 
1619 	if (!extents->delay_coalesce) {
1620 		extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
1621 		    rtree_ctx, extents, extent, NULL, growing_retained);
1622 	} else if (extent_size_get(extent) >= LARGE_MINCLASS) {
1623 		/* Always coalesce large extents eagerly. */
1624 		bool coalesced;
1625 		size_t prev_size;
1626 		do {
1627 			prev_size = extent_size_get(extent);
1628 			assert(extent_state_get(extent) == extent_state_active);
1629 			extent = extent_try_coalesce(tsdn, arena,
1630 			    r_extent_hooks, rtree_ctx, extents, extent,
1631 			    &coalesced, growing_retained);
1632 		} while (coalesced &&
1633 		    extent_size_get(extent) >= prev_size + LARGE_MINCLASS);
1634 	}
1635 	extent_deactivate_locked(tsdn, arena, extents, extent);
1636 
1637 	malloc_mutex_unlock(tsdn, &extents->mtx);
1638 }
1639 
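/*
 * Deallocate an extent that covers an otherwise unused address-space gap:
 * register it so that it can be recycled, or leak it into extents_retained if
 * registration fails.
 */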
1640 void
1641 extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
1642 	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
1643 
1644 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1645 	    WITNESS_RANK_CORE, 0);
1646 
1647 	if (extent_register(tsdn, extent)) {
1648 		extents_leak(tsdn, arena, &extent_hooks,
1649 		    &arena->extents_retained, extent, false);
1650 		return;
1651 	}
1652 	extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
1653 }
1654 
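/*
 * Default deallocation: defer to extent_dalloc_mmap() unless the range lies
 * within the DSS, which can never be unmapped; in that case report failure so
 * that the extent is retained instead.
 */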
1655 static bool
1656 extent_dalloc_default_impl(void *addr, size_t size) {
1657 	if (!have_dss || !extent_in_dss(addr)) {
1658 		return extent_dalloc_mmap(addr, size);
1659 	}
1660 	return true;
1661 }
1662 
1663 static bool
1664 extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1665     bool committed, unsigned arena_ind) {
1666 	return extent_dalloc_default_impl(addr, size);
1667 }
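
/*
 * Illustrative sketch (kept out of the build): how an application might
 * override a single hook through the documented "arena.<i>.extent_hooks"
 * mallctl.  The my_hooks/my_extent_dalloc names are hypothetical; the hook
 * signature mirrors extent_dalloc_default() above.  Returning true from a
 * dalloc hook tells jemalloc the pages were not deallocated, so the extent is
 * retained for reuse.
 */
#if 0
#include <jemalloc/jemalloc.h>

static extent_hooks_t my_hooks;

static bool
my_extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	/* Decline to deallocate; jemalloc keeps the extent around. */
	return true;
}

static void
install_hooks_example(void) {
	extent_hooks_t *old_hooks, *new_hooks = &my_hooks;
	size_t sz = sizeof(old_hooks);

	/* Start from the arena's current hooks, then override dalloc. */
	mallctl("arena.0.extent_hooks", (void *)&old_hooks, &sz, NULL, 0);
	my_hooks = *old_hooks;
	my_hooks.dalloc = my_extent_dalloc;
	mallctl("arena.0.extent_hooks", NULL, NULL, (void *)&new_hooks,
	    sizeof(new_hooks));
}
#endif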
1668 
1669 static bool
1670 extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
1671     extent_hooks_t **r_extent_hooks, extent_t *extent) {
1672 	bool err;
1673 
1674 	assert(extent_base_get(extent) != NULL);
1675 	assert(extent_size_get(extent) != 0);
1676 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1677 	    WITNESS_RANK_CORE, 0);
1678 
1679 	extent_addr_set(extent, extent_base_get(extent));
1680 
1681 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1682 	/* Try to deallocate. */
1683 	if (*r_extent_hooks == &extent_hooks_default) {
1684 		/* Call directly to propagate tsdn. */
1685 		err = extent_dalloc_default_impl(extent_base_get(extent),
1686 		    extent_size_get(extent));
1687 	} else {
1688 		extent_hook_pre_reentrancy(tsdn, arena);
1689 		err = ((*r_extent_hooks)->dalloc == NULL ||
1690 		    (*r_extent_hooks)->dalloc(*r_extent_hooks,
1691 		    extent_base_get(extent), extent_size_get(extent),
1692 		    extent_committed_get(extent), arena_ind_get(arena)));
1693 		extent_hook_post_reentrancy(tsdn);
1694 	}
1695 
1696 	if (!err) {
1697 		extent_dalloc(tsdn, arena, extent);
1698 	}
1699 
1700 	return err;
1701 }
1702 
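/*
 * Full deallocation path: deregister the extent, try the dalloc hook, and if
 * the pages cannot be returned to the system, fall back to decommitting or
 * purging them and record the re-registered extent in extents_retained.
 */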
1703 void
1704 extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
1705     extent_hooks_t **r_extent_hooks, extent_t *extent) {
1706 	assert(extent_dumpable_get(extent));
1707 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1708 	    WITNESS_RANK_CORE, 0);
1709 
1710 	/*
1711 	 * Deregister first to avoid a race with other allocating threads, and
1712 	 * reregister if deallocation fails.
1713 	 */
1714 	extent_deregister(tsdn, extent);
1715 	if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) {
1716 		return;
1717 	}
1718 
1719 	extent_reregister(tsdn, extent);
1720 	if (*r_extent_hooks != &extent_hooks_default) {
1721 		extent_hook_pre_reentrancy(tsdn, arena);
1722 	}
1723 	/* Try to decommit; purge if that fails. */
1724 	bool zeroed;
1725 	if (!extent_committed_get(extent)) {
1726 		zeroed = true;
1727 	} else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
1728 	    0, extent_size_get(extent))) {
1729 		zeroed = true;
1730 	} else if ((*r_extent_hooks)->purge_forced != NULL &&
1731 	    !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
1732 	    extent_base_get(extent), extent_size_get(extent), 0,
1733 	    extent_size_get(extent), arena_ind_get(arena))) {
1734 		zeroed = true;
1735 	} else if (extent_state_get(extent) == extent_state_muzzy ||
1736 	    ((*r_extent_hooks)->purge_lazy != NULL &&
1737 	    !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
1738 	    extent_base_get(extent), extent_size_get(extent), 0,
1739 	    extent_size_get(extent), arena_ind_get(arena)))) {
1740 		zeroed = false;
1741 	} else {
1742 		zeroed = false;
1743 	}
1744 	if (*r_extent_hooks != &extent_hooks_default) {
1745 		extent_hook_post_reentrancy(tsdn);
1746 	}
1747 	extent_zeroed_set(extent, zeroed);
1748 
1749 	if (config_prof) {
1750 		extent_gdump_sub(tsdn, extent);
1751 	}
1752 
1753 	extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
1754 	    extent, false);
1755 }
1756 
1757 static void
1758 extent_destroy_default_impl(void *addr, size_t size) {
1759 	if (!have_dss || !extent_in_dss(addr)) {
1760 		pages_unmap(addr, size);
1761 	}
1762 }
1763 
1764 static void
1765 extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1766     bool committed, unsigned arena_ind) {
1767 	extent_destroy_default_impl(addr, size);
1768 }
1769 
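/*
 * Permanently discard an extent's mapping: deregister it, invoke the destroy
 * hook (or unmap directly for the default hooks; failure is silently
 * ignored), and free the extent_t itself.
 */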
1770 void
1771 extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
1772     extent_hooks_t **r_extent_hooks, extent_t *extent) {
1773 	assert(extent_base_get(extent) != NULL);
1774 	assert(extent_size_get(extent) != 0);
1775 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1776 	    WITNESS_RANK_CORE, 0);
1777 
1778 	/* Deregister first to avoid a race with other allocating threads. */
1779 	extent_deregister(tsdn, extent);
1780 
1781 	extent_addr_set(extent, extent_base_get(extent));
1782 
1783 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1784 	/* Try to destroy; silently fail otherwise. */
1785 	if (*r_extent_hooks == &extent_hooks_default) {
1786 		/* Call directly to propagate tsdn. */
1787 		extent_destroy_default_impl(extent_base_get(extent),
1788 		    extent_size_get(extent));
1789 	} else if ((*r_extent_hooks)->destroy != NULL) {
1790 		extent_hook_pre_reentrancy(tsdn, arena);
1791 		(*r_extent_hooks)->destroy(*r_extent_hooks,
1792 		    extent_base_get(extent), extent_size_get(extent),
1793 		    extent_committed_get(extent), arena_ind_get(arena));
1794 		extent_hook_post_reentrancy(tsdn);
1795 	}
1796 
1797 	extent_dalloc(tsdn, arena, extent);
1798 }
1799 
1800 static bool
1801 extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1802     size_t offset, size_t length, unsigned arena_ind) {
1803 	return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
1804 	    length);
1805 }
1806 
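/*
 * Hook return convention used by the wrappers below: hooks return true to
 * report failure, and a hook left NULL is treated as always failing.
 */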
1807 static bool
1808 extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
1809     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1810     size_t length, bool growing_retained) {
1811 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1812 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1813 
1814 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1815 	if (*r_extent_hooks != &extent_hooks_default) {
1816 		extent_hook_pre_reentrancy(tsdn, arena);
1817 	}
1818 	bool err = ((*r_extent_hooks)->commit == NULL ||
1819 	    (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
1820 	    extent_size_get(extent), offset, length, arena_ind_get(arena)));
1821 	if (*r_extent_hooks != &extent_hooks_default) {
1822 		extent_hook_post_reentrancy(tsdn);
1823 	}
1824 	extent_committed_set(extent, extent_committed_get(extent) || !err);
1825 	return err;
1826 }
1827 
1828 bool
1829 extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
1830     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1831     size_t length) {
1832 	return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
1833 	    length, false);
1834 }
1835 
1836 static bool
1837 extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1838     size_t offset, size_t length, unsigned arena_ind) {
1839 	return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
1840 	    length);
1841 }
1842 
1843 bool
1844 extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
1845     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1846     size_t length) {
1847 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1848 	    WITNESS_RANK_CORE, 0);
1849 
1850 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1851 
1852 	if (*r_extent_hooks != &extent_hooks_default) {
1853 		extent_hook_pre_reentrancy(tsdn, arena);
1854 	}
1855 	bool err = ((*r_extent_hooks)->decommit == NULL ||
1856 	    (*r_extent_hooks)->decommit(*r_extent_hooks,
1857 	    extent_base_get(extent), extent_size_get(extent), offset, length,
1858 	    arena_ind_get(arena)));
1859 	if (*r_extent_hooks != &extent_hooks_default) {
1860 		extent_hook_post_reentrancy(tsdn);
1861 	}
1862 	extent_committed_set(extent, extent_committed_get(extent) && err);
1863 	return err;
1864 }
1865 
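/*
 * Purge semantics: a lazy purge hints that the pages may be discarded at the
 * kernel's convenience (contents remain valid until then), while a forced
 * purge discards them immediately so that they read back as zeroes.
 */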
1866 #ifdef PAGES_CAN_PURGE_LAZY
1867 static bool
1868 extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1869     size_t offset, size_t length, unsigned arena_ind) {
1870 	assert(addr != NULL);
1871 	assert((offset & PAGE_MASK) == 0);
1872 	assert(length != 0);
1873 	assert((length & PAGE_MASK) == 0);
1874 
1875 	return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
1876 	    length);
1877 }
1878 #endif
1879 
1880 static bool
1881 extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
1882     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1883     size_t length, bool growing_retained) {
1884 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1885 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1886 
1887 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1888 
1889 	if ((*r_extent_hooks)->purge_lazy == NULL) {
1890 		return true;
1891 	}
1892 	if (*r_extent_hooks != &extent_hooks_default) {
1893 		extent_hook_pre_reentrancy(tsdn, arena);
1894 	}
1895 	bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
1896 	    extent_base_get(extent), extent_size_get(extent), offset, length,
1897 	    arena_ind_get(arena));
1898 	if (*r_extent_hooks != &extent_hooks_default) {
1899 		extent_hook_post_reentrancy(tsdn);
1900 	}
1901 
1902 	return err;
1903 }
1904 
1905 bool
1906 extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
1907     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1908     size_t length) {
1909 	return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
1910 	    offset, length, false);
1911 }
1912 
1913 #ifdef PAGES_CAN_PURGE_FORCED
1914 static bool
1915 extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
1916     size_t size, size_t offset, size_t length, unsigned arena_ind) {
1917 	assert(addr != NULL);
1918 	assert((offset & PAGE_MASK) == 0);
1919 	assert(length != 0);
1920 	assert((length & PAGE_MASK) == 0);
1921 
1922 	return pages_purge_forced((void *)((uintptr_t)addr +
1923 	    (uintptr_t)offset), length);
1924 }
1925 #endif
1926 
1927 static bool
1928 extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
1929     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1930     size_t length, bool growing_retained) {
1931 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1932 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1933 
1934 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1935 
1936 	if ((*r_extent_hooks)->purge_forced == NULL) {
1937 		return true;
1938 	}
1939 	if (*r_extent_hooks != &extent_hooks_default) {
1940 		extent_hook_pre_reentrancy(tsdn, arena);
1941 	}
1942 	bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
1943 	    extent_base_get(extent), extent_size_get(extent), offset, length,
1944 	    arena_ind_get(arena));
1945 	if (*r_extent_hooks != &extent_hooks_default) {
1946 		extent_hook_post_reentrancy(tsdn);
1947 	}
1948 	return err;
1949 }
1950 
1951 bool
1952 extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
1953     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1954     size_t length) {
1955 	return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
1956 	    offset, length, false);
1957 }
1958 
1959 #ifdef JEMALLOC_MAPS_COALESCE
1960 static bool
1961 extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1962     size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
1963 	return !maps_coalesce;
1964 }
1965 #endif
1966 
1967 /*
1968  * Accepts the extent to split, and the characteristics of each side of the
1969  * split.  The 'a' parameters go with the 'lead' of the resulting pair of
1970  * extents (the lower addressed portion of the split), and the 'b' parameters go
1971  * with the trail (the higher addressed portion).  This makes 'extent' the lead,
1972  * and returns the trail (except in case of error).
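 *
 * For example, splitting a 2 MiB extent at base B with size_a = 0x180000
 * (1.5 MiB) and size_b = 0x80000 (512 KiB) leaves 'extent' describing
 * [B, B + 0x180000) and returns a trail describing
 * [B + 0x180000, B + 0x200000).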
1973  */
1974 static extent_t *
1975 extent_split_impl(tsdn_t *tsdn, arena_t *arena,
1976     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
1977     szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
1978     bool growing_retained) {
1979 	assert(extent_size_get(extent) == size_a + size_b);
1980 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1981 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1982 
1983 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1984 
1985 	if ((*r_extent_hooks)->split == NULL) {
1986 		return NULL;
1987 	}
1988 
1989 	extent_t *trail = extent_alloc(tsdn, arena);
1990 	if (trail == NULL) {
1991 		goto label_error_a;
1992 	}
1993 
1994 	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
1995 	    size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
1996 	    extent_state_get(extent), extent_zeroed_get(extent),
1997 	    extent_committed_get(extent), extent_dumpable_get(extent));
1998 
1999 	rtree_ctx_t rtree_ctx_fallback;
2000 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
2001 	rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
2002 	{
2003 		extent_t lead;
2004 
2005 		extent_init(&lead, arena, extent_addr_get(extent), size_a,
2006 		    slab_a, szind_a, extent_sn_get(extent),
2007 		    extent_state_get(extent), extent_zeroed_get(extent),
2008 		    extent_committed_get(extent), extent_dumpable_get(extent));
2009 
2010 		extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
2011 		    true, &lead_elm_a, &lead_elm_b);
2012 	}
2013 	rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
2014 	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
2015 	    &trail_elm_a, &trail_elm_b);
2016 
2017 	if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
2018 	    || trail_elm_b == NULL) {
2019 		goto label_error_b;
2020 	}
2021 
2022 	extent_lock2(tsdn, extent, trail);
2023 
2024 	if (*r_extent_hooks != &extent_hooks_default) {
2025 		extent_hook_pre_reentrancy(tsdn, arena);
2026 	}
2027 	bool err = (*r_extent_hooks)->split(*r_extent_hooks,
2028 	    extent_base_get(extent), size_a + size_b, size_a, size_b,
2029 	    extent_committed_get(extent), arena_ind_get(arena));
2030 	if (*r_extent_hooks != &extent_hooks_default) {
2031 		extent_hook_post_reentrancy(tsdn);
2032 	}
2033 	if (err) {
2034 		goto label_error_c;
2035 	}
2036 
2037 	extent_size_set(extent, size_a);
2038 	extent_szind_set(extent, szind_a);
2039 
2040 	extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
2041 	    szind_a, slab_a);
2042 	extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
2043 	    szind_b, slab_b);
2044 
2045 	extent_unlock2(tsdn, extent, trail);
2046 
2047 	return trail;
2048 label_error_c:
2049 	extent_unlock2(tsdn, extent, trail);
2050 label_error_b:
2051 	extent_dalloc(tsdn, arena, trail);
2052 label_error_a:
2053 	return NULL;
2054 }
2055 
2056 extent_t *
2057 extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
2058     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
2059     szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
2060 	return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
2061 	    szind_a, slab_a, size_b, szind_b, slab_b, false);
2062 }
2063 
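/*
 * The default merge refuses (returns true) when mappings on this platform do
 * not coalesce, or when extent_dss_mergeable() reports that the DSS makes the
 * two ranges non-mergeable.
 */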
2064 static bool
2065 extent_merge_default_impl(void *addr_a, void *addr_b) {
2066 	if (!maps_coalesce) {
2067 		return true;
2068 	}
2069 	if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
2070 		return true;
2071 	}
2072 
2073 	return false;
2074 }
2075 
2076 #ifdef JEMALLOC_MAPS_COALESCE
2077 static bool
2078 extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
2079     void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
2080 	return extent_merge_default_impl(addr_a, addr_b);
2081 }
2082 #endif
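
/*
 * Illustrative sketch (kept out of the build): custom hooks commonly refuse
 * split and merge, e.g. when extents are backed by resources that must stay
 * intact.  The my_extent_split/my_extent_merge names are hypothetical; the
 * signatures mirror extent_split_default()/extent_merge_default() above.
 * Returning true reports failure, so jemalloc leaves the extent unsplit and
 * the extents unmerged.
 */
#if 0
static bool
my_extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
	return true;	/* Never split. */
}

static bool
my_extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
	return true;	/* Never merge. */
}
#endif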
2083 
2084 static bool
2085 extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
2086     extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
2087     bool growing_retained) {
2088 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
2089 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
2090 
2091 	extent_hooks_assure_initialized(arena, r_extent_hooks);
2092 
2093 	if ((*r_extent_hooks)->merge == NULL) {
2094 		return true;
2095 	}
2096 
2097 	bool err;
2098 	if (*r_extent_hooks == &extent_hooks_default) {
2099 		/* Call directly to propagate tsdn. */
2100 		err = extent_merge_default_impl(extent_base_get(a),
2101 		    extent_base_get(b));
2102 	} else {
2103 		extent_hook_pre_reentrancy(tsdn, arena);
2104 		err = (*r_extent_hooks)->merge(*r_extent_hooks,
2105 		    extent_base_get(a), extent_size_get(a), extent_base_get(b),
2106 		    extent_size_get(b), extent_committed_get(a),
2107 		    arena_ind_get(arena));
2108 		extent_hook_post_reentrancy(tsdn);
2109 	}
2110 
2111 	if (err) {
2112 		return true;
2113 	}
2114 
2115 	/*
2116 	 * The rtree writes must happen while all the relevant elements are
2117 	 * owned, so the following code uses decomposed helper functions rather
2118 	 * than extent_{,de}register() to do things in the right order.
2119 	 */
2120 	rtree_ctx_t rtree_ctx_fallback;
2121 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
2122 	rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
2123 	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
2124 	    &a_elm_b);
2125 	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
2126 	    &b_elm_b);
2127 
2128 	extent_lock2(tsdn, a, b);
2129 
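	/*
	 * Clear the rtree mappings that become interior to the merged extent;
	 * the merged extent is then published under its new boundary elms.
	 */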
2130 	if (a_elm_b != NULL) {
2131 		rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
2132 		    NSIZES, false);
2133 	}
2134 	if (b_elm_b != NULL) {
2135 		rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
2136 		    NSIZES, false);
2137 	} else {
2138 		b_elm_b = b_elm_a;
2139 	}
2140 
2141 	extent_size_set(a, extent_size_get(a) + extent_size_get(b));
2142 	extent_szind_set(a, NSIZES);
2143 	extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
2144 	    extent_sn_get(a) : extent_sn_get(b));
2145 	extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
2146 
2147 	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false);
2148 
2149 	extent_unlock2(tsdn, a, b);
2150 
2151 	extent_dalloc(tsdn, extent_arena_get(b), b);
2152 
2153 	return false;
2154 }
2155 
2156 bool
2157 extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
2158     extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
2159 	return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
2160 }
2161 
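/*
 * One-time initialization of the global extent state: the extents rtree, the
 * extent mutex pool, and (when the DSS is supported) the DSS allocator.
 * Returns true on failure.
 */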
2162 bool
2163 extent_boot(void) {
2164 	if (rtree_new(&extents_rtree, true)) {
2165 		return true;
2166 	}
2167 
2168 	if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
2169 	    WITNESS_RANK_EXTENT_POOL)) {
2170 		return true;
2171 	}
2172 
2173 	if (have_dss) {
2174 		extent_dss_boot();
2175 	}
2176 
2177 	return false;
2178 }
2179