1 #define JEMALLOC_ARENA_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
4 
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/div.h"
7 #include "jemalloc/internal/extent_dss.h"
8 #include "jemalloc/internal/extent_mmap.h"
9 #include "jemalloc/internal/mutex.h"
10 #include "jemalloc/internal/rtree.h"
11 #include "jemalloc/internal/size_classes.h"
12 #include "jemalloc/internal/util.h"
13 
14 /******************************************************************************/
15 /* Data. */
16 
17 /*
18  * Define names for both unininitialized and initialized phases, so that
19  * options and mallctl processing are straightforward.
20  */
21 const char *percpu_arena_mode_names[] = {
22 	"percpu",
23 	"phycpu",
24 	"disabled",
25 	"percpu",
26 	"phycpu"
27 };
28 percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;
29 
30 ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
31 ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
32 
33 static atomic_zd_t dirty_decay_ms_default;
34 static atomic_zd_t muzzy_decay_ms_default;
35 
36 const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
37 #define STEP(step, h, x, y)			\
38 		h,
39 		SMOOTHSTEP
40 #undef STEP
41 };
42 
43 static div_info_t arena_binind_div_info[NBINS];
44 
45 /******************************************************************************/
46 /*
47  * Function prototypes for static functions that are referenced prior to
48  * definition.
49  */
50 
51 static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
52     arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
53     size_t npages_decay_max, bool is_background_thread);
54 static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
55     bool is_background_thread, bool all);
56 static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
57     bin_t *bin);
58 static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
59     bin_t *bin);
60 
61 /******************************************************************************/
62 
63 void
arena_basic_stats_merge(UNUSED tsdn_t * tsdn,arena_t * arena,unsigned * nthreads,const char ** dss,ssize_t * dirty_decay_ms,ssize_t * muzzy_decay_ms,size_t * nactive,size_t * ndirty,size_t * nmuzzy)64 arena_basic_stats_merge(UNUSED tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
65     const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
66     size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
67 	*nthreads += arena_nthreads_get(arena, false);
68 	*dss = dss_prec_names[arena_dss_prec_get(arena)];
69 	*dirty_decay_ms = arena_dirty_decay_ms_get(arena);
70 	*muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
71 	*nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
72 	*ndirty += extents_npages_get(&arena->extents_dirty);
73 	*nmuzzy += extents_npages_get(&arena->extents_muzzy);
74 }
75 
76 void
arena_stats_merge(tsdn_t * tsdn,arena_t * arena,unsigned * nthreads,const char ** dss,ssize_t * dirty_decay_ms,ssize_t * muzzy_decay_ms,size_t * nactive,size_t * ndirty,size_t * nmuzzy,arena_stats_t * astats,bin_stats_t * bstats,arena_stats_large_t * lstats)77 arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
78     const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
79     size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
80     bin_stats_t *bstats, arena_stats_large_t *lstats) {
81 	cassert(config_stats);
82 
83 	arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
84 	    muzzy_decay_ms, nactive, ndirty, nmuzzy);
85 
86 	size_t base_allocated, base_resident, base_mapped, metadata_thp;
87 	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
88 	    &base_mapped, &metadata_thp);
89 
90 	arena_stats_lock(tsdn, &arena->stats);
91 
92 	arena_stats_accum_zu(&astats->mapped, base_mapped
93 	    + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
94 	arena_stats_accum_zu(&astats->retained,
95 	    extents_npages_get(&arena->extents_retained) << LG_PAGE);
96 
97 	arena_stats_accum_u64(&astats->decay_dirty.npurge,
98 	    arena_stats_read_u64(tsdn, &arena->stats,
99 	    &arena->stats.decay_dirty.npurge));
100 	arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
101 	    arena_stats_read_u64(tsdn, &arena->stats,
102 	    &arena->stats.decay_dirty.nmadvise));
103 	arena_stats_accum_u64(&astats->decay_dirty.purged,
104 	    arena_stats_read_u64(tsdn, &arena->stats,
105 	    &arena->stats.decay_dirty.purged));
106 
107 	arena_stats_accum_u64(&astats->decay_muzzy.npurge,
108 	    arena_stats_read_u64(tsdn, &arena->stats,
109 	    &arena->stats.decay_muzzy.npurge));
110 	arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
111 	    arena_stats_read_u64(tsdn, &arena->stats,
112 	    &arena->stats.decay_muzzy.nmadvise));
113 	arena_stats_accum_u64(&astats->decay_muzzy.purged,
114 	    arena_stats_read_u64(tsdn, &arena->stats,
115 	    &arena->stats.decay_muzzy.purged));
116 
117 	arena_stats_accum_zu(&astats->base, base_allocated);
118 	arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
119 	arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
120 	arena_stats_accum_zu(&astats->resident, base_resident +
121 	    (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
122 	    extents_npages_get(&arena->extents_dirty) +
123 	    extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
124 
125 	for (szind_t i = 0; i < NSIZES - NBINS; i++) {
126 		uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
127 		    &arena->stats.lstats[i].nmalloc);
128 		arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
129 		arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);
130 
131 		uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
132 		    &arena->stats.lstats[i].ndalloc);
133 		arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
134 		arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);
135 
136 		uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
137 		    &arena->stats.lstats[i].nrequests);
138 		arena_stats_accum_u64(&lstats[i].nrequests,
139 		    nmalloc + nrequests);
140 		arena_stats_accum_u64(&astats->nrequests_large,
141 		    nmalloc + nrequests);
142 
143 		assert(nmalloc >= ndalloc);
144 		assert(nmalloc - ndalloc <= SIZE_T_MAX);
145 		size_t curlextents = (size_t)(nmalloc - ndalloc);
146 		lstats[i].curlextents += curlextents;
147 		arena_stats_accum_zu(&astats->allocated_large,
148 		    curlextents * sz_index2size(NBINS + i));
149 	}
150 
151 	arena_stats_unlock(tsdn, &arena->stats);
152 
153 	/* tcache_bytes counts currently cached bytes. */
154 	atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
155 	malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
156 	cache_bin_array_descriptor_t *descriptor;
157 	ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
158 		szind_t i = 0;
159 		for (; i < NBINS; i++) {
160 			cache_bin_t *tbin = &descriptor->bins_small[i];
161 			arena_stats_accum_zu(&astats->tcache_bytes,
162 			    tbin->ncached * sz_index2size(i));
163 		}
164 		for (; i < nhbins; i++) {
165 			cache_bin_t *tbin = &descriptor->bins_large[i];
166 			arena_stats_accum_zu(&astats->tcache_bytes,
167 			    tbin->ncached * sz_index2size(i));
168 		}
169 	}
170 	malloc_mutex_prof_read(tsdn,
171 	    &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
172 	    &arena->tcache_ql_mtx);
173 	malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
174 
175 #define READ_ARENA_MUTEX_PROF_DATA(mtx, ind)				\
176     malloc_mutex_lock(tsdn, &arena->mtx);				\
177     malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind],		\
178         &arena->mtx);							\
179     malloc_mutex_unlock(tsdn, &arena->mtx);
180 
181 	/* Gather per arena mutex profiling data. */
182 	READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
183 	READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
184 	    arena_prof_mutex_extent_avail)
185 	READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
186 	    arena_prof_mutex_extents_dirty)
187 	READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
188 	    arena_prof_mutex_extents_muzzy)
189 	READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
190 	    arena_prof_mutex_extents_retained)
191 	READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
192 	    arena_prof_mutex_decay_dirty)
193 	READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
194 	    arena_prof_mutex_decay_muzzy)
195 	READ_ARENA_MUTEX_PROF_DATA(base->mtx,
196 	    arena_prof_mutex_base)
197 #undef READ_ARENA_MUTEX_PROF_DATA
198 
199 	nstime_copy(&astats->uptime, &arena->create_time);
200 	nstime_update(&astats->uptime);
201 	nstime_subtract(&astats->uptime, &arena->create_time);
202 
203 	for (szind_t i = 0; i < NBINS; i++) {
204 		bin_stats_merge(tsdn, &bstats[i], &arena->bins[i]);
205 	}
206 }
207 
208 void
arena_extents_dirty_dalloc(tsdn_t * tsdn,arena_t * arena,extent_hooks_t ** r_extent_hooks,extent_t * extent)209 arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
210     extent_hooks_t **r_extent_hooks, extent_t *extent) {
211 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
212 	    WITNESS_RANK_CORE, 0);
213 
214 	extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
215 	    extent);
216 	if (arena_dirty_decay_ms_get(arena) == 0) {
217 		arena_decay_dirty(tsdn, arena, false, true);
218 	} else {
219 		arena_background_thread_inactivity_check(tsdn, arena, false);
220 	}
221 }
222 
223 static void *
arena_slab_reg_alloc(extent_t * slab,const bin_info_t * bin_info)224 arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
225 	void *ret;
226 	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
227 	size_t regind;
228 
229 	assert(extent_nfree_get(slab) > 0);
230 	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
231 
232 	regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
233 	ret = (void *)((uintptr_t)extent_addr_get(slab) +
234 	    (uintptr_t)(bin_info->reg_size * regind));
235 	extent_nfree_dec(slab);
236 	return ret;
237 }
238 
239 #ifndef JEMALLOC_JET
240 static
241 #endif
242 size_t
arena_slab_regind(extent_t * slab,szind_t binind,const void * ptr)243 arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
244 	size_t diff, regind;
245 
246 	/* Freeing a pointer outside the slab can cause assertion failure. */
247 	assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
248 	assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
249 	/* Freeing an interior pointer can cause assertion failure. */
250 	assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
251 	    (uintptr_t)bin_infos[binind].reg_size == 0);
252 
253 	diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
254 
255 	/* Avoid doing division with a variable divisor. */
256 	regind = div_compute(&arena_binind_div_info[binind], diff);
257 
258 	assert(regind < bin_infos[binind].nregs);
259 
260 	return regind;
261 }
262 
263 static void
arena_slab_reg_dalloc(extent_t * slab,arena_slab_data_t * slab_data,void * ptr)264 arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
265 	szind_t binind = extent_szind_get(slab);
266 	const bin_info_t *bin_info = &bin_infos[binind];
267 	size_t regind = arena_slab_regind(slab, binind, ptr);
268 
269 	assert(extent_nfree_get(slab) < bin_info->nregs);
270 	/* Freeing an unallocated pointer can cause assertion failure. */
271 	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
272 
273 	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
274 	extent_nfree_inc(slab);
275 }
276 
277 static void
arena_nactive_add(arena_t * arena,size_t add_pages)278 arena_nactive_add(arena_t *arena, size_t add_pages) {
279 	atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
280 }
281 
282 static void
arena_nactive_sub(arena_t * arena,size_t sub_pages)283 arena_nactive_sub(arena_t *arena, size_t sub_pages) {
284 	assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
285 	atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
286 }
287 
288 static void
arena_large_malloc_stats_update(tsdn_t * tsdn,arena_t * arena,size_t usize)289 arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
290 	szind_t index, hindex;
291 
292 	cassert(config_stats);
293 
294 	if (usize < LARGE_MINCLASS) {
295 		usize = LARGE_MINCLASS;
296 	}
297 	index = sz_size2index(usize);
298 	hindex = (index >= NBINS) ? index - NBINS : 0;
299 
300 	arena_stats_add_u64(tsdn, &arena->stats,
301 	    &arena->stats.lstats[hindex].nmalloc, 1);
302 }
303 
304 static void
arena_large_dalloc_stats_update(tsdn_t * tsdn,arena_t * arena,size_t usize)305 arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
306 	szind_t index, hindex;
307 
308 	cassert(config_stats);
309 
310 	if (usize < LARGE_MINCLASS) {
311 		usize = LARGE_MINCLASS;
312 	}
313 	index = sz_size2index(usize);
314 	hindex = (index >= NBINS) ? index - NBINS : 0;
315 
316 	arena_stats_add_u64(tsdn, &arena->stats,
317 	    &arena->stats.lstats[hindex].ndalloc, 1);
318 }
319 
320 static void
arena_large_ralloc_stats_update(tsdn_t * tsdn,arena_t * arena,size_t oldusize,size_t usize)321 arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
322     size_t usize) {
323 	arena_large_dalloc_stats_update(tsdn, arena, oldusize);
324 	arena_large_malloc_stats_update(tsdn, arena, usize);
325 }
326 
327 extent_t *
arena_extent_alloc_large(tsdn_t * tsdn,arena_t * arena,size_t usize,size_t alignment,bool * zero)328 arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
329     size_t alignment, bool *zero) {
330 	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
331 
332 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
333 	    WITNESS_RANK_CORE, 0);
334 
335 	szind_t szind = sz_size2index(usize);
336 	size_t mapped_add;
337 	bool commit = true;
338 	extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
339 	    &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
340 	    szind, zero, &commit);
341 	if (extent == NULL) {
342 		extent = extents_alloc(tsdn, arena, &extent_hooks,
343 		    &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
344 		    false, szind, zero, &commit);
345 	}
346 	size_t size = usize + sz_large_pad;
347 	if (extent == NULL) {
348 		extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
349 		    usize, sz_large_pad, alignment, false, szind, zero,
350 		    &commit);
351 		if (config_stats) {
352 			/*
353 			 * extent may be NULL on OOM, but in that case
354 			 * mapped_add isn't used below, so there's no need to
355 			 * conditionlly set it to 0 here.
356 			 */
357 			mapped_add = size;
358 		}
359 	} else if (config_stats) {
360 		mapped_add = 0;
361 	}
362 
363 	if (extent != NULL) {
364 		if (config_stats) {
365 			arena_stats_lock(tsdn, &arena->stats);
366 			arena_large_malloc_stats_update(tsdn, arena, usize);
367 			if (mapped_add != 0) {
368 				arena_stats_add_zu(tsdn, &arena->stats,
369 				    &arena->stats.mapped, mapped_add);
370 			}
371 			arena_stats_unlock(tsdn, &arena->stats);
372 		}
373 		arena_nactive_add(arena, size >> LG_PAGE);
374 	}
375 
376 	return extent;
377 }
378 
379 void
arena_extent_dalloc_large_prep(tsdn_t * tsdn,arena_t * arena,extent_t * extent)380 arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
381 	if (config_stats) {
382 		arena_stats_lock(tsdn, &arena->stats);
383 		arena_large_dalloc_stats_update(tsdn, arena,
384 		    extent_usize_get(extent));
385 		arena_stats_unlock(tsdn, &arena->stats);
386 	}
387 	arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
388 }
389 
390 void
arena_extent_ralloc_large_shrink(tsdn_t * tsdn,arena_t * arena,extent_t * extent,size_t oldusize)391 arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
392     size_t oldusize) {
393 	size_t usize = extent_usize_get(extent);
394 	size_t udiff = oldusize - usize;
395 
396 	if (config_stats) {
397 		arena_stats_lock(tsdn, &arena->stats);
398 		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
399 		arena_stats_unlock(tsdn, &arena->stats);
400 	}
401 	arena_nactive_sub(arena, udiff >> LG_PAGE);
402 }
403 
404 void
arena_extent_ralloc_large_expand(tsdn_t * tsdn,arena_t * arena,extent_t * extent,size_t oldusize)405 arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
406     size_t oldusize) {
407 	size_t usize = extent_usize_get(extent);
408 	size_t udiff = usize - oldusize;
409 
410 	if (config_stats) {
411 		arena_stats_lock(tsdn, &arena->stats);
412 		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
413 		arena_stats_unlock(tsdn, &arena->stats);
414 	}
415 	arena_nactive_add(arena, udiff >> LG_PAGE);
416 }
417 
418 static ssize_t
arena_decay_ms_read(arena_decay_t * decay)419 arena_decay_ms_read(arena_decay_t *decay) {
420 	return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
421 }
422 
423 static void
arena_decay_ms_write(arena_decay_t * decay,ssize_t decay_ms)424 arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
425 	atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
426 }
427 
428 static void
arena_decay_deadline_init(arena_decay_t * decay)429 arena_decay_deadline_init(arena_decay_t *decay) {
430 	/*
431 	 * Generate a new deadline that is uniformly random within the next
432 	 * epoch after the current one.
433 	 */
434 	nstime_copy(&decay->deadline, &decay->epoch);
435 	nstime_add(&decay->deadline, &decay->interval);
436 	if (arena_decay_ms_read(decay) > 0) {
437 		nstime_t jitter;
438 
439 		nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
440 		    nstime_ns(&decay->interval)));
441 		nstime_add(&decay->deadline, &jitter);
442 	}
443 }
444 
445 static bool
arena_decay_deadline_reached(const arena_decay_t * decay,const nstime_t * time)446 arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
447 	return (nstime_compare(&decay->deadline, time) <= 0);
448 }
449 
450 static size_t
arena_decay_backlog_npages_limit(const arena_decay_t * decay)451 arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
452 	uint64_t sum;
453 	size_t npages_limit_backlog;
454 	unsigned i;
455 
456 	/*
457 	 * For each element of decay_backlog, multiply by the corresponding
458 	 * fixed-point smoothstep decay factor.  Sum the products, then divide
459 	 * to round down to the nearest whole number of pages.
460 	 */
461 	sum = 0;
462 	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
463 		sum += decay->backlog[i] * h_steps[i];
464 	}
465 	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
466 
467 	return npages_limit_backlog;
468 }
469 
470 static void
arena_decay_backlog_update_last(arena_decay_t * decay,size_t current_npages)471 arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
472 	size_t npages_delta = (current_npages > decay->nunpurged) ?
473 	    current_npages - decay->nunpurged : 0;
474 	decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
475 
476 	if (config_debug) {
477 		if (current_npages > decay->ceil_npages) {
478 			decay->ceil_npages = current_npages;
479 		}
480 		size_t npages_limit = arena_decay_backlog_npages_limit(decay);
481 		assert(decay->ceil_npages >= npages_limit);
482 		if (decay->ceil_npages > npages_limit) {
483 			decay->ceil_npages = npages_limit;
484 		}
485 	}
486 }
487 
488 static void
arena_decay_backlog_update(arena_decay_t * decay,uint64_t nadvance_u64,size_t current_npages)489 arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
490     size_t current_npages) {
491 	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
492 		memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
493 		    sizeof(size_t));
494 	} else {
495 		size_t nadvance_z = (size_t)nadvance_u64;
496 
497 		assert((uint64_t)nadvance_z == nadvance_u64);
498 
499 		memmove(decay->backlog, &decay->backlog[nadvance_z],
500 		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
501 		if (nadvance_z > 1) {
502 			memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
503 			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
504 		}
505 	}
506 
507 	arena_decay_backlog_update_last(decay, current_npages);
508 }
509 
510 static void
arena_decay_try_purge(tsdn_t * tsdn,arena_t * arena,arena_decay_t * decay,extents_t * extents,size_t current_npages,size_t npages_limit,bool is_background_thread)511 arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
512     extents_t *extents, size_t current_npages, size_t npages_limit,
513     bool is_background_thread) {
514 	if (current_npages > npages_limit) {
515 		arena_decay_to_limit(tsdn, arena, decay, extents, false,
516 		    npages_limit, current_npages - npages_limit,
517 		    is_background_thread);
518 	}
519 }
520 
521 static void
arena_decay_epoch_advance_helper(arena_decay_t * decay,const nstime_t * time,size_t current_npages)522 arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
523     size_t current_npages) {
524 	assert(arena_decay_deadline_reached(decay, time));
525 
526 	nstime_t delta;
527 	nstime_copy(&delta, time);
528 	nstime_subtract(&delta, &decay->epoch);
529 
530 	uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
531 	assert(nadvance_u64 > 0);
532 
533 	/* Add nadvance_u64 decay intervals to epoch. */
534 	nstime_copy(&delta, &decay->interval);
535 	nstime_imultiply(&delta, nadvance_u64);
536 	nstime_add(&decay->epoch, &delta);
537 
538 	/* Set a new deadline. */
539 	arena_decay_deadline_init(decay);
540 
541 	/* Update the backlog. */
542 	arena_decay_backlog_update(decay, nadvance_u64, current_npages);
543 }
544 
545 static void
arena_decay_epoch_advance(tsdn_t * tsdn,arena_t * arena,arena_decay_t * decay,extents_t * extents,const nstime_t * time,bool is_background_thread)546 arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
547     extents_t *extents, const nstime_t *time, bool is_background_thread) {
548 	size_t current_npages = extents_npages_get(extents);
549 	arena_decay_epoch_advance_helper(decay, time, current_npages);
550 
551 	size_t npages_limit = arena_decay_backlog_npages_limit(decay);
552 	/* We may unlock decay->mtx when try_purge(). Finish logging first. */
553 	decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
554 	    current_npages;
555 
556 	if (!background_thread_enabled() || is_background_thread) {
557 		arena_decay_try_purge(tsdn, arena, decay, extents,
558 		    current_npages, npages_limit, is_background_thread);
559 	}
560 }
561 
562 static void
arena_decay_reinit(arena_decay_t * decay,ssize_t decay_ms)563 arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
564 	arena_decay_ms_write(decay, decay_ms);
565 	if (decay_ms > 0) {
566 		nstime_init(&decay->interval, (uint64_t)decay_ms *
567 		    KQU(1000000));
568 		nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
569 	}
570 
571 	nstime_init(&decay->epoch, 0);
572 	nstime_update(&decay->epoch);
573 	decay->jitter_state = (uint64_t)(uintptr_t)decay;
574 	arena_decay_deadline_init(decay);
575 	decay->nunpurged = 0;
576 	memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
577 }
578 
579 static bool
arena_decay_init(arena_decay_t * decay,ssize_t decay_ms,arena_stats_decay_t * stats)580 arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
581     arena_stats_decay_t *stats) {
582 	if (config_debug) {
583 		for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
584 			assert(((char *)decay)[i] == 0);
585 		}
586 		decay->ceil_npages = 0;
587 	}
588 	if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
589 	    malloc_mutex_rank_exclusive)) {
590 		return true;
591 	}
592 	decay->purging = false;
593 	arena_decay_reinit(decay, decay_ms);
594 	/* Memory is zeroed, so there is no need to clear stats. */
595 	if (config_stats) {
596 		decay->stats = stats;
597 	}
598 	return false;
599 }
600 
601 static bool
arena_decay_ms_valid(ssize_t decay_ms)602 arena_decay_ms_valid(ssize_t decay_ms) {
603 	if (decay_ms < -1) {
604 		return false;
605 	}
606 	if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
607 	    KQU(1000)) {
608 		return true;
609 	}
610 	return false;
611 }
612 
613 static bool
arena_maybe_decay(tsdn_t * tsdn,arena_t * arena,arena_decay_t * decay,extents_t * extents,bool is_background_thread)614 arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
615     extents_t *extents, bool is_background_thread) {
616 	malloc_mutex_assert_owner(tsdn, &decay->mtx);
617 
618 	/* Purge all or nothing if the option is disabled. */
619 	ssize_t decay_ms = arena_decay_ms_read(decay);
620 	if (decay_ms <= 0) {
621 		if (decay_ms == 0) {
622 			arena_decay_to_limit(tsdn, arena, decay, extents, false,
623 			    0, extents_npages_get(extents),
624 			    is_background_thread);
625 		}
626 		return false;
627 	}
628 
629 	nstime_t time;
630 	nstime_init(&time, 0);
631 	nstime_update(&time);
632 	if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
633 	    > 0)) {
634 		/*
635 		 * Time went backwards.  Move the epoch back in time and
636 		 * generate a new deadline, with the expectation that time
637 		 * typically flows forward for long enough periods of time that
638 		 * epochs complete.  Unfortunately, this strategy is susceptible
639 		 * to clock jitter triggering premature epoch advances, but
640 		 * clock jitter estimation and compensation isn't feasible here
641 		 * because calls into this code are event-driven.
642 		 */
643 		nstime_copy(&decay->epoch, &time);
644 		arena_decay_deadline_init(decay);
645 	} else {
646 		/* Verify that time does not go backwards. */
647 		assert(nstime_compare(&decay->epoch, &time) <= 0);
648 	}
649 
650 	/*
651 	 * If the deadline has been reached, advance to the current epoch and
652 	 * purge to the new limit if necessary.  Note that dirty pages created
653 	 * during the current epoch are not subject to purge until a future
654 	 * epoch, so as a result purging only happens during epoch advances, or
655 	 * being triggered by background threads (scheduled event).
656 	 */
657 	bool advance_epoch = arena_decay_deadline_reached(decay, &time);
658 	if (advance_epoch) {
659 		arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
660 		    is_background_thread);
661 	} else if (is_background_thread) {
662 		arena_decay_try_purge(tsdn, arena, decay, extents,
663 		    extents_npages_get(extents),
664 		    arena_decay_backlog_npages_limit(decay),
665 		    is_background_thread);
666 	}
667 
668 	return advance_epoch;
669 }
670 
671 static ssize_t
arena_decay_ms_get(arena_decay_t * decay)672 arena_decay_ms_get(arena_decay_t *decay) {
673 	return arena_decay_ms_read(decay);
674 }
675 
676 ssize_t
arena_dirty_decay_ms_get(arena_t * arena)677 arena_dirty_decay_ms_get(arena_t *arena) {
678 	return arena_decay_ms_get(&arena->decay_dirty);
679 }
680 
681 ssize_t
arena_muzzy_decay_ms_get(arena_t * arena)682 arena_muzzy_decay_ms_get(arena_t *arena) {
683 	return arena_decay_ms_get(&arena->decay_muzzy);
684 }
685 
686 static bool
arena_decay_ms_set(tsdn_t * tsdn,arena_t * arena,arena_decay_t * decay,extents_t * extents,ssize_t decay_ms)687 arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
688     extents_t *extents, ssize_t decay_ms) {
689 	if (!arena_decay_ms_valid(decay_ms)) {
690 		return true;
691 	}
692 
693 	malloc_mutex_lock(tsdn, &decay->mtx);
694 	/*
695 	 * Restart decay backlog from scratch, which may cause many dirty pages
696 	 * to be immediately purged.  It would conceptually be possible to map
697 	 * the old backlog onto the new backlog, but there is no justification
698 	 * for such complexity since decay_ms changes are intended to be
699 	 * infrequent, either between the {-1, 0, >0} states, or a one-time
700 	 * arbitrary change during initial arena configuration.
701 	 */
702 	arena_decay_reinit(decay, decay_ms);
703 	arena_maybe_decay(tsdn, arena, decay, extents, false);
704 	malloc_mutex_unlock(tsdn, &decay->mtx);
705 
706 	return false;
707 }
708 
709 bool
arena_dirty_decay_ms_set(tsdn_t * tsdn,arena_t * arena,ssize_t decay_ms)710 arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
711     ssize_t decay_ms) {
712 	return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
713 	    &arena->extents_dirty, decay_ms);
714 }
715 
716 bool
arena_muzzy_decay_ms_set(tsdn_t * tsdn,arena_t * arena,ssize_t decay_ms)717 arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
718     ssize_t decay_ms) {
719 	return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
720 	    &arena->extents_muzzy, decay_ms);
721 }
722 
723 static size_t
arena_stash_decayed(tsdn_t * tsdn,arena_t * arena,extent_hooks_t ** r_extent_hooks,extents_t * extents,size_t npages_limit,size_t npages_decay_max,extent_list_t * decay_extents)724 arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
725     extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
726 	size_t npages_decay_max, extent_list_t *decay_extents) {
727 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
728 	    WITNESS_RANK_CORE, 0);
729 
730 	/* Stash extents according to npages_limit. */
731 	size_t nstashed = 0;
732 	extent_t *extent;
733 	while (nstashed < npages_decay_max &&
734 	    (extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
735 	    npages_limit)) != NULL) {
736 		extent_list_append(decay_extents, extent);
737 		nstashed += extent_size_get(extent) >> LG_PAGE;
738 	}
739 	return nstashed;
740 }
741 
742 static size_t
arena_decay_stashed(tsdn_t * tsdn,arena_t * arena,extent_hooks_t ** r_extent_hooks,arena_decay_t * decay,extents_t * extents,bool all,extent_list_t * decay_extents,bool is_background_thread)743 arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
744     extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
745     bool all, extent_list_t *decay_extents, bool is_background_thread) {
746 	UNUSED size_t nmadvise, nunmapped;
747 	size_t npurged;
748 
749 	if (config_stats) {
750 		nmadvise = 0;
751 		nunmapped = 0;
752 	}
753 	npurged = 0;
754 
755 	ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
756 	for (extent_t *extent = extent_list_first(decay_extents); extent !=
757 	    NULL; extent = extent_list_first(decay_extents)) {
758 		if (config_stats) {
759 			nmadvise++;
760 		}
761 		size_t npages = extent_size_get(extent) >> LG_PAGE;
762 		npurged += npages;
763 		extent_list_remove(decay_extents, extent);
764 		switch (extents_state_get(extents)) {
765 		case extent_state_active:
766 			not_reached();
767 		case extent_state_dirty:
768 			if (!all && muzzy_decay_ms != 0 &&
769 			    !extent_purge_lazy_wrapper(tsdn, arena,
770 			    r_extent_hooks, extent, 0,
771 			    extent_size_get(extent))) {
772 				extents_dalloc(tsdn, arena, r_extent_hooks,
773 				    &arena->extents_muzzy, extent);
774 				arena_background_thread_inactivity_check(tsdn,
775 				    arena, is_background_thread);
776 				break;
777 			}
778 			/* Fall through. */
779 		case extent_state_muzzy:
780 			extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
781 			    extent);
782 			if (config_stats) {
783 				nunmapped += npages;
784 			}
785 			break;
786 		case extent_state_retained:
787 		default:
788 			not_reached();
789 		}
790 	}
791 
792 	if (config_stats) {
793 		arena_stats_lock(tsdn, &arena->stats);
794 		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
795 		    1);
796 		arena_stats_add_u64(tsdn, &arena->stats,
797 		    &decay->stats->nmadvise, nmadvise);
798 		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
799 		    npurged);
800 		arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
801 		    nunmapped << LG_PAGE);
802 		arena_stats_unlock(tsdn, &arena->stats);
803 	}
804 
805 	return npurged;
806 }
807 
808 /*
809  * npages_limit: Decay at most npages_decay_max pages without violating the
810  * invariant: (extents_npages_get(extents) >= npages_limit).  We need an upper
811  * bound on number of pages in order to prevent unbounded growth (namely in
812  * stashed), otherwise unbounded new pages could be added to extents during the
813  * current decay run, so that the purging thread never finishes.
814  */
815 static void
arena_decay_to_limit(tsdn_t * tsdn,arena_t * arena,arena_decay_t * decay,extents_t * extents,bool all,size_t npages_limit,size_t npages_decay_max,bool is_background_thread)816 arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
817     extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
818     bool is_background_thread) {
819 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
820 	    WITNESS_RANK_CORE, 1);
821 	malloc_mutex_assert_owner(tsdn, &decay->mtx);
822 
823 	if (decay->purging) {
824 		return;
825 	}
826 	decay->purging = true;
827 	malloc_mutex_unlock(tsdn, &decay->mtx);
828 
829 	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
830 
831 	extent_list_t decay_extents;
832 	extent_list_init(&decay_extents);
833 
834 	size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
835 	    npages_limit, npages_decay_max, &decay_extents);
836 	if (npurge != 0) {
837 		UNUSED size_t npurged = arena_decay_stashed(tsdn, arena,
838 		    &extent_hooks, decay, extents, all, &decay_extents,
839 		    is_background_thread);
840 		assert(npurged == npurge);
841 	}
842 
843 	malloc_mutex_lock(tsdn, &decay->mtx);
844 	decay->purging = false;
845 }
846 
847 static bool
arena_decay_impl(tsdn_t * tsdn,arena_t * arena,arena_decay_t * decay,extents_t * extents,bool is_background_thread,bool all)848 arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
849     extents_t *extents, bool is_background_thread, bool all) {
850 	if (all) {
851 		malloc_mutex_lock(tsdn, &decay->mtx);
852 		arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
853 		    extents_npages_get(extents), is_background_thread);
854 		malloc_mutex_unlock(tsdn, &decay->mtx);
855 
856 		return false;
857 	}
858 
859 	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
860 		/* No need to wait if another thread is in progress. */
861 		return true;
862 	}
863 
864 	bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
865 	    is_background_thread);
866 	UNUSED size_t npages_new;
867 	if (epoch_advanced) {
868 		/* Backlog is updated on epoch advance. */
869 		npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
870 	}
871 	malloc_mutex_unlock(tsdn, &decay->mtx);
872 
873 	if (have_background_thread && background_thread_enabled() &&
874 	    epoch_advanced && !is_background_thread) {
875 		background_thread_interval_check(tsdn, arena, decay,
876 		    npages_new);
877 	}
878 
879 	return false;
880 }
881 
882 static bool
arena_decay_dirty(tsdn_t * tsdn,arena_t * arena,bool is_background_thread,bool all)883 arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
884     bool all) {
885 	return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
886 	    &arena->extents_dirty, is_background_thread, all);
887 }
888 
889 static bool
arena_decay_muzzy(tsdn_t * tsdn,arena_t * arena,bool is_background_thread,bool all)890 arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
891     bool all) {
892 	return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
893 	    &arena->extents_muzzy, is_background_thread, all);
894 }
895 
896 void
arena_decay(tsdn_t * tsdn,arena_t * arena,bool is_background_thread,bool all)897 arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
898 	if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
899 		return;
900 	}
901 	arena_decay_muzzy(tsdn, arena, is_background_thread, all);
902 }
903 
904 static void
arena_slab_dalloc(tsdn_t * tsdn,arena_t * arena,extent_t * slab)905 arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
906 	arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
907 
908 	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
909 	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
910 }
911 
912 static void
arena_bin_slabs_nonfull_insert(bin_t * bin,extent_t * slab)913 arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
914 	assert(extent_nfree_get(slab) > 0);
915 	extent_heap_insert(&bin->slabs_nonfull, slab);
916 }
917 
918 static void
arena_bin_slabs_nonfull_remove(bin_t * bin,extent_t * slab)919 arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
920 	extent_heap_remove(&bin->slabs_nonfull, slab);
921 }
922 
923 static extent_t *
arena_bin_slabs_nonfull_tryget(bin_t * bin)924 arena_bin_slabs_nonfull_tryget(bin_t *bin) {
925 	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
926 	if (slab == NULL) {
927 		return NULL;
928 	}
929 	if (config_stats) {
930 		bin->stats.reslabs++;
931 	}
932 	return slab;
933 }
934 
935 static void
arena_bin_slabs_full_insert(arena_t * arena,bin_t * bin,extent_t * slab)936 arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
937 	assert(extent_nfree_get(slab) == 0);
938 	/*
939 	 *  Tracking extents is required by arena_reset, which is not allowed
940 	 *  for auto arenas.  Bypass this step to avoid touching the extent
941 	 *  linkage (often results in cache misses) for auto arenas.
942 	 */
943 	if (arena_is_auto(arena)) {
944 		return;
945 	}
946 	extent_list_append(&bin->slabs_full, slab);
947 }
948 
949 static void
arena_bin_slabs_full_remove(arena_t * arena,bin_t * bin,extent_t * slab)950 arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
951 	if (arena_is_auto(arena)) {
952 		return;
953 	}
954 	extent_list_remove(&bin->slabs_full, slab);
955 }
956 
957 void
arena_reset(tsd_t * tsd,arena_t * arena)958 arena_reset(tsd_t *tsd, arena_t *arena) {
959 	/*
960 	 * Locking in this function is unintuitive.  The caller guarantees that
961 	 * no concurrent operations are happening in this arena, but there are
962 	 * still reasons that some locking is necessary:
963 	 *
964 	 * - Some of the functions in the transitive closure of calls assume
965 	 *   appropriate locks are held, and in some cases these locks are
966 	 *   temporarily dropped to avoid lock order reversal or deadlock due to
967 	 *   reentry.
968 	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
969 	 *   strictly speaking this is a "concurrent operation", disallowing
970 	 *   stats refreshes would impose an inconvenient burden.
971 	 */
972 
973 	/* Large allocations. */
974 	malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
975 
976 	for (extent_t *extent = extent_list_first(&arena->large); extent !=
977 	    NULL; extent = extent_list_first(&arena->large)) {
978 		void *ptr = extent_base_get(extent);
979 		size_t usize;
980 
981 		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
982 		alloc_ctx_t alloc_ctx;
983 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
984 		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
985 		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
986 		assert(alloc_ctx.szind != NSIZES);
987 
988 		if (config_stats || (config_prof && opt_prof)) {
989 			usize = sz_index2size(alloc_ctx.szind);
990 			assert(usize == isalloc(tsd_tsdn(tsd), ptr));
991 		}
992 		/* Remove large allocation from prof sample set. */
993 		if (config_prof && opt_prof) {
994 			prof_free(tsd, ptr, usize, &alloc_ctx);
995 		}
996 		large_dalloc(tsd_tsdn(tsd), extent);
997 		malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
998 	}
999 	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
1000 
1001 	/* Bins. */
1002 	for (unsigned i = 0; i < NBINS; i++) {
1003 		extent_t *slab;
1004 		bin_t *bin = &arena->bins[i];
1005 		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1006 		if (bin->slabcur != NULL) {
1007 			slab = bin->slabcur;
1008 			bin->slabcur = NULL;
1009 			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1010 			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
1011 			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1012 		}
1013 		while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
1014 		    NULL) {
1015 			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1016 			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
1017 			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1018 		}
1019 		for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
1020 		    slab = extent_list_first(&bin->slabs_full)) {
1021 			arena_bin_slabs_full_remove(arena, bin, slab);
1022 			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1023 			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
1024 			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1025 		}
1026 		if (config_stats) {
1027 			bin->stats.curregs = 0;
1028 			bin->stats.curslabs = 0;
1029 		}
1030 		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1031 	}
1032 
1033 	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
1034 }
1035 
1036 static void
arena_destroy_retained(tsdn_t * tsdn,arena_t * arena)1037 arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
1038 	/*
1039 	 * Iterate over the retained extents and destroy them.  This gives the
1040 	 * extent allocator underlying the extent hooks an opportunity to unmap
1041 	 * all retained memory without having to keep its own metadata
1042 	 * structures.  In practice, virtual memory for dss-allocated extents is
1043 	 * leaked here, so best practice is to avoid dss for arenas to be
1044 	 * destroyed, or provide custom extent hooks that track retained
1045 	 * dss-based extents for later reuse.
1046 	 */
1047 	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
1048 	extent_t *extent;
1049 	while ((extent = extents_evict(tsdn, arena, &extent_hooks,
1050 	    &arena->extents_retained, 0)) != NULL) {
1051 		extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
1052 	}
1053 }
1054 
1055 void
arena_destroy(tsd_t * tsd,arena_t * arena)1056 arena_destroy(tsd_t *tsd, arena_t *arena) {
1057 	assert(base_ind_get(arena->base) >= narenas_auto);
1058 	assert(arena_nthreads_get(arena, false) == 0);
1059 	assert(arena_nthreads_get(arena, true) == 0);
1060 
1061 	/*
1062 	 * No allocations have occurred since arena_reset() was called.
1063 	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
1064 	 * extents, so only retained extents may remain.
1065 	 */
1066 	assert(extents_npages_get(&arena->extents_dirty) == 0);
1067 	assert(extents_npages_get(&arena->extents_muzzy) == 0);
1068 
1069 	/* Deallocate retained memory. */
1070 	arena_destroy_retained(tsd_tsdn(tsd), arena);
1071 
1072 	/*
1073 	 * Remove the arena pointer from the arenas array.  We rely on the fact
1074 	 * that there is no way for the application to get a dirty read from the
1075 	 * arenas array unless there is an inherent race in the application
1076 	 * involving access of an arena being concurrently destroyed.  The
1077 	 * application must synchronize knowledge of the arena's validity, so as
1078 	 * long as we use an atomic write to update the arenas array, the
1079 	 * application will get a clean read any time after it synchronizes
1080 	 * knowledge that the arena is no longer valid.
1081 	 */
1082 	arena_set(base_ind_get(arena->base), NULL);
1083 
1084 	/*
1085 	 * Destroy the base allocator, which manages all metadata ever mapped by
1086 	 * this arena.
1087 	 */
1088 	base_delete(tsd_tsdn(tsd), arena->base);
1089 }
1090 
1091 static extent_t *
arena_slab_alloc_hard(tsdn_t * tsdn,arena_t * arena,extent_hooks_t ** r_extent_hooks,const bin_info_t * bin_info,szind_t szind)1092 arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
1093     extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
1094     szind_t szind) {
1095 	extent_t *slab;
1096 	bool zero, commit;
1097 
1098 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1099 	    WITNESS_RANK_CORE, 0);
1100 
1101 	zero = false;
1102 	commit = true;
1103 	slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
1104 	    bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);
1105 
1106 	if (config_stats && slab != NULL) {
1107 		arena_stats_mapped_add(tsdn, &arena->stats,
1108 		    bin_info->slab_size);
1109 	}
1110 
1111 	return slab;
1112 }
1113 
1114 static extent_t *
arena_slab_alloc(tsdn_t * tsdn,arena_t * arena,szind_t binind,const bin_info_t * bin_info)1115 arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
1116     const bin_info_t *bin_info) {
1117 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1118 	    WITNESS_RANK_CORE, 0);
1119 
1120 	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
1121 	szind_t szind = sz_size2index(bin_info->reg_size);
1122 	bool zero = false;
1123 	bool commit = true;
1124 	extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
1125 	    &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
1126 	    binind, &zero, &commit);
1127 	if (slab == NULL) {
1128 		slab = extents_alloc(tsdn, arena, &extent_hooks,
1129 		    &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
1130 		    true, binind, &zero, &commit);
1131 	}
1132 	if (slab == NULL) {
1133 		slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
1134 		    bin_info, szind);
1135 		if (slab == NULL) {
1136 			return NULL;
1137 		}
1138 	}
1139 	assert(extent_slab_get(slab));
1140 
1141 	/* Initialize slab internals. */
1142 	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
1143 	extent_nfree_set(slab, bin_info->nregs);
1144 	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
1145 
1146 	arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
1147 
1148 	return slab;
1149 }
1150 
1151 static extent_t *
arena_bin_nonfull_slab_get(tsdn_t * tsdn,arena_t * arena,bin_t * bin,szind_t binind)1152 arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
1153     szind_t binind) {
1154 	extent_t *slab;
1155 	const bin_info_t *bin_info;
1156 
1157 	/* Look for a usable slab. */
1158 	slab = arena_bin_slabs_nonfull_tryget(bin);
1159 	if (slab != NULL) {
1160 		return slab;
1161 	}
1162 	/* No existing slabs have any space available. */
1163 
1164 	bin_info = &bin_infos[binind];
1165 
1166 	/* Allocate a new slab. */
1167 	malloc_mutex_unlock(tsdn, &bin->lock);
1168 	/******************************/
1169 	slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
1170 	/********************************/
1171 	malloc_mutex_lock(tsdn, &bin->lock);
1172 	if (slab != NULL) {
1173 		if (config_stats) {
1174 			bin->stats.nslabs++;
1175 			bin->stats.curslabs++;
1176 		}
1177 		return slab;
1178 	}
1179 
1180 	/*
1181 	 * arena_slab_alloc() failed, but another thread may have made
1182 	 * sufficient memory available while this one dropped bin->lock above,
1183 	 * so search one more time.
1184 	 */
1185 	slab = arena_bin_slabs_nonfull_tryget(bin);
1186 	if (slab != NULL) {
1187 		return slab;
1188 	}
1189 
1190 	return NULL;
1191 }
1192 
1193 /* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
1194 static void *
arena_bin_malloc_hard(tsdn_t * tsdn,arena_t * arena,bin_t * bin,szind_t binind)1195 arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
1196     szind_t binind) {
1197 	const bin_info_t *bin_info;
1198 	extent_t *slab;
1199 
1200 	bin_info = &bin_infos[binind];
1201 	if (!arena_is_auto(arena) && bin->slabcur != NULL) {
1202 		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
1203 		bin->slabcur = NULL;
1204 	}
1205 	slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind);
1206 	if (bin->slabcur != NULL) {
1207 		/*
1208 		 * Another thread updated slabcur while this one ran without the
1209 		 * bin lock in arena_bin_nonfull_slab_get().
1210 		 */
1211 		if (extent_nfree_get(bin->slabcur) > 0) {
1212 			void *ret = arena_slab_reg_alloc(bin->slabcur,
1213 			    bin_info);
1214 			if (slab != NULL) {
1215 				/*
1216 				 * arena_slab_alloc() may have allocated slab,
1217 				 * or it may have been pulled from
1218 				 * slabs_nonfull.  Therefore it is unsafe to
1219 				 * make any assumptions about how slab has
1220 				 * previously been used, and
1221 				 * arena_bin_lower_slab() must be called, as if
1222 				 * a region were just deallocated from the slab.
1223 				 */
1224 				if (extent_nfree_get(slab) == bin_info->nregs) {
1225 					arena_dalloc_bin_slab(tsdn, arena, slab,
1226 					    bin);
1227 				} else {
1228 					arena_bin_lower_slab(tsdn, arena, slab,
1229 					    bin);
1230 				}
1231 			}
1232 			return ret;
1233 		}
1234 
1235 		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
1236 		bin->slabcur = NULL;
1237 	}
1238 
1239 	if (slab == NULL) {
1240 		return NULL;
1241 	}
1242 	bin->slabcur = slab;
1243 
1244 	assert(extent_nfree_get(bin->slabcur) > 0);
1245 
1246 	return arena_slab_reg_alloc(slab, bin_info);
1247 }
1248 
1249 void
arena_tcache_fill_small(tsdn_t * tsdn,arena_t * arena,tcache_t * tcache,cache_bin_t * tbin,szind_t binind,uint64_t prof_accumbytes)1250 arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
1251     cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
1252 	unsigned i, nfill;
1253 	bin_t *bin;
1254 
1255 	assert(tbin->ncached == 0);
1256 
1257 	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
1258 		prof_idump(tsdn);
1259 	}
1260 	bin = &arena->bins[binind];
1261 	malloc_mutex_lock(tsdn, &bin->lock);
1262 	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
1263 	    tcache->lg_fill_div[binind]); i < nfill; i++) {
1264 		extent_t *slab;
1265 		void *ptr;
1266 		if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
1267 		    0) {
1268 			ptr = arena_slab_reg_alloc(slab, &bin_infos[binind]);
1269 		} else {
1270 			ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
1271 		}
1272 		if (ptr == NULL) {
1273 			/*
1274 			 * OOM.  tbin->avail isn't yet filled down to its first
1275 			 * element, so the successful allocations (if any) must
1276 			 * be moved just before tbin->avail before bailing out.
1277 			 */
1278 			if (i > 0) {
1279 				memmove(tbin->avail - i, tbin->avail - nfill,
1280 				    i * sizeof(void *));
1281 			}
1282 			break;
1283 		}
1284 		if (config_fill && unlikely(opt_junk_alloc)) {
1285 			arena_alloc_junk_small(ptr, &bin_infos[binind], true);
1286 		}
1287 		/* Insert such that low regions get used first. */
1288 		*(tbin->avail - nfill + i) = ptr;
1289 	}
1290 	if (config_stats) {
1291 		bin->stats.nmalloc += i;
1292 		bin->stats.nrequests += tbin->tstats.nrequests;
1293 		bin->stats.curregs += i;
1294 		bin->stats.nfills++;
1295 		tbin->tstats.nrequests = 0;
1296 	}
1297 	malloc_mutex_unlock(tsdn, &bin->lock);
1298 	tbin->ncached = i;
1299 	arena_decay_tick(tsdn, arena);
1300 }
1301 
1302 void
arena_alloc_junk_small(void * ptr,const bin_info_t * bin_info,bool zero)1303 arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
1304 	if (!zero) {
1305 		memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
1306 	}
1307 }
1308 
1309 static void
arena_dalloc_junk_small_impl(void * ptr,const bin_info_t * bin_info)1310 arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
1311 	memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
1312 }
1313 arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
1314     arena_dalloc_junk_small_impl;
1315 
1316 static void *
arena_malloc_small(tsdn_t * tsdn,arena_t * arena,szind_t binind,bool zero)1317 arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
1318 	void *ret;
1319 	bin_t *bin;
1320 	size_t usize;
1321 	extent_t *slab;
1322 
1323 	assert(binind < NBINS);
1324 	bin = &arena->bins[binind];
1325 	usize = sz_index2size(binind);
1326 
1327 	malloc_mutex_lock(tsdn, &bin->lock);
1328 	if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
1329 		ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
1330 	} else {
1331 		ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
1332 	}
1333 
1334 	if (ret == NULL) {
1335 		malloc_mutex_unlock(tsdn, &bin->lock);
1336 		return NULL;
1337 	}
1338 
1339 	if (config_stats) {
1340 		bin->stats.nmalloc++;
1341 		bin->stats.nrequests++;
1342 		bin->stats.curregs++;
1343 	}
1344 	malloc_mutex_unlock(tsdn, &bin->lock);
1345 	if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
1346 		prof_idump(tsdn);
1347 	}
1348 
1349 	if (!zero) {
1350 		if (config_fill) {
1351 			if (unlikely(opt_junk_alloc)) {
1352 				arena_alloc_junk_small(ret,
1353 				    &bin_infos[binind], false);
1354 			} else if (unlikely(opt_zero)) {
1355 				memset(ret, 0, usize);
1356 			}
1357 		}
1358 	} else {
1359 		if (config_fill && unlikely(opt_junk_alloc)) {
1360 			arena_alloc_junk_small(ret, &bin_infos[binind],
1361 			    true);
1362 		}
1363 		memset(ret, 0, usize);
1364 	}
1365 
1366 	arena_decay_tick(tsdn, arena);
1367 	return ret;
1368 }
1369 
1370 void *
arena_malloc_hard(tsdn_t * tsdn,arena_t * arena,size_t size,szind_t ind,bool zero)1371 arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
1372     bool zero) {
1373 	assert(!tsdn_null(tsdn) || arena != NULL);
1374 
1375 	if (likely(!tsdn_null(tsdn))) {
1376 		arena = arena_choose(tsdn_tsd(tsdn), arena);
1377 	}
1378 	if (unlikely(arena == NULL)) {
1379 		return NULL;
1380 	}
1381 
1382 	if (likely(size <= SMALL_MAXCLASS)) {
1383 		return arena_malloc_small(tsdn, arena, ind, zero);
1384 	}
1385 	return large_malloc(tsdn, arena, sz_index2size(ind), zero);
1386 }
1387 
1388 void *
arena_palloc(tsdn_t * tsdn,arena_t * arena,size_t usize,size_t alignment,bool zero,tcache_t * tcache)1389 arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
1390     bool zero, tcache_t *tcache) {
1391 	void *ret;
1392 
1393 	if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
1394 	    && (usize & PAGE_MASK) == 0))) {
1395 		/* Small; alignment doesn't require special slab placement. */
1396 		ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
1397 		    zero, tcache, true);
1398 	} else {
1399 		if (likely(alignment <= CACHELINE)) {
1400 			ret = large_malloc(tsdn, arena, usize, zero);
1401 		} else {
1402 			ret = large_palloc(tsdn, arena, usize, alignment, zero);
1403 		}
1404 	}
1405 	return ret;
1406 }
1407 
1408 void
arena_prof_promote(tsdn_t * tsdn,const void * ptr,size_t usize)1409 arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
1410 	cassert(config_prof);
1411 	assert(ptr != NULL);
1412 	assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
1413 	assert(usize <= SMALL_MAXCLASS);
1414 
1415 	rtree_ctx_t rtree_ctx_fallback;
1416 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1417 
1418 	extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
1419 	    (uintptr_t)ptr, true);
1420 	arena_t *arena = extent_arena_get(extent);
1421 
1422 	szind_t szind = sz_size2index(usize);
1423 	extent_szind_set(extent, szind);
1424 	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
1425 	    szind, false);
1426 
1427 	prof_accum_cancel(tsdn, &arena->prof_accum, usize);
1428 
1429 	assert(isalloc(tsdn, ptr) == usize);
1430 }
1431 
1432 static size_t
arena_prof_demote(tsdn_t * tsdn,extent_t * extent,const void * ptr)1433 arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
1434 	cassert(config_prof);
1435 	assert(ptr != NULL);
1436 
1437 	extent_szind_set(extent, NBINS);
1438 	rtree_ctx_t rtree_ctx_fallback;
1439 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1440 	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
1441 	    NBINS, false);
1442 
1443 	assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
1444 
1445 	return LARGE_MINCLASS;
1446 }
1447 
1448 void
arena_dalloc_promoted(tsdn_t * tsdn,void * ptr,tcache_t * tcache,bool slow_path)1449 arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
1450     bool slow_path) {
1451 	cassert(config_prof);
1452 	assert(opt_prof);
1453 
1454 	extent_t *extent = iealloc(tsdn, ptr);
1455 	size_t usize = arena_prof_demote(tsdn, extent, ptr);
1456 	if (usize <= tcache_maxclass) {
1457 		tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
1458 		    sz_size2index(usize), slow_path);
1459 	} else {
1460 		large_dalloc(tsdn, extent);
1461 	}
1462 }
1463 
static void
arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
	/* Dissociate slab from bin. */
	if (slab == bin->slabcur) {
		bin->slabcur = NULL;
	} else {
		szind_t binind = extent_szind_get(slab);
		const bin_info_t *bin_info = &bin_infos[binind];

		/*
		 * The following block's conditional is necessary because if the
		 * slab only contains one region, then it never gets inserted
		 * into the non-full slabs heap.
		 */
		if (bin_info->nregs == 1) {
			arena_bin_slabs_full_remove(arena, bin, slab);
		} else {
			arena_bin_slabs_nonfull_remove(bin, slab);
		}
	}
}

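/*
 * The caller has already dissociated the slab from the bin, so nothing
 * else can reach it; bin->lock is dropped here because returning the
 * slab's pages to the extent layer is too expensive to do while holding
 * the bin's lock.
 */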
static void
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin) {
	assert(slab != bin->slabcur);

	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	arena_slab_dalloc(tsdn, arena, slab);
	/****************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (config_stats) {
		bin->stats.curslabs--;
	}
}

static void
arena_bin_lower_slab(UNUSED tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin) {
	assert(extent_nfree_get(slab) > 0);

	/*
	 * Make sure that if bin->slabcur is non-NULL, it refers to the
	 * oldest/lowest non-full slab.  It is okay to NULL slabcur out rather
	 * than proactively keeping it pointing at the oldest/lowest non-full
	 * slab.
	 */
	if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
		/* Switch slabcur. */
		if (extent_nfree_get(bin->slabcur) > 0) {
			arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
		} else {
			arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		}
		bin->slabcur = slab;
		if (config_stats) {
			bin->stats.reslabs++;
		}
	} else {
		arena_bin_slabs_nonfull_insert(bin, slab);
	}
}

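/*
 * Two slab state transitions are possible per region dalloc: a slab whose
 * last allocated region was just freed (nfree == nregs) is dissociated
 * and returned to the arena, while a previously full slab (nfree == 1
 * after this free) leaves the full list and either becomes the new
 * slabcur or lands in the non-full heap via arena_bin_lower_slab().
 */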
static void
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    void *ptr, bool junked) {
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	szind_t binind = extent_szind_get(slab);
	bin_t *bin = &arena->bins[binind];
	const bin_info_t *bin_info = &bin_infos[binind];

	if (!junked && config_fill && unlikely(opt_junk_free)) {
		arena_dalloc_junk_small(ptr, bin_info);
	}

	arena_slab_reg_dalloc(slab, slab_data, ptr);
	unsigned nfree = extent_nfree_get(slab);
	if (nfree == bin_info->nregs) {
		arena_dissociate_bin_slab(arena, slab, bin);
		arena_dalloc_bin_slab(tsdn, arena, slab, bin);
	} else if (nfree == 1 && slab != bin->slabcur) {
		arena_bin_slabs_full_remove(arena, bin, slab);
		arena_bin_lower_slab(tsdn, arena, slab, bin);
	}

	if (config_stats) {
		bin->stats.ndalloc++;
		bin->stats.curregs--;
	}
}

void
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    void *ptr) {
	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
}

static void
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
	szind_t binind = extent_szind_get(extent);
	bin_t *bin = &arena->bins[binind];

	malloc_mutex_lock(tsdn, &bin->lock);
	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
	malloc_mutex_unlock(tsdn, &bin->lock);
}

void
arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
	extent_t *extent = iealloc(tsdn, ptr);
	arena_t *arena = extent_arena_get(extent);

	arena_dalloc_bin(tsdn, arena, extent, ptr);
	arena_decay_tick(tsdn, arena);
}

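/*
 * For small allocations, resizing in place succeeds in exactly two cases:
 * the maximal request maps to the same small size class as oldsize, or
 * oldsize already lies within the acceptable range, i.e. size <= oldsize
 * <= s2u(size + extra).  For example, with the default 16-byte quantum
 * (size classes ..., 32, 48, ...), size=24/extra=16 against a 32-byte
 * allocation stays put, since 24 <= 32 <= s2u(40) = 48.
 */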
bool
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero) {
	/* Calls with non-zero extra had to clamp extra. */
	assert(extra == 0 || size + extra <= LARGE_MAXCLASS);

	if (unlikely(size > LARGE_MAXCLASS)) {
		return true;
	}

	extent_t *extent = iealloc(tsdn, ptr);
	size_t usize_min = sz_s2u(size);
	size_t usize_max = sz_s2u(size + extra);
	if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
		/*
		 * Avoid moving the allocation if the size class can be left the
		 * same.
		 */
		assert(bin_infos[sz_size2index(oldsize)].reg_size ==
		    oldsize);
		if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
		    sz_size2index(oldsize)) && (size > oldsize || usize_max <
		    oldsize)) {
			return true;
		}

		arena_decay_tick(tsdn, extent_arena_get(extent));
		return false;
	} else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
		return large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
		    zero);
	}

	return true;
}

static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache) {
	if (alignment == 0) {
		return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	}
	usize = sz_sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
		return NULL;
	}
	return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
}

void *
arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache) {
	size_t usize = sz_s2u(size);
	if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) {
		return NULL;
	}

	if (likely(usize <= SMALL_MAXCLASS)) {
		/* Try to avoid moving the allocation. */
		if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) {
			return ptr;
		}
	}

	if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
		return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize,
		    alignment, zero, tcache);
	}

	/*
	 * size and oldsize are different enough that we need to move the
	 * object.  In that case, fall back to allocating new space and copying.
	 */
	void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
	    zero, tcache);
	if (ret == NULL) {
		return NULL;
	}

	/*
	 * Junk/zero-filling were already done by
	 * ipalloc()/arena_malloc().
	 */

	size_t copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
	return ret;
}

dss_prec_t
arena_dss_prec_get(arena_t *arena) {
	return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
}

bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
	if (!have_dss) {
		return (dss_prec != dss_prec_disabled);
	}
	atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
	return false;
}

ssize_t
arena_dirty_decay_ms_default_get(void) {
	return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}

ssize_t
arena_muzzy_decay_ms_default_get(void) {
	return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}

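/*
 * The retain grow limit caps how large a step the arena may take when
 * growing its retained virtual memory mapping.  Both the read and write
 * sides of the corresponding mallctl funnel into this function; a minimal
 * sketch of a caller, assuming the "arena.<i>.retain_grow_limit" mallctl
 * name used by recent jemalloc releases:
 *
 *	size_t limit = 64 << 20;
 *	mallctl("arena.0.retain_grow_limit", NULL, NULL, &limit,
 *	    sizeof(limit));
 */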
bool
arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
    size_t *new_limit) {
	assert(opt_retain);

	pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
	if (new_limit != NULL) {
		size_t limit = *new_limit;
		/* Grow no more than the new limit. */
		if ((new_ind = sz_psz2ind(limit + 1) - 1) >
		     EXTENT_GROW_MAX_PIND) {
			return true;
		}
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
	if (old_limit != NULL) {
		*old_limit = sz_pind2sz(arena->retain_grow_limit);
	}
	if (new_limit != NULL) {
		arena->retain_grow_limit = new_ind;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);

	return false;
}

unsigned
arena_nthreads_get(arena_t *arena, bool internal) {
	return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
}

void
arena_nthreads_inc(arena_t *arena, bool internal) {
	atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

void
arena_nthreads_dec(arena_t *arena, bool internal) {
	atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

size_t
arena_extent_sn_next(arena_t *arena) {
	return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
}

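/*
 * Arena 0 is created during bootstrap and shares the global base
 * allocator (b0get()); every other arena allocates its own base first.
 * This asymmetry is why the error path below only calls base_delete() for
 * ind != 0.
 */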
arena_t *
arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;
	base_t *base;
	unsigned i;

	if (ind == 0) {
		base = b0get();
	} else {
		base = base_new(tsdn, ind, extent_hooks);
		if (base == NULL) {
			return NULL;
		}
	}

	arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE);
	if (arena == NULL) {
		goto label_error;
	}

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	arena->last_thd = NULL;

	if (config_stats) {
		if (arena_stats_init(tsdn, &arena->stats)) {
			goto label_error;
		}

		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
		    WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
			goto label_error;
		}
	}

	if (config_prof) {
		if (prof_accum_init(tsdn, &arena->prof_accum)) {
			goto label_error;
		}
	}

	if (config_cache_oblivious) {
		/*
		 * A nondeterministic seed based on the address of arena reduces
		 * the likelihood of lockstep non-uniform cache index
		 * utilization among identical concurrent processes, but at the
		 * cost of test repeatability.  For debug builds, instead use a
		 * deterministic seed.
		 */
		atomic_store_zu(&arena->offset_state, config_debug ? ind :
		    (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
	}

	atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);

	atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
	    ATOMIC_RELAXED);

	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);

	extent_list_init(&arena->large);
	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
	    WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/*
	 * Delay coalescing for dirty extents despite the disruptive effect on
	 * memory layout for best-fit extent allocation, since cached extents
	 * are likely to be reused soon after deallocation, and the cost of
	 * merging/splitting extents is non-trivial.
	 */
	if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
	    true)) {
		goto label_error;
	}
	/*
	 * Coalesce muzzy extents immediately, because operations on them are in
	 * the critical path much less often than for dirty extents.
	 */
	if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
	    false)) {
		goto label_error;
	}
	/*
	 * Coalesce retained extents immediately, in part because they will
	 * never be evicted (and therefore there's no opportunity for delayed
	 * coalescing), but also because operations on retained extents are not
	 * in the critical path.
	 */
	if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
	    false)) {
		goto label_error;
	}

	if (arena_decay_init(&arena->decay_dirty,
	    arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
		goto label_error;
	}
	if (arena_decay_init(&arena->decay_muzzy,
	    arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
		goto label_error;
	}

	arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
	arena->retain_grow_limit = EXTENT_GROW_MAX_PIND;
	if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	extent_avail_new(&arena->extent_avail);
	if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
	    WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		bool err = bin_init(&arena->bins[i]);
		if (err) {
			goto label_error;
		}
	}

	arena->base = base;
	/* Set arena before creating background threads. */
	arena_set(ind, arena);

	nstime_init(&arena->create_time, 0);
	nstime_update(&arena->create_time);

	/* We don't support reentrancy for arena 0 bootstrapping. */
	if (ind != 0) {
		/*
		 * If we're here, then arena 0 already exists, so bootstrapping
		 * is done enough that we should have tsd.
		 */
		assert(!tsdn_null(tsdn));
		pre_reentrancy(tsdn_tsd(tsdn), arena);
		if (hooks_arena_new_hook) {
			hooks_arena_new_hook();
		}
		post_reentrancy(tsdn_tsd(tsdn));
	}

	return arena;
label_error:
	if (ind != 0) {
		base_delete(tsdn, base);
	}
	return NULL;
}

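/*
 * Besides seeding the decay defaults, arena_boot() precomputes, for every
 * small bin, division magic (div_init()) so that arena_slab_regind() can
 * map an offset within a slab to a region index with a multiply and shift
 * instead of a hardware divide; each reg_size is reconstructed from the
 * size class's (lg_grp, lg_delta, ndelta) triple.
 */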
void
arena_boot(void) {
	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
	arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
#define REGIND_bin_yes(index, reg_size)					\
	div_init(&arena_binind_div_info[(index)], (reg_size));
#define REGIND_bin_no(index, reg_size)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs,		\
    lg_delta_lookup)							\
	REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta << lg_delta))
	SIZE_CLASSES
#undef REGIND_bin_yes
#undef REGIND_bin_no
#undef SC
}

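/*
 * The fork handler calls arena_prefork0() through arena_prefork7() in
 * ascending order, acquiring arena mutexes in a fixed sequence (decay,
 * tcache list, extent growth, extents, extent cache, base, large, bins)
 * consistent with their witness ranks; the postfork functions below
 * release (parent) or reinitialize (child) them in essentially the
 * reverse order.
 */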
void
arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
	if (config_stats) {
		malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
	extents_prefork(tsdn, &arena->extents_dirty);
	extents_prefork(tsdn, &arena->extents_muzzy);
	extents_prefork(tsdn, &arena->extents_retained);
}

void
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
}

void
arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
	base_prefork(tsdn, arena->base);
}

void
arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->large_mtx);
}

void
arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
	for (unsigned i = 0; i < NBINS; i++) {
		bin_prefork(tsdn, &arena->bins[i]);
	}
}

void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	for (i = 0; i < NBINS; i++) {
		bin_postfork_parent(tsdn, &arena->bins[i]);
	}
	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
	base_postfork_parent(tsdn, arena->base);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
	extents_postfork_parent(tsdn, &arena->extents_dirty);
	extents_postfork_parent(tsdn, &arena->extents_muzzy);
	extents_postfork_parent(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
	}
}

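/*
 * Only the forking thread survives in the child, so thread counts are
 * rebuilt from that thread's own arena bindings, and the stats-only
 * tcache list is reconstructed to contain at most that one tcache before
 * the mutexes are reinitialized.
 */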
void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, false);
	}
	if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, true);
	}
	if (config_stats) {
		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
		if (tcache != NULL && tcache->arena == arena) {
			ql_elm_new(tcache, link);
			ql_tail_insert(&arena->tcache_ql, tcache, link);
			cache_bin_array_descriptor_init(
			    &tcache->cache_bin_array_descriptor,
			    tcache->bins_small, tcache->bins_large);
			ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
			    &tcache->cache_bin_array_descriptor, link);
		}
	}

	for (i = 0; i < NBINS; i++) {
		bin_postfork_child(tsdn, &arena->bins[i]);
	}
	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
	base_postfork_child(tsdn, arena->base);
	malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
	extents_postfork_child(tsdn, &arena->extents_dirty);
	extents_postfork_child(tsdn, &arena->extents_muzzy);
	extents_postfork_child(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
	}
}