1 #define JEMALLOC_ARENA_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
4 
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/div.h"
7 #include "jemalloc/internal/extent_dss.h"
8 #include "jemalloc/internal/extent_mmap.h"
9 #include "jemalloc/internal/mutex.h"
10 #include "jemalloc/internal/rtree.h"
11 #include "jemalloc/internal/util.h"
12 
13 JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
14 
15 /******************************************************************************/
16 /* Data. */
17 
18 /*
19  * Define names for both uninitialized and initialized phases, so that
20  * options and mallctl processing are straightforward.
21  */
22 const char *percpu_arena_mode_names[] = {
23 	"percpu",
24 	"phycpu",
25 	"disabled",
26 	"percpu",
27 	"phycpu"
28 };
29 percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;
30 
31 ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
32 ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
33 
34 static atomic_zd_t dirty_decay_ms_default;
35 static atomic_zd_t muzzy_decay_ms_default;
36 
37 const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
38 #define STEP(step, h, x, y)			\
39 		h,
40 		SMOOTHSTEP
41 #undef STEP
42 };
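/*
 * h_steps[] is generated from the SMOOTHSTEP table in
 * jemalloc/internal/smoothstep.h: one fixed-point coefficient per decay
 * epoch, scaled by 2^SMOOTHSTEP_BFP.  Roughly, recently dirtied pages are
 * weighted close to 1 (mostly retained) and fully aged entries close to 0
 * (eligible for purging); see arena_decay_backlog_npages_limit() for usage.
 */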
43 
44 static div_info_t arena_binind_div_info[SC_NBINS];
45 
46 size_t opt_oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
47 size_t oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
48 static unsigned huge_arena_ind;
49 
50 /******************************************************************************/
51 /*
52  * Function prototypes for static functions that are referenced prior to
53  * definition.
54  */
55 
56 static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
57     arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
58     size_t npages_decay_max, bool is_background_thread);
59 static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
60     bool is_background_thread, bool all);
61 static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
62     bin_t *bin);
63 static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
64     bin_t *bin);
65 
66 /******************************************************************************/
67 
68 void
69 arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
70     const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
71     size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
72 	*nthreads += arena_nthreads_get(arena, false);
73 	*dss = dss_prec_names[arena_dss_prec_get(arena)];
74 	*dirty_decay_ms = arena_dirty_decay_ms_get(arena);
75 	*muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
76 	*nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
77 	*ndirty += extents_npages_get(&arena->extents_dirty);
78 	*nmuzzy += extents_npages_get(&arena->extents_muzzy);
79 }
80 
81 void
82 arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
83     const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
84     size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
85     bin_stats_t *bstats, arena_stats_large_t *lstats,
86     arena_stats_extents_t *estats) {
87 	cassert(config_stats);
88 
89 	arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
90 	    muzzy_decay_ms, nactive, ndirty, nmuzzy);
91 
92 	size_t base_allocated, base_resident, base_mapped, metadata_thp;
93 	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
94 	    &base_mapped, &metadata_thp);
95 
96 	arena_stats_lock(tsdn, &arena->stats);
97 
98 	arena_stats_accum_zu(&astats->mapped, base_mapped
99 	    + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
100 	arena_stats_accum_zu(&astats->retained,
101 	    extents_npages_get(&arena->extents_retained) << LG_PAGE);
102 
103 	atomic_store_zu(&astats->extent_avail,
104 	    atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED),
105 	    ATOMIC_RELAXED);
106 
107 	arena_stats_accum_u64(&astats->decay_dirty.npurge,
108 	    arena_stats_read_u64(tsdn, &arena->stats,
109 	    &arena->stats.decay_dirty.npurge));
110 	arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
111 	    arena_stats_read_u64(tsdn, &arena->stats,
112 	    &arena->stats.decay_dirty.nmadvise));
113 	arena_stats_accum_u64(&astats->decay_dirty.purged,
114 	    arena_stats_read_u64(tsdn, &arena->stats,
115 	    &arena->stats.decay_dirty.purged));
116 
117 	arena_stats_accum_u64(&astats->decay_muzzy.npurge,
118 	    arena_stats_read_u64(tsdn, &arena->stats,
119 	    &arena->stats.decay_muzzy.npurge));
120 	arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
121 	    arena_stats_read_u64(tsdn, &arena->stats,
122 	    &arena->stats.decay_muzzy.nmadvise));
123 	arena_stats_accum_u64(&astats->decay_muzzy.purged,
124 	    arena_stats_read_u64(tsdn, &arena->stats,
125 	    &arena->stats.decay_muzzy.purged));
126 
127 	arena_stats_accum_zu(&astats->base, base_allocated);
128 	arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
129 	arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
130 	arena_stats_accum_zu(&astats->resident, base_resident +
131 	    (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
132 	    extents_npages_get(&arena->extents_dirty) +
133 	    extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
134 
135 	for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) {
136 		uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
137 		    &arena->stats.lstats[i].nmalloc);
138 		arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
139 		arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);
140 
141 		uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
142 		    &arena->stats.lstats[i].ndalloc);
143 		arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
144 		arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);
145 
146 		uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
147 		    &arena->stats.lstats[i].nrequests);
148 		arena_stats_accum_u64(&lstats[i].nrequests,
149 		    nmalloc + nrequests);
150 		arena_stats_accum_u64(&astats->nrequests_large,
151 		    nmalloc + nrequests);
152 
153 		assert(nmalloc >= ndalloc);
154 		assert(nmalloc - ndalloc <= SIZE_T_MAX);
155 		size_t curlextents = (size_t)(nmalloc - ndalloc);
156 		lstats[i].curlextents += curlextents;
157 		arena_stats_accum_zu(&astats->allocated_large,
158 		    curlextents * sz_index2size(SC_NBINS + i));
159 	}
160 
161 	for (pszind_t i = 0; i < SC_NPSIZES; i++) {
162 		size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
163 		    retained_bytes;
164 		dirty = extents_nextents_get(&arena->extents_dirty, i);
165 		muzzy = extents_nextents_get(&arena->extents_muzzy, i);
166 		retained = extents_nextents_get(&arena->extents_retained, i);
167 		dirty_bytes = extents_nbytes_get(&arena->extents_dirty, i);
168 		muzzy_bytes = extents_nbytes_get(&arena->extents_muzzy, i);
169 		retained_bytes =
170 		    extents_nbytes_get(&arena->extents_retained, i);
171 
172 		atomic_store_zu(&estats[i].ndirty, dirty, ATOMIC_RELAXED);
173 		atomic_store_zu(&estats[i].nmuzzy, muzzy, ATOMIC_RELAXED);
174 		atomic_store_zu(&estats[i].nretained, retained, ATOMIC_RELAXED);
175 		atomic_store_zu(&estats[i].dirty_bytes, dirty_bytes,
176 		    ATOMIC_RELAXED);
177 		atomic_store_zu(&estats[i].muzzy_bytes, muzzy_bytes,
178 		    ATOMIC_RELAXED);
179 		atomic_store_zu(&estats[i].retained_bytes, retained_bytes,
180 		    ATOMIC_RELAXED);
181 	}
182 
183 	arena_stats_unlock(tsdn, &arena->stats);
184 
185 	/* tcache_bytes counts currently cached bytes. */
186 	atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
187 	malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
188 	cache_bin_array_descriptor_t *descriptor;
189 	ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
190 		szind_t i = 0;
191 		for (; i < SC_NBINS; i++) {
192 			cache_bin_t *tbin = &descriptor->bins_small[i];
193 			arena_stats_accum_zu(&astats->tcache_bytes,
194 			    tbin->ncached * sz_index2size(i));
195 		}
196 		for (; i < nhbins; i++) {
197 			cache_bin_t *tbin = &descriptor->bins_large[i];
198 			arena_stats_accum_zu(&astats->tcache_bytes,
199 			    tbin->ncached * sz_index2size(i));
200 		}
201 	}
202 	malloc_mutex_prof_read(tsdn,
203 	    &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
204 	    &arena->tcache_ql_mtx);
205 	malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
206 
207 #define READ_ARENA_MUTEX_PROF_DATA(mtx, ind)				\
208     malloc_mutex_lock(tsdn, &arena->mtx);				\
209     malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind],		\
210         &arena->mtx);							\
211     malloc_mutex_unlock(tsdn, &arena->mtx);
212 
213 	/* Gather per arena mutex profiling data. */
214 	READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
215 	READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
216 	    arena_prof_mutex_extent_avail)
217 	READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
218 	    arena_prof_mutex_extents_dirty)
219 	READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
220 	    arena_prof_mutex_extents_muzzy)
221 	READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
222 	    arena_prof_mutex_extents_retained)
223 	READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
224 	    arena_prof_mutex_decay_dirty)
225 	READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
226 	    arena_prof_mutex_decay_muzzy)
227 	READ_ARENA_MUTEX_PROF_DATA(base->mtx,
228 	    arena_prof_mutex_base)
229 #undef READ_ARENA_MUTEX_PROF_DATA
230 
231 	nstime_copy(&astats->uptime, &arena->create_time);
232 	nstime_update(&astats->uptime);
233 	nstime_subtract(&astats->uptime, &arena->create_time);
234 
235 	for (szind_t i = 0; i < SC_NBINS; i++) {
236 		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
237 			bin_stats_merge(tsdn, &bstats[i],
238 			    &arena->bins[i].bin_shards[j]);
239 		}
240 	}
241 }
242 
243 void
244 arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
245     extent_hooks_t **r_extent_hooks, extent_t *extent) {
246 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
247 	    WITNESS_RANK_CORE, 0);
248 
249 	extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
250 	    extent);
251 	if (arena_dirty_decay_ms_get(arena) == 0) {
252 		arena_decay_dirty(tsdn, arena, false, true);
253 	} else {
254 		arena_background_thread_inactivity_check(tsdn, arena, false);
255 	}
256 }
257 
258 static void *
259 arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
260 	void *ret;
261 	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
262 	size_t regind;
263 
264 	assert(extent_nfree_get(slab) > 0);
265 	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
266 
267 	regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
268 	ret = (void *)((uintptr_t)extent_addr_get(slab) +
269 	    (uintptr_t)(bin_info->reg_size * regind));
270 	extent_nfree_dec(slab);
271 	return ret;
272 }
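/*
 * Sketch of the allocation above: bitmap_sfu() ("set first unset") returns
 * the index of the lowest free region in the slab bitmap and marks it
 * allocated, so the region address is simply slab base + regind * reg_size.
 * With illustrative numbers, reg_size == 32 and regind == 5 give a region
 * starting 160 bytes into the slab.
 */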
273 
274 static void
275 arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
276 			   unsigned cnt, void** ptrs) {
277 	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
278 
279 	assert(extent_nfree_get(slab) >= cnt);
280 	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
281 
282 #if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
283 	for (unsigned i = 0; i < cnt; i++) {
284 		size_t regind = bitmap_sfu(slab_data->bitmap,
285 					   &bin_info->bitmap_info);
286 		*(ptrs + i) = (void *)((uintptr_t)extent_addr_get(slab) +
287 		    (uintptr_t)(bin_info->reg_size * regind));
288 	}
289 #else
290 	unsigned group = 0;
291 	bitmap_t g = slab_data->bitmap[group];
292 	unsigned i = 0;
293 	while (i < cnt) {
294 		while (g == 0) {
295 			g = slab_data->bitmap[++group];
296 		}
297 		size_t shift = group << LG_BITMAP_GROUP_NBITS;
298 		size_t pop = popcount_lu(g);
299 		if (pop > (cnt - i)) {
300 			pop = cnt - i;
301 		}
302 
303 		/*
304 		 * Load from memory locations only once, outside the
305 		 * hot loop below.
306 		 */
307 		uintptr_t base = (uintptr_t)extent_addr_get(slab);
308 		uintptr_t regsize = (uintptr_t)bin_info->reg_size;
309 		while (pop--) {
310 			size_t bit = cfs_lu(&g);
311 			size_t regind = shift + bit;
312 			*(ptrs + i) = (void *)(base + regsize * regind);
313 
314 			i++;
315 		}
316 		slab_data->bitmap[group] = g;
317 	}
318 #endif
319 	extent_nfree_sub(slab, cnt);
320 }
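/*
 * In the popcount path above, each set bit in a physical bitmap group marks
 * a free region: popcount_lu() bounds how many regions the group can supply,
 * cfs_lu() peels off the lowest set bit and returns its index, and
 * regind = (group << LG_BITMAP_GROUP_NBITS) + bit.  The bitmap_sfu() loop is
 * the fallback when POPCOUNTL is unavailable or the bitmap is tree-based.
 */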
321 
322 #ifndef JEMALLOC_JET
323 static
324 #endif
325 size_t
326 arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
327 	size_t diff, regind;
328 
329 	/* Freeing a pointer outside the slab can cause assertion failure. */
330 	assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
331 	assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
332 	/* Freeing an interior pointer can cause assertion failure. */
333 	assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
334 	    (uintptr_t)bin_infos[binind].reg_size == 0);
335 
336 	diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
337 
338 	/* Avoid doing division with a variable divisor. */
339 	regind = div_compute(&arena_binind_div_info[binind], diff);
340 
341 	assert(regind < bin_infos[binind].nregs);
342 
343 	return regind;
344 }
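/*
 * div_compute() performs diff / reg_size via a precomputed multiply-and-shift
 * (arena_binind_div_info[] is populated at boot), avoiding a hardware divide
 * by a runtime-variable divisor.  Illustrative numbers: reg_size == 48 and a
 * pointer 480 bytes past the slab base give regind == 10.
 */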
345 
346 static void
347 arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
348 	szind_t binind = extent_szind_get(slab);
349 	const bin_info_t *bin_info = &bin_infos[binind];
350 	size_t regind = arena_slab_regind(slab, binind, ptr);
351 
352 	assert(extent_nfree_get(slab) < bin_info->nregs);
353 	/* Freeing an unallocated pointer can cause assertion failure. */
354 	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
355 
356 	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
357 	extent_nfree_inc(slab);
358 }
359 
360 static void
361 arena_nactive_add(arena_t *arena, size_t add_pages) {
362 	atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
363 }
364 
365 static void
366 arena_nactive_sub(arena_t *arena, size_t sub_pages) {
367 	assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
368 	atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
369 }
370 
371 static void
372 arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
373 	szind_t index, hindex;
374 
375 	cassert(config_stats);
376 
377 	if (usize < SC_LARGE_MINCLASS) {
378 		usize = SC_LARGE_MINCLASS;
379 	}
380 	index = sz_size2index(usize);
381 	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
382 
383 	arena_stats_add_u64(tsdn, &arena->stats,
384 	    &arena->stats.lstats[hindex].nmalloc, 1);
385 }
386 
387 static void
388 arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
389 	szind_t index, hindex;
390 
391 	cassert(config_stats);
392 
393 	if (usize < SC_LARGE_MINCLASS) {
394 		usize = SC_LARGE_MINCLASS;
395 	}
396 	index = sz_size2index(usize);
397 	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
398 
399 	arena_stats_add_u64(tsdn, &arena->stats,
400 	    &arena->stats.lstats[hindex].ndalloc, 1);
401 }
402 
403 static void
404 arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
405     size_t usize) {
406 	arena_large_dalloc_stats_update(tsdn, arena, oldusize);
407 	arena_large_malloc_stats_update(tsdn, arena, usize);
408 }
409 
410 static bool
411 arena_may_have_muzzy(arena_t *arena) {
412 	return (pages_can_purge_lazy && (arena_muzzy_decay_ms_get(arena) != 0));
413 }
414 
415 extent_t *
416 arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
417     size_t alignment, bool *zero) {
418 	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
419 
420 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
421 	    WITNESS_RANK_CORE, 0);
422 
423 	szind_t szind = sz_size2index(usize);
424 	size_t mapped_add;
425 	bool commit = true;
426 	extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
427 	    &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
428 	    szind, zero, &commit);
429 	if (extent == NULL && arena_may_have_muzzy(arena)) {
430 		extent = extents_alloc(tsdn, arena, &extent_hooks,
431 		    &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
432 		    false, szind, zero, &commit);
433 	}
434 	size_t size = usize + sz_large_pad;
435 	if (extent == NULL) {
436 		extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
437 		    usize, sz_large_pad, alignment, false, szind, zero,
438 		    &commit);
439 		if (config_stats) {
440 			/*
441 			 * extent may be NULL on OOM, but in that case
442 			 * mapped_add isn't used below, so there's no need to
443 			 * conditionally set it to 0 here.
444 			 */
445 			mapped_add = size;
446 		}
447 	} else if (config_stats) {
448 		mapped_add = 0;
449 	}
450 
451 	if (extent != NULL) {
452 		if (config_stats) {
453 			arena_stats_lock(tsdn, &arena->stats);
454 			arena_large_malloc_stats_update(tsdn, arena, usize);
455 			if (mapped_add != 0) {
456 				arena_stats_add_zu(tsdn, &arena->stats,
457 				    &arena->stats.mapped, mapped_add);
458 			}
459 			arena_stats_unlock(tsdn, &arena->stats);
460 		}
461 		arena_nactive_add(arena, size >> LG_PAGE);
462 	}
463 
464 	return extent;
465 }
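/*
 * The allocation above recycles in increasing cost order: dirty extents
 * first, then muzzy (lazily purged) extents, and only then a fresh mapping
 * via extent_alloc_wrapper().  Only the last case grows the mapped statistic,
 * which is why mapped_add is nonzero only on that path.
 */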
466 
467 void
468 arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
469 	if (config_stats) {
470 		arena_stats_lock(tsdn, &arena->stats);
471 		arena_large_dalloc_stats_update(tsdn, arena,
472 		    extent_usize_get(extent));
473 		arena_stats_unlock(tsdn, &arena->stats);
474 	}
475 	arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
476 }
477 
478 void
479 arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
480     size_t oldusize) {
481 	size_t usize = extent_usize_get(extent);
482 	size_t udiff = oldusize - usize;
483 
484 	if (config_stats) {
485 		arena_stats_lock(tsdn, &arena->stats);
486 		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
487 		arena_stats_unlock(tsdn, &arena->stats);
488 	}
489 	arena_nactive_sub(arena, udiff >> LG_PAGE);
490 }
491 
492 void
493 arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
494     size_t oldusize) {
495 	size_t usize = extent_usize_get(extent);
496 	size_t udiff = usize - oldusize;
497 
498 	if (config_stats) {
499 		arena_stats_lock(tsdn, &arena->stats);
500 		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
501 		arena_stats_unlock(tsdn, &arena->stats);
502 	}
503 	arena_nactive_add(arena, udiff >> LG_PAGE);
504 }
505 
506 static ssize_t
507 arena_decay_ms_read(arena_decay_t *decay) {
508 	return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
509 }
510 
511 static void
512 arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
513 	atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
514 }
515 
516 static void
517 arena_decay_deadline_init(arena_decay_t *decay) {
518 	/*
519 	 * Generate a new deadline that is uniformly random within the next
520 	 * epoch after the current one.
521 	 */
522 	nstime_copy(&decay->deadline, &decay->epoch);
523 	nstime_add(&decay->deadline, &decay->interval);
524 	if (arena_decay_ms_read(decay) > 0) {
525 		nstime_t jitter;
526 
527 		nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
528 		    nstime_ns(&decay->interval)));
529 		nstime_add(&decay->deadline, &jitter);
530 	}
531 }
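/*
 * Deadline math, roughly: deadline = epoch + interval + jitter, where jitter
 * is drawn uniformly from [0, interval) by prng_range_u64().  The jitter
 * keeps independent decay states from reaching their deadlines in lockstep
 * and purging at the same instant.
 */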
532 
533 static bool
534 arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
535 	return (nstime_compare(&decay->deadline, time) <= 0);
536 }
537 
538 static size_t
539 arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
540 	uint64_t sum;
541 	size_t npages_limit_backlog;
542 	unsigned i;
543 
544 	/*
545 	 * For each element of decay_backlog, multiply by the corresponding
546 	 * fixed-point smoothstep decay factor.  Sum the products, then divide
547 	 * to round down to the nearest whole number of pages.
548 	 */
549 	sum = 0;
550 	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
551 		sum += decay->backlog[i] * h_steps[i];
552 	}
553 	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
554 
555 	return npages_limit_backlog;
556 }
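/*
 * Worked example with illustrative numbers: if the backlog holds 1000 pages
 * in a recent epoch weighted at 0.75 * 2^SMOOTHSTEP_BFP and 400 pages in an
 * old epoch weighted at 0.10 * 2^SMOOTHSTEP_BFP, then
 *   npages_limit = 1000 * 0.75 + 400 * 0.10 = 790
 * pages may remain unpurged; anything above that is eligible for purging.
 */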
557 
558 static void
559 arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
560 	size_t npages_delta = (current_npages > decay->nunpurged) ?
561 	    current_npages - decay->nunpurged : 0;
562 	decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
563 
564 	if (config_debug) {
565 		if (current_npages > decay->ceil_npages) {
566 			decay->ceil_npages = current_npages;
567 		}
568 		size_t npages_limit = arena_decay_backlog_npages_limit(decay);
569 		assert(decay->ceil_npages >= npages_limit);
570 		if (decay->ceil_npages > npages_limit) {
571 			decay->ceil_npages = npages_limit;
572 		}
573 	}
574 }
575 
576 static void
577 arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
578     size_t current_npages) {
579 	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
580 		memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
581 		    sizeof(size_t));
582 	} else {
583 		size_t nadvance_z = (size_t)nadvance_u64;
584 
585 		assert((uint64_t)nadvance_z == nadvance_u64);
586 
587 		memmove(decay->backlog, &decay->backlog[nadvance_z],
588 		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
589 		if (nadvance_z > 1) {
590 			memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
591 			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
592 		}
593 	}
594 
595 	arena_decay_backlog_update_last(decay, current_npages);
596 }
597 
598 static void
599 arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
600     extents_t *extents, size_t current_npages, size_t npages_limit,
601     bool is_background_thread) {
602 	if (current_npages > npages_limit) {
603 		arena_decay_to_limit(tsdn, arena, decay, extents, false,
604 		    npages_limit, current_npages - npages_limit,
605 		    is_background_thread);
606 	}
607 }
608 
609 static void
610 arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
611     size_t current_npages) {
612 	assert(arena_decay_deadline_reached(decay, time));
613 
614 	nstime_t delta;
615 	nstime_copy(&delta, time);
616 	nstime_subtract(&delta, &decay->epoch);
617 
618 	uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
619 	assert(nadvance_u64 > 0);
620 
621 	/* Add nadvance_u64 decay intervals to epoch. */
622 	nstime_copy(&delta, &decay->interval);
623 	nstime_imultiply(&delta, nadvance_u64);
624 	nstime_add(&decay->epoch, &delta);
625 
626 	/* Set a new deadline. */
627 	arena_decay_deadline_init(decay);
628 
629 	/* Update the backlog. */
630 	arena_decay_backlog_update(decay, nadvance_u64, current_npages);
631 }
632 
633 static void
634 arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
635     extents_t *extents, const nstime_t *time, bool is_background_thread) {
636 	size_t current_npages = extents_npages_get(extents);
637 	arena_decay_epoch_advance_helper(decay, time, current_npages);
638 
639 	size_t npages_limit = arena_decay_backlog_npages_limit(decay);
640 	/* try_purge() may drop decay->mtx; finish updating decay state first. */
641 	decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
642 	    current_npages;
643 
644 	if (!background_thread_enabled() || is_background_thread) {
645 		arena_decay_try_purge(tsdn, arena, decay, extents,
646 		    current_npages, npages_limit, is_background_thread);
647 	}
648 }
649 
650 static void
651 arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
652 	arena_decay_ms_write(decay, decay_ms);
653 	if (decay_ms > 0) {
654 		nstime_init(&decay->interval, (uint64_t)decay_ms *
655 		    KQU(1000000));
656 		nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
657 	}
658 
659 	nstime_init(&decay->epoch, 0);
660 	nstime_update(&decay->epoch);
661 	decay->jitter_state = (uint64_t)(uintptr_t)decay;
662 	arena_decay_deadline_init(decay);
663 	decay->nunpurged = 0;
664 	memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
665 }
666 
667 static bool
668 arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
669     arena_stats_decay_t *stats) {
670 	if (config_debug) {
671 		for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
672 			assert(((char *)decay)[i] == 0);
673 		}
674 		decay->ceil_npages = 0;
675 	}
676 	if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
677 	    malloc_mutex_rank_exclusive)) {
678 		return true;
679 	}
680 	decay->purging = false;
681 	arena_decay_reinit(decay, decay_ms);
682 	/* Memory is zeroed, so there is no need to clear stats. */
683 	if (config_stats) {
684 		decay->stats = stats;
685 	}
686 	return false;
687 }
688 
689 static bool
690 arena_decay_ms_valid(ssize_t decay_ms) {
691 	if (decay_ms < -1) {
692 		return false;
693 	}
694 	if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
695 	    KQU(1000)) {
696 		return true;
697 	}
698 	return false;
699 }
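/*
 * decay_ms semantics: -1 disables decay-driven purging, 0 purges eligible
 * pages immediately, and a positive value is the approximate time over which
 * unused pages decay.  The upper bound above only rejects values too large
 * to represent in an nstime_t once converted from milliseconds to
 * nanoseconds.
 */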
700 
701 static bool
702 arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
703     extents_t *extents, bool is_background_thread) {
704 	malloc_mutex_assert_owner(tsdn, &decay->mtx);
705 
706 	/* Purge everything if decay_ms is 0; do nothing if decay is disabled (-1). */
707 	ssize_t decay_ms = arena_decay_ms_read(decay);
708 	if (decay_ms <= 0) {
709 		if (decay_ms == 0) {
710 			arena_decay_to_limit(tsdn, arena, decay, extents, false,
711 			    0, extents_npages_get(extents),
712 			    is_background_thread);
713 		}
714 		return false;
715 	}
716 
717 	nstime_t time;
718 	nstime_init(&time, 0);
719 	nstime_update(&time);
720 	if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
721 	    > 0)) {
722 		/*
723 		 * Time went backwards.  Move the epoch back in time and
724 		 * generate a new deadline, with the expectation that time
725 		 * typically flows forward for long enough periods of time that
726 		 * epochs complete.  Unfortunately, this strategy is susceptible
727 		 * to clock jitter triggering premature epoch advances, but
728 		 * clock jitter estimation and compensation isn't feasible here
729 		 * because calls into this code are event-driven.
730 		 */
731 		nstime_copy(&decay->epoch, &time);
732 		arena_decay_deadline_init(decay);
733 	} else {
734 		/* Verify that time does not go backwards. */
735 		assert(nstime_compare(&decay->epoch, &time) <= 0);
736 	}
737 
738 	/*
739 	 * If the deadline has been reached, advance to the current epoch and
740 	 * purge to the new limit if necessary.  Note that dirty pages created
741 	 * during the current epoch are not subject to purge until a future
742 	 * epoch; as a result, purging only happens during epoch advances or when
743 	 * triggered by background threads (as a scheduled event).
744 	 */
745 	bool advance_epoch = arena_decay_deadline_reached(decay, &time);
746 	if (advance_epoch) {
747 		arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
748 		    is_background_thread);
749 	} else if (is_background_thread) {
750 		arena_decay_try_purge(tsdn, arena, decay, extents,
751 		    extents_npages_get(extents),
752 		    arena_decay_backlog_npages_limit(decay),
753 		    is_background_thread);
754 	}
755 
756 	return advance_epoch;
757 }
758 
759 static ssize_t
760 arena_decay_ms_get(arena_decay_t *decay) {
761 	return arena_decay_ms_read(decay);
762 }
763 
764 ssize_t
765 arena_dirty_decay_ms_get(arena_t *arena) {
766 	return arena_decay_ms_get(&arena->decay_dirty);
767 }
768 
769 ssize_t
770 arena_muzzy_decay_ms_get(arena_t *arena) {
771 	return arena_decay_ms_get(&arena->decay_muzzy);
772 }
773 
774 static bool
775 arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
776     extents_t *extents, ssize_t decay_ms) {
777 	if (!arena_decay_ms_valid(decay_ms)) {
778 		return true;
779 	}
780 
781 	malloc_mutex_lock(tsdn, &decay->mtx);
782 	/*
783 	 * Restart decay backlog from scratch, which may cause many dirty pages
784 	 * to be immediately purged.  It would conceptually be possible to map
785 	 * the old backlog onto the new backlog, but there is no justification
786 	 * for such complexity since decay_ms changes are intended to be
787 	 * infrequent, either between the {-1, 0, >0} states, or a one-time
788 	 * arbitrary change during initial arena configuration.
789 	 */
790 	arena_decay_reinit(decay, decay_ms);
791 	arena_maybe_decay(tsdn, arena, decay, extents, false);
792 	malloc_mutex_unlock(tsdn, &decay->mtx);
793 
794 	return false;
795 }
796 
797 bool
798 arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
799     ssize_t decay_ms) {
800 	return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
801 	    &arena->extents_dirty, decay_ms);
802 }
803 
804 bool
805 arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
806     ssize_t decay_ms) {
807 	return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
808 	    &arena->extents_muzzy, decay_ms);
809 }
810 
811 static size_t
812 arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
813     extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
814 	size_t npages_decay_max, extent_list_t *decay_extents) {
815 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
816 	    WITNESS_RANK_CORE, 0);
817 
818 	/* Stash extents according to npages_limit. */
819 	size_t nstashed = 0;
820 	extent_t *extent;
821 	while (nstashed < npages_decay_max &&
822 	    (extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
823 	    npages_limit)) != NULL) {
824 		extent_list_append(decay_extents, extent);
825 		nstashed += extent_size_get(extent) >> LG_PAGE;
826 	}
827 	return nstashed;
828 }
829 
830 static size_t
831 arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
832     extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
833     bool all, extent_list_t *decay_extents, bool is_background_thread) {
834 	size_t nmadvise, nunmapped;
835 	size_t npurged;
836 
837 	if (config_stats) {
838 		nmadvise = 0;
839 		nunmapped = 0;
840 	}
841 	npurged = 0;
842 
843 	ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
844 	for (extent_t *extent = extent_list_first(decay_extents); extent !=
845 	    NULL; extent = extent_list_first(decay_extents)) {
846 		if (config_stats) {
847 			nmadvise++;
848 		}
849 		size_t npages = extent_size_get(extent) >> LG_PAGE;
850 		npurged += npages;
851 		extent_list_remove(decay_extents, extent);
852 		switch (extents_state_get(extents)) {
853 		case extent_state_active:
854 			not_reached();
855 		case extent_state_dirty:
856 			if (!all && muzzy_decay_ms != 0 &&
857 			    !extent_purge_lazy_wrapper(tsdn, arena,
858 			    r_extent_hooks, extent, 0,
859 			    extent_size_get(extent))) {
860 				extents_dalloc(tsdn, arena, r_extent_hooks,
861 				    &arena->extents_muzzy, extent);
862 				arena_background_thread_inactivity_check(tsdn,
863 				    arena, is_background_thread);
864 				break;
865 			}
866 			/* Fall through. */
867 		case extent_state_muzzy:
868 			extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
869 			    extent);
870 			if (config_stats) {
871 				nunmapped += npages;
872 			}
873 			break;
874 		case extent_state_retained:
875 		default:
876 			not_reached();
877 		}
878 	}
879 
880 	if (config_stats) {
881 		arena_stats_lock(tsdn, &arena->stats);
882 		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
883 		    1);
884 		arena_stats_add_u64(tsdn, &arena->stats,
885 		    &decay->stats->nmadvise, nmadvise);
886 		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
887 		    npurged);
888 		arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
889 		    nunmapped << LG_PAGE);
890 		arena_stats_unlock(tsdn, &arena->stats);
891 	}
892 
893 	return npurged;
894 }
895 
896 /*
897  * npages_limit: Decay at most npages_decay_max pages without violating the
898  * invariant: (extents_npages_get(extents) >= npages_limit).  We need an upper
899  * bound (npages_decay_max) on the number of pages to decay, namely to keep the
900  * amount stashed bounded; otherwise new pages could keep being added to extents
901  * during the current decay run, so that the purging thread never finishes.
902  */
903 static void
904 arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
905     extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
906     bool is_background_thread) {
907 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
908 	    WITNESS_RANK_CORE, 1);
909 	malloc_mutex_assert_owner(tsdn, &decay->mtx);
910 
911 	if (decay->purging) {
912 		return;
913 	}
914 	decay->purging = true;
915 	malloc_mutex_unlock(tsdn, &decay->mtx);
916 
917 	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
918 
919 	extent_list_t decay_extents;
920 	extent_list_init(&decay_extents);
921 
922 	size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
923 	    npages_limit, npages_decay_max, &decay_extents);
924 	if (npurge != 0) {
925 		size_t npurged = arena_decay_stashed(tsdn, arena,
926 		    &extent_hooks, decay, extents, all, &decay_extents,
927 		    is_background_thread);
928 		assert(npurged == npurge);
929 	}
930 
931 	malloc_mutex_lock(tsdn, &decay->mtx);
932 	decay->purging = false;
933 }
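/*
 * The purge above is two-phase: arena_stash_decayed() moves extents from
 * `extents` onto a private list, then arena_decay_stashed() purges or unmaps
 * them.  decay->mtx is dropped for the duration and decay->purging acts as
 * the reentrancy guard, so concurrent callers return early rather than
 * queueing behind a long purge.
 */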
934 
935 static bool
936 arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
937     extents_t *extents, bool is_background_thread, bool all) {
938 	if (all) {
939 		malloc_mutex_lock(tsdn, &decay->mtx);
940 		arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
941 		    extents_npages_get(extents), is_background_thread);
942 		malloc_mutex_unlock(tsdn, &decay->mtx);
943 
944 		return false;
945 	}
946 
947 	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
948 		/* No need to wait if another thread's decay is already in progress. */
949 		return true;
950 	}
951 
952 	bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
953 	    is_background_thread);
954 	size_t npages_new;
955 	if (epoch_advanced) {
956 		/* Backlog is updated on epoch advance. */
957 		npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
958 	}
959 	malloc_mutex_unlock(tsdn, &decay->mtx);
960 
961 	if (have_background_thread && background_thread_enabled() &&
962 	    epoch_advanced && !is_background_thread) {
963 		background_thread_interval_check(tsdn, arena, decay,
964 		    npages_new);
965 	}
966 
967 	return false;
968 }
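/*
 * Two modes above: all == true locks decay->mtx and purges everything;
 * otherwise the trylock makes the call best-effort (a held mutex means decay
 * is already running), the epoch advances if its deadline passed, and when
 * background threads are enabled an epoch advance reports the newest backlog
 * entry to background_thread_interval_check() so purging can be rescheduled.
 */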
969 
970 static bool
971 arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
972     bool all) {
973 	return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
974 	    &arena->extents_dirty, is_background_thread, all);
975 }
976 
977 static bool
978 arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
979     bool all) {
980 	return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
981 	    &arena->extents_muzzy, is_background_thread, all);
982 }
983 
984 void
985 arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
986 	if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
987 		return;
988 	}
989 	arena_decay_muzzy(tsdn, arena, is_background_thread, all);
990 }
991 
992 static void
993 arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
994 	arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
995 
996 	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
997 	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
998 }
999 
1000 static void
1001 arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
1002 	assert(extent_nfree_get(slab) > 0);
1003 	extent_heap_insert(&bin->slabs_nonfull, slab);
1004 }
1005 
1006 static void
1007 arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
1008 	extent_heap_remove(&bin->slabs_nonfull, slab);
1009 }
1010 
1011 static extent_t *
1012 arena_bin_slabs_nonfull_tryget(bin_t *bin) {
1013 	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
1014 	if (slab == NULL) {
1015 		return NULL;
1016 	}
1017 	if (config_stats) {
1018 		bin->stats.reslabs++;
1019 	}
1020 	return slab;
1021 }
1022 
1023 static void
1024 arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
1025 	assert(extent_nfree_get(slab) == 0);
1026 	/*
1027 	 *  Tracking extents is required by arena_reset, which is not allowed
1028 	 *  for auto arenas.  Bypass this step for auto arenas to avoid touching
1029 	 *  the extent linkage, which often results in cache misses.
1030 	 */
1031 	if (arena_is_auto(arena)) {
1032 		return;
1033 	}
1034 	extent_list_append(&bin->slabs_full, slab);
1035 }
1036 
1037 static void
1038 arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
1039 	if (arena_is_auto(arena)) {
1040 		return;
1041 	}
1042 	extent_list_remove(&bin->slabs_full, slab);
1043 }
1044 
1045 static void
1046 arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
1047 	extent_t *slab;
1048 
1049 	malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1050 	if (bin->slabcur != NULL) {
1051 		slab = bin->slabcur;
1052 		bin->slabcur = NULL;
1053 		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1054 		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
1055 		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1056 	}
1057 	while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
1058 		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1059 		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
1060 		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1061 	}
1062 	for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
1063 	     slab = extent_list_first(&bin->slabs_full)) {
1064 		arena_bin_slabs_full_remove(arena, bin, slab);
1065 		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1066 		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
1067 		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1068 	}
1069 	if (config_stats) {
1070 		bin->stats.curregs = 0;
1071 		bin->stats.curslabs = 0;
1072 	}
1073 	malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1074 }
1075 
1076 void
1077 arena_reset(tsd_t *tsd, arena_t *arena) {
1078 	/*
1079 	 * Locking in this function is unintuitive.  The caller guarantees that
1080 	 * no concurrent operations are happening in this arena, but there are
1081 	 * still reasons that some locking is necessary:
1082 	 *
1083 	 * - Some of the functions in the transitive closure of calls assume
1084 	 *   appropriate locks are held, and in some cases these locks are
1085 	 *   temporarily dropped to avoid lock order reversal or deadlock due to
1086 	 *   reentry.
1087 	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
1088 	 *   strictly speaking this is a "concurrent operation", disallowing
1089 	 *   stats refreshes would impose an inconvenient burden.
1090 	 */
1091 
1092 	/* Large allocations. */
1093 	malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
1094 
1095 	for (extent_t *extent = extent_list_first(&arena->large); extent !=
1096 	    NULL; extent = extent_list_first(&arena->large)) {
1097 		void *ptr = extent_base_get(extent);
1098 		size_t usize;
1099 
1100 		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
1101 		alloc_ctx_t alloc_ctx;
1102 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
1103 		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
1104 		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
1105 		assert(alloc_ctx.szind != SC_NSIZES);
1106 
1107 		if (config_stats || (config_prof && opt_prof)) {
1108 			usize = sz_index2size(alloc_ctx.szind);
1109 			assert(usize == isalloc(tsd_tsdn(tsd), ptr));
1110 		}
1111 		/* Remove large allocation from prof sample set. */
1112 		if (config_prof && opt_prof) {
1113 			prof_free(tsd, ptr, usize, &alloc_ctx);
1114 		}
1115 		large_dalloc(tsd_tsdn(tsd), extent);
1116 		malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
1117 	}
1118 	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
1119 
1120 	/* Bins. */
1121 	for (unsigned i = 0; i < SC_NBINS; i++) {
1122 		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
1123 			arena_bin_reset(tsd, arena,
1124 			    &arena->bins[i].bin_shards[j]);
1125 		}
1126 	}
1127 
1128 	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
1129 }
1130 
1131 static void
1132 arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
1133 	/*
1134 	 * Iterate over the retained extents and destroy them.  This gives the
1135 	 * extent allocator underlying the extent hooks an opportunity to unmap
1136 	 * all retained memory without having to keep its own metadata
1137 	 * structures.  In practice, virtual memory for dss-allocated extents is
1138 	 * leaked here, so best practice is to avoid dss for arenas to be
1139 	 * destroyed, or provide custom extent hooks that track retained
1140 	 * dss-based extents for later reuse.
1141 	 */
1142 	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
1143 	extent_t *extent;
1144 	while ((extent = extents_evict(tsdn, arena, &extent_hooks,
1145 	    &arena->extents_retained, 0)) != NULL) {
1146 		extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
1147 	}
1148 }
1149 
1150 void
1151 arena_destroy(tsd_t *tsd, arena_t *arena) {
1152 	assert(base_ind_get(arena->base) >= narenas_auto);
1153 	assert(arena_nthreads_get(arena, false) == 0);
1154 	assert(arena_nthreads_get(arena, true) == 0);
1155 
1156 	/*
1157 	 * No allocations have occurred since arena_reset() was called.
1158 	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
1159 	 * extents, so only retained extents may remain.
1160 	 */
1161 	assert(extents_npages_get(&arena->extents_dirty) == 0);
1162 	assert(extents_npages_get(&arena->extents_muzzy) == 0);
1163 
1164 	/* Deallocate retained memory. */
1165 	arena_destroy_retained(tsd_tsdn(tsd), arena);
1166 
1167 	/*
1168 	 * Remove the arena pointer from the arenas array.  We rely on the fact
1169 	 * that there is no way for the application to get a dirty read from the
1170 	 * arenas array unless there is an inherent race in the application
1171 	 * involving access of an arena being concurrently destroyed.  The
1172 	 * application must synchronize knowledge of the arena's validity, so as
1173 	 * long as we use an atomic write to update the arenas array, the
1174 	 * application will get a clean read any time after it synchronizes
1175 	 * knowledge that the arena is no longer valid.
1176 	 */
1177 	arena_set(base_ind_get(arena->base), NULL);
1178 
1179 	/*
1180 	 * Destroy the base allocator, which manages all metadata ever mapped by
1181 	 * this arena.
1182 	 */
1183 	base_delete(tsd_tsdn(tsd), arena->base);
1184 }
1185 
1186 static extent_t *
1187 arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
1188     extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
1189     szind_t szind) {
1190 	extent_t *slab;
1191 	bool zero, commit;
1192 
1193 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1194 	    WITNESS_RANK_CORE, 0);
1195 
1196 	zero = false;
1197 	commit = true;
1198 	slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
1199 	    bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);
1200 
1201 	if (config_stats && slab != NULL) {
1202 		arena_stats_mapped_add(tsdn, &arena->stats,
1203 		    bin_info->slab_size);
1204 	}
1205 
1206 	return slab;
1207 }
1208 
1209 static extent_t *
1210 arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
1211     const bin_info_t *bin_info) {
1212 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1213 	    WITNESS_RANK_CORE, 0);
1214 
1215 	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
1216 	szind_t szind = sz_size2index(bin_info->reg_size);
1217 	bool zero = false;
1218 	bool commit = true;
1219 	extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
1220 	    &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
1221 	    binind, &zero, &commit);
1222 	if (slab == NULL && arena_may_have_muzzy(arena)) {
1223 		slab = extents_alloc(tsdn, arena, &extent_hooks,
1224 		    &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
1225 		    true, binind, &zero, &commit);
1226 	}
1227 	if (slab == NULL) {
1228 		slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
1229 		    bin_info, szind);
1230 		if (slab == NULL) {
1231 			return NULL;
1232 		}
1233 	}
1234 	assert(extent_slab_get(slab));
1235 
1236 	/* Initialize slab internals. */
1237 	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
1238 	extent_nfree_binshard_set(slab, bin_info->nregs, binshard);
1239 	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
1240 
1241 	arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
1242 
1243 	return slab;
1244 }
1245 
1246 static extent_t *
1247 arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
1248     szind_t binind, unsigned binshard) {
1249 	extent_t *slab;
1250 	const bin_info_t *bin_info;
1251 
1252 	/* Look for a usable slab. */
1253 	slab = arena_bin_slabs_nonfull_tryget(bin);
1254 	if (slab != NULL) {
1255 		return slab;
1256 	}
1257 	/* No existing slabs have any space available. */
1258 
1259 	bin_info = &bin_infos[binind];
1260 
1261 	/* Allocate a new slab. */
1262 	malloc_mutex_unlock(tsdn, &bin->lock);
1263 	/******************************/
1264 	slab = arena_slab_alloc(tsdn, arena, binind, binshard, bin_info);
1265 	/********************************/
1266 	malloc_mutex_lock(tsdn, &bin->lock);
1267 	if (slab != NULL) {
1268 		if (config_stats) {
1269 			bin->stats.nslabs++;
1270 			bin->stats.curslabs++;
1271 		}
1272 		return slab;
1273 	}
1274 
1275 	/*
1276 	 * arena_slab_alloc() failed, but another thread may have made
1277 	 * sufficient memory available while this one dropped bin->lock above,
1278 	 * so search one more time.
1279 	 */
1280 	slab = arena_bin_slabs_nonfull_tryget(bin);
1281 	if (slab != NULL) {
1282 		return slab;
1283 	}
1284 
1285 	return NULL;
1286 }
1287 
1288 /* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
1289 static void *
1290 arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
1291     szind_t binind, unsigned binshard) {
1292 	const bin_info_t *bin_info;
1293 	extent_t *slab;
1294 
1295 	bin_info = &bin_infos[binind];
1296 	if (!arena_is_auto(arena) && bin->slabcur != NULL) {
1297 		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
1298 		bin->slabcur = NULL;
1299 	}
1300 	slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind, binshard);
1301 	if (bin->slabcur != NULL) {
1302 		/*
1303 		 * Another thread updated slabcur while this one ran without the
1304 		 * bin lock in arena_bin_nonfull_slab_get().
1305 		 */
1306 		if (extent_nfree_get(bin->slabcur) > 0) {
1307 			void *ret = arena_slab_reg_alloc(bin->slabcur,
1308 			    bin_info);
1309 			if (slab != NULL) {
1310 				/*
1311 				 * arena_slab_alloc() may have allocated slab,
1312 				 * or it may have been pulled from
1313 				 * slabs_nonfull.  Therefore it is unsafe to
1314 				 * make any assumptions about how slab has
1315 				 * previously been used, and
1316 				 * arena_bin_lower_slab() must be called, as if
1317 				 * a region were just deallocated from the slab.
1318 				 */
1319 				if (extent_nfree_get(slab) == bin_info->nregs) {
1320 					arena_dalloc_bin_slab(tsdn, arena, slab,
1321 					    bin);
1322 				} else {
1323 					arena_bin_lower_slab(tsdn, arena, slab,
1324 					    bin);
1325 				}
1326 			}
1327 			return ret;
1328 		}
1329 
1330 		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
1331 		bin->slabcur = NULL;
1332 	}
1333 
1334 	if (slab == NULL) {
1335 		return NULL;
1336 	}
1337 	bin->slabcur = slab;
1338 
1339 	assert(extent_nfree_get(bin->slabcur) > 0);
1340 
1341 	return arena_slab_reg_alloc(slab, bin_info);
1342 }
1343 
1344 /* Choose a bin shard and return the locked bin. */
1345 bin_t *
1346 arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
1347     unsigned *binshard) {
1348 	bin_t *bin;
1349 	if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) {
1350 		*binshard = 0;
1351 	} else {
1352 		*binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
1353 	}
1354 	assert(*binshard < bin_infos[binind].n_shards);
1355 	bin = &arena->bins[binind].bin_shards[*binshard];
1356 	malloc_mutex_lock(tsdn, &bin->lock);
1357 
1358 	return bin;
1359 }
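/*
 * Bin sharding: each small size class has bin_infos[binind].n_shards
 * independent bins, and a thread's shard choice is cached in its TSD
 * (binshardsp).  Callers without TSD or without an arena fall back to shard
 * 0.  The intent is to spread bin->lock contention across shards.
 */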
1360 
1361 void
1362 arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
1363     cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
1364 	unsigned i, nfill, cnt;
1365 
1366 	assert(tbin->ncached == 0);
1367 
1368 	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
1369 		prof_idump(tsdn);
1370 	}
1371 
1372 	unsigned binshard;
1373 	bin_t *bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
1374 
1375 	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
1376 	    tcache->lg_fill_div[binind]); i < nfill; i += cnt) {
1377 		extent_t *slab;
1378 		if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
1379 		    0) {
1380 			unsigned tofill = nfill - i;
1381 			cnt = tofill < extent_nfree_get(slab) ?
1382 				tofill : extent_nfree_get(slab);
1383 			arena_slab_reg_alloc_batch(
1384 			   slab, &bin_infos[binind], cnt,
1385 			   tbin->avail - nfill + i);
1386 		} else {
1387 			cnt = 1;
1388 			void *ptr = arena_bin_malloc_hard(tsdn, arena, bin,
1389 			    binind, binshard);
1390 			/*
1391 			 * OOM.  tbin->avail isn't yet filled down to its first
1392 			 * element, so the successful allocations (if any) must
1393 			 * be moved just before tbin->avail before bailing out.
1394 			 */
1395 			if (ptr == NULL) {
1396 				if (i > 0) {
1397 					memmove(tbin->avail - i,
1398 						tbin->avail - nfill,
1399 						i * sizeof(void *));
1400 				}
1401 				break;
1402 			}
1403 			/* Insert such that low regions get used first. */
1404 			*(tbin->avail - nfill + i) = ptr;
1405 		}
1406 		if (config_fill && unlikely(opt_junk_alloc)) {
1407 			for (unsigned j = 0; j < cnt; j++) {
1408 				void* ptr = *(tbin->avail - nfill + i + j);
1409 				arena_alloc_junk_small(ptr, &bin_infos[binind],
1410 							true);
1411 			}
1412 		}
1413 	}
1414 	if (config_stats) {
1415 		bin->stats.nmalloc += i;
1416 		bin->stats.nrequests += tbin->tstats.nrequests;
1417 		bin->stats.curregs += i;
1418 		bin->stats.nfills++;
1419 		tbin->tstats.nrequests = 0;
1420 	}
1421 	malloc_mutex_unlock(tsdn, &bin->lock);
1422 	tbin->ncached = i;
1423 	arena_decay_tick(tsdn, arena);
1424 }
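/*
 * Fill sizing above: nfill = ncached_max >> lg_fill_div[binind], so the
 * tcache refills only a fraction of its capacity (lg_fill_div is tuned by
 * the tcache code based on usage).  Regions are written from
 * tbin->avail - nfill upward so the lowest regions are handed out first; on
 * OOM the partial fill is slid up against tbin->avail before returning.
 */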
1425 
1426 void
1427 arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
1428 	if (!zero) {
1429 		memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
1430 	}
1431 }
1432 
1433 static void
1434 arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
1435 	memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
1436 }
1437 arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
1438     arena_dalloc_junk_small_impl;
1439 
1440 static void *
1441 arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
1442 	void *ret;
1443 	bin_t *bin;
1444 	size_t usize;
1445 	extent_t *slab;
1446 
1447 	assert(binind < SC_NBINS);
1448 	usize = sz_index2size(binind);
1449 	unsigned binshard;
1450 	bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
1451 
1452 	if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
1453 		ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
1454 	} else {
1455 		ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard);
1456 	}
1457 
1458 	if (ret == NULL) {
1459 		malloc_mutex_unlock(tsdn, &bin->lock);
1460 		return NULL;
1461 	}
1462 
1463 	if (config_stats) {
1464 		bin->stats.nmalloc++;
1465 		bin->stats.nrequests++;
1466 		bin->stats.curregs++;
1467 	}
1468 	malloc_mutex_unlock(tsdn, &bin->lock);
1469 	if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
1470 		prof_idump(tsdn);
1471 	}
1472 
1473 	if (!zero) {
1474 		if (config_fill) {
1475 			if (unlikely(opt_junk_alloc)) {
1476 				arena_alloc_junk_small(ret,
1477 				    &bin_infos[binind], false);
1478 			} else if (unlikely(opt_zero)) {
1479 				memset(ret, 0, usize);
1480 			}
1481 		}
1482 	} else {
1483 		if (config_fill && unlikely(opt_junk_alloc)) {
1484 			arena_alloc_junk_small(ret, &bin_infos[binind],
1485 			    true);
1486 		}
1487 		memset(ret, 0, usize);
1488 	}
1489 
1490 	arena_decay_tick(tsdn, arena);
1491 	return ret;
1492 }
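/*
 * Fill-policy sketch for the branches above, assuming a config_fill build:
 *
 *     zero == false && opt_junk_alloc  -> region set to JEMALLOC_ALLOC_JUNK
 *     zero == false && opt_zero        -> region zeroed
 *     zero == true                     -> arena_alloc_junk_small() is called
 *                                         with zero == true (which skips the
 *                                         junk memset), then the region is
 *                                         zeroed
 */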
1493 
1494 void *
1495 arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
1496     bool zero) {
1497 	assert(!tsdn_null(tsdn) || arena != NULL);
1498 
1499 	if (likely(!tsdn_null(tsdn))) {
1500 		arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, size);
1501 	}
1502 	if (unlikely(arena == NULL)) {
1503 		return NULL;
1504 	}
1505 
1506 	if (likely(size <= SC_SMALL_MAXCLASS)) {
1507 		return arena_malloc_small(tsdn, arena, ind, zero);
1508 	}
1509 	return large_malloc(tsdn, arena, sz_index2size(ind), zero);
1510 }
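/*
 * Dispatch sketch: requests no larger than SC_SMALL_MAXCLASS take the
 * slab-backed small path; everything else goes straight to large_malloc().
 * Hypothetical example (actual boundaries depend on the size-class config):
 *
 *     arena_malloc_hard(tsdn, NULL, 64, sz_size2index(64), false);
 *         -> arena_malloc_small()
 *     arena_malloc_hard(tsdn, NULL, 1 << 20, sz_size2index(1 << 20), false);
 *         -> large_malloc()
 */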
1511 
1512 void *
1513 arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
1514     bool zero, tcache_t *tcache) {
1515 	void *ret;
1516 
1517 	if (usize <= SC_SMALL_MAXCLASS
1518 	    && (alignment < PAGE
1519 	    || (alignment == PAGE && (usize & PAGE_MASK) == 0))) {
1520 		/* Small; alignment doesn't require special slab placement. */
1521 		ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
1522 		    zero, tcache, true);
1523 	} else {
1524 		if (likely(alignment <= CACHELINE)) {
1525 			ret = large_malloc(tsdn, arena, usize, zero);
1526 		} else {
1527 			ret = large_palloc(tsdn, arena, usize, alignment, zero);
1528 		}
1529 	}
1530 	return ret;
1531 }
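/*
 * Alignment dispatch sketch (PAGE and CACHELINE are build-time constants):
 *
 *     usize <= SC_SMALL_MAXCLASS && alignment < PAGE        -> small path
 *     usize <= SC_SMALL_MAXCLASS && alignment == PAGE &&
 *         usize is a multiple of PAGE                       -> small path
 *     otherwise, alignment <= CACHELINE                     -> large_malloc()
 *     otherwise                                             -> large_palloc()
 */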
1532 
1533 void
1534 arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
1535 	cassert(config_prof);
1536 	assert(ptr != NULL);
1537 	assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
1538 	assert(usize <= SC_SMALL_MAXCLASS);
1539 
1540 	rtree_ctx_t rtree_ctx_fallback;
1541 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1542 
1543 	extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
1544 	    (uintptr_t)ptr, true);
1545 	arena_t *arena = extent_arena_get(extent);
1546 
1547 	szind_t szind = sz_size2index(usize);
1548 	extent_szind_set(extent, szind);
1549 	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
1550 	    szind, false);
1551 
1552 	prof_accum_cancel(tsdn, &arena->prof_accum, usize);
1553 
1554 	assert(isalloc(tsdn, ptr) == usize);
1555 }
1556 
1557 static size_t
1558 arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
1559 	cassert(config_prof);
1560 	assert(ptr != NULL);
1561 
1562 	extent_szind_set(extent, SC_NBINS);
1563 	rtree_ctx_t rtree_ctx_fallback;
1564 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1565 	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
1566 	    SC_NBINS, false);
1567 
1568 	assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
1569 
1570 	return SC_LARGE_MINCLASS;
1571 }
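/*
 * Sampling sketch: arena_prof_promote() relabels an extent that physically
 * spans SC_LARGE_MINCLASS bytes with the sampled allocation's small size
 * index, so isalloc() reports usize while the backing memory stays large.
 * arena_prof_demote() reverses this at deallocation time by restoring szind
 * SC_NBINS (i.e. "not a small bin") so the extent is torn down as large.
 */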
1572 
1573 void
1574 arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
1575     bool slow_path) {
1576 	cassert(config_prof);
1577 	assert(opt_prof);
1578 
1579 	extent_t *extent = iealloc(tsdn, ptr);
1580 	size_t usize = arena_prof_demote(tsdn, extent, ptr);
1581 	if (usize <= tcache_maxclass) {
1582 		tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
1583 		    sz_size2index(usize), slow_path);
1584 	} else {
1585 		large_dalloc(tsdn, extent);
1586 	}
1587 }
1588 
1589 static void
1590 arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
1591 	/* Dissociate slab from bin. */
1592 	if (slab == bin->slabcur) {
1593 		bin->slabcur = NULL;
1594 	} else {
1595 		szind_t binind = extent_szind_get(slab);
1596 		const bin_info_t *bin_info = &bin_infos[binind];
1597 
1598 		/*
1599 		 * The following block's conditional is necessary because if the
1600 		 * slab only contains one region, then it never gets inserted
1601 		 * into the non-full slabs heap.
1602 		 */
1603 		if (bin_info->nregs == 1) {
1604 			arena_bin_slabs_full_remove(arena, bin, slab);
1605 		} else {
1606 			arena_bin_slabs_nonfull_remove(bin, slab);
1607 		}
1608 	}
1609 }
1610 
1611 static void
1612 arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
1613     bin_t *bin) {
1614 	assert(slab != bin->slabcur);
1615 
1616 	malloc_mutex_unlock(tsdn, &bin->lock);
1617 	/******************************/
1618 	arena_slab_dalloc(tsdn, arena, slab);
1619 	/****************************/
1620 	malloc_mutex_lock(tsdn, &bin->lock);
1621 	if (config_stats) {
1622 		bin->stats.curslabs--;
1623 	}
1624 }
1625 
1626 static void
1627 arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
1628     bin_t *bin) {
1629 	assert(extent_nfree_get(slab) > 0);
1630 
1631 	/*
1632 	 * Make sure that if bin->slabcur is non-NULL, it refers to the
1633 	 * oldest/lowest non-full slab.  It is okay to NULL slabcur out rather
1634 	 * than proactively keeping it pointing at the oldest/lowest non-full
1635 	 * slab.
1636 	 */
1637 	if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
1638 		/* Switch slabcur. */
1639 		if (extent_nfree_get(bin->slabcur) > 0) {
1640 			arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
1641 		} else {
1642 			arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
1643 		}
1644 		bin->slabcur = slab;
1645 		if (config_stats) {
1646 			bin->stats.reslabs++;
1647 		}
1648 	} else {
1649 		arena_bin_slabs_nonfull_insert(bin, slab);
1650 	}
1651 }
1652 
1653 static void
1654 arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
1655     szind_t binind, extent_t *slab, void *ptr, bool junked) {
1656 	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
1657 	const bin_info_t *bin_info = &bin_infos[binind];
1658 
1659 	if (!junked && config_fill && unlikely(opt_junk_free)) {
1660 		arena_dalloc_junk_small(ptr, bin_info);
1661 	}
1662 
1663 	arena_slab_reg_dalloc(slab, slab_data, ptr);
1664 	unsigned nfree = extent_nfree_get(slab);
1665 	if (nfree == bin_info->nregs) {
1666 		arena_dissociate_bin_slab(arena, slab, bin);
1667 		arena_dalloc_bin_slab(tsdn, arena, slab, bin);
1668 	} else if (nfree == 1 && slab != bin->slabcur) {
1669 		arena_bin_slabs_full_remove(arena, bin, slab);
1670 		arena_bin_lower_slab(tsdn, arena, slab, bin);
1671 	}
1672 
1673 	if (config_stats) {
1674 		bin->stats.ndalloc++;
1675 		bin->stats.curregs--;
1676 	}
1677 }
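/*
 * Slab-state sketch for the nfree checks above, for a hypothetical bin whose
 * slabs hold nregs == 4 regions:
 *
 *     nfree goes 0 -> 1   the slab was full; pull it off the full list and
 *                         put it back into circulation (unless it is slabcur)
 *     nfree == 4          the slab is now empty; dissociate it and hand the
 *                         extent back to the arena
 *     otherwise           the slab stays where it is
 */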
1678 
1679 void
1680 arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
1681     szind_t binind, extent_t *extent, void *ptr) {
1682 	arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
1683 	    true);
1684 }
1685 
1686 static void
1687 arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
1688 	szind_t binind = extent_szind_get(extent);
1689 	unsigned binshard = extent_binshard_get(extent);
1690 	bin_t *bin = &arena->bins[binind].bin_shards[binshard];
1691 
1692 	malloc_mutex_lock(tsdn, &bin->lock);
1693 	arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
1694 	    false);
1695 	malloc_mutex_unlock(tsdn, &bin->lock);
1696 }
1697 
1698 void
1699 arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
1700 	extent_t *extent = iealloc(tsdn, ptr);
1701 	arena_t *arena = extent_arena_get(extent);
1702 
1703 	arena_dalloc_bin(tsdn, arena, extent, ptr);
1704 	arena_decay_tick(tsdn, arena);
1705 }
1706 
1707 bool
1708 arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
1709     size_t extra, bool zero, size_t *newsize) {
1710 	bool ret;
1711 	/* Callers passing non-zero extra must have already clamped it. */
1712 	assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
1713 
1714 	extent_t *extent = iealloc(tsdn, ptr);
1715 	if (unlikely(size > SC_LARGE_MAXCLASS)) {
1716 		ret = true;
1717 		goto done;
1718 	}
1719 
1720 	size_t usize_min = sz_s2u(size);
1721 	size_t usize_max = sz_s2u(size + extra);
1722 	if (likely(oldsize <= SC_SMALL_MAXCLASS && usize_min
1723 	    <= SC_SMALL_MAXCLASS)) {
1724 		/*
1725 		 * Avoid moving the allocation if the size class can be left the
1726 		 * same.
1727 		 */
1728 		assert(bin_infos[sz_size2index(oldsize)].reg_size ==
1729 		    oldsize);
1730 		if ((usize_max > SC_SMALL_MAXCLASS
1731 		    || sz_size2index(usize_max) != sz_size2index(oldsize))
1732 		    && (size > oldsize || usize_max < oldsize)) {
1733 			ret = true;
1734 			goto done;
1735 		}
1736 
1737 		arena_decay_tick(tsdn, extent_arena_get(extent));
1738 		ret = false;
1739 	} else if (oldsize >= SC_LARGE_MINCLASS
1740 	    && usize_max >= SC_LARGE_MINCLASS) {
1741 		ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
1742 		    zero);
1743 	} else {
1744 		ret = true;
1745 	}
1746 done:
1747 	assert(extent == iealloc(tsdn, ptr));
1748 	*newsize = extent_usize_get(extent);
1749 
1750 	return ret;
1751 }
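/*
 * In-place resize sketch: a small allocation can be resized in place only if
 * the new request still maps to its current size class.  For hypothetical
 * 48-byte and 64-byte classes, an allocation in the 64-byte class can be
 * resized in place to any request in (48, 64], while growing it to 65 bytes
 * returns true and forces the caller to move it.  Large allocations defer to
 * large_ralloc_no_move(), which may expand or shrink the extent itself.
 */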
1752 
1753 static void *
1754 arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
1755     size_t alignment, bool zero, tcache_t *tcache) {
1756 	if (alignment == 0) {
1757 		return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
1758 		    zero, tcache, true);
1759 	}
1760 	usize = sz_sa2u(usize, alignment);
1761 	if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
1762 		return NULL;
1763 	}
1764 	return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
1765 }
1766 
1767 void *
1768 arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
1769     size_t size, size_t alignment, bool zero, tcache_t *tcache,
1770     hook_ralloc_args_t *hook_args) {
1771 	size_t usize = sz_s2u(size);
1772 	if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) {
1773 		return NULL;
1774 	}
1775 
1776 	if (likely(usize <= SC_SMALL_MAXCLASS)) {
1777 		/* Try to avoid moving the allocation. */
1778 		UNUSED size_t newsize;
1779 		if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero,
1780 		    &newsize)) {
1781 			hook_invoke_expand(hook_args->is_realloc
1782 			    ? hook_expand_realloc : hook_expand_rallocx,
1783 			    ptr, oldsize, usize, (uintptr_t)ptr,
1784 			    hook_args->args);
1785 			return ptr;
1786 		}
1787 	}
1788 
1789 	if (oldsize >= SC_LARGE_MINCLASS
1790 	    && usize >= SC_LARGE_MINCLASS) {
1791 		return large_ralloc(tsdn, arena, ptr, usize,
1792 		    alignment, zero, tcache, hook_args);
1793 	}
1794 
1795 	/*
1796 	 * size and oldsize are different enough that we need to move the
1797 	 * object.  In that case, fall back to allocating new space and copying.
1798 	 */
1799 	void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
1800 	    zero, tcache);
1801 	if (ret == NULL) {
1802 		return NULL;
1803 	}
1804 
1805 	hook_invoke_alloc(hook_args->is_realloc
1806 	    ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret,
1807 	    hook_args->args);
1808 	hook_invoke_dalloc(hook_args->is_realloc
1809 	    ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
1810 
1811 	/*
1812 	 * Junk/zero filling was already done by
1813 	 * ipalloc()/arena_malloc().
1814 	 */
1815 	size_t copysize = (usize < oldsize) ? usize : oldsize;
1816 	memcpy(ret, ptr, copysize);
1817 	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
1818 	return ret;
1819 }
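/*
 * Move-path sketch: when the size class has to change, a fresh allocation is
 * obtained via arena_ralloc_move_helper(), min(usize, oldsize) bytes are
 * copied over, the realloc/rallocx hooks fire for the alloc/dalloc pair, and
 * the old object is released with isdalloct().  Junk/zero filling of the new
 * object already happened inside the allocation path.
 */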
1820 
1821 dss_prec_t
1822 arena_dss_prec_get(arena_t *arena) {
1823 	return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
1824 }
1825 
1826 bool
1827 arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
1828 	if (!have_dss) {
1829 		return (dss_prec != dss_prec_disabled);
1830 	}
1831 	atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
1832 	return false;
1833 }
1834 
1835 ssize_t
1836 arena_dirty_decay_ms_default_get(void) {
1837 	return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
1838 }
1839 
1840 bool
1841 arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
1842 	if (!arena_decay_ms_valid(decay_ms)) {
1843 		return true;
1844 	}
1845 	atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
1846 	return false;
1847 }
1848 
1849 ssize_t
1850 arena_muzzy_decay_ms_default_get(void) {
1851 	return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
1852 }
1853 
1854 bool
1855 arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
1856 	if (!arena_decay_ms_valid(decay_ms)) {
1857 		return true;
1858 	}
1859 	atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
1860 	return false;
1861 }
1862 
1863 bool
1864 arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
1865     size_t *new_limit) {
1866 	assert(opt_retain);
1867 
1868 	pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
1869 	if (new_limit != NULL) {
1870 		size_t limit = *new_limit;
1871 		/* Grow no more than the new limit. */
1872 		if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) {
1873 			return true;
1874 		}
1875 	}
1876 
1877 	malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
1878 	if (old_limit != NULL) {
1879 		*old_limit = sz_pind2sz(arena->retain_grow_limit);
1880 	}
1881 	if (new_limit != NULL) {
1882 		arena->retain_grow_limit = new_ind;
1883 	}
1884 	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
1885 
1886 	return false;
1887 }
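/*
 * Rounding sketch for the check above: sz_psz2ind(limit + 1) - 1 is the
 * largest page-size-class index whose class size does not exceed limit.  With
 * hypothetical 16 KiB and 32 KiB classes, a limit of 20 KiB maps to the
 * 16 KiB class, so retain_grow_limit never overshoots the caller's request.
 */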
1888 
1889 unsigned
1890 arena_nthreads_get(arena_t *arena, bool internal) {
1891 	return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
1892 }
1893 
1894 void
1895 arena_nthreads_inc(arena_t *arena, bool internal) {
1896 	atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
1897 }
1898 
1899 void
1900 arena_nthreads_dec(arena_t *arena, bool internal) {
1901 	atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
1902 }
1903 
1904 size_t
1905 arena_extent_sn_next(arena_t *arena) {
1906 	return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
1907 }
1908 
1909 arena_t *
1910 arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
1911 	arena_t *arena;
1912 	base_t *base;
1913 	unsigned i;
1914 
1915 	if (ind == 0) {
1916 		base = b0get();
1917 	} else {
1918 		base = base_new(tsdn, ind, extent_hooks);
1919 		if (base == NULL) {
1920 			return NULL;
1921 		}
1922 	}
1923 
1924 	unsigned nbins_total = 0;
1925 	for (i = 0; i < SC_NBINS; i++) {
1926 		nbins_total += bin_infos[i].n_shards;
1927 	}
1928 	size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total;
1929 	arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE);
1930 	if (arena == NULL) {
1931 		goto label_error;
1932 	}
1933 
1934 	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
1935 	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
1936 	arena->last_thd = NULL;
1937 
1938 	if (config_stats) {
1939 		if (arena_stats_init(tsdn, &arena->stats)) {
1940 			goto label_error;
1941 		}
1942 
1943 		ql_new(&arena->tcache_ql);
1944 		ql_new(&arena->cache_bin_array_descriptor_ql);
1945 		if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
1946 		    WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
1947 			goto label_error;
1948 		}
1949 	}
1950 
1951 	if (config_prof) {
1952 		if (prof_accum_init(tsdn, &arena->prof_accum)) {
1953 			goto label_error;
1954 		}
1955 	}
1956 
1957 	if (config_cache_oblivious) {
1958 		/*
1959 		 * A nondeterministic seed based on the address of arena reduces
1960 		 * the likelihood of lockstep non-uniform cache index
1961 		 * utilization among identical concurrent processes, but at the
1962 		 * cost of test repeatability.  For debug builds, instead use a
1963 		 * deterministic seed.
1964 		 */
1965 		atomic_store_zu(&arena->offset_state, config_debug ? ind :
1966 		    (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
1967 	}
1968 
1969 	atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);
1970 
1971 	atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
1972 	    ATOMIC_RELAXED);
1973 
1974 	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
1975 
1976 	extent_list_init(&arena->large);
1977 	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
1978 	    WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
1979 		goto label_error;
1980 	}
1981 
1982 	/*
1983 	 * Delay coalescing for dirty extents despite the disruptive effect on
1984 	 * memory layout for best-fit extent allocation, since cached extents
1985 	 * are likely to be reused soon after deallocation, and the cost of
1986 	 * merging/splitting extents is non-trivial.
1987 	 */
1988 	if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
1989 	    true)) {
1990 		goto label_error;
1991 	}
1992 	/*
1993 	 * Coalesce muzzy extents immediately, because operations on them are in
1994 	 * the critical path much less often than for dirty extents.
1995 	 */
1996 	if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
1997 	    false)) {
1998 		goto label_error;
1999 	}
2000 	/*
2001 	 * Coalesce retained extents immediately, in part because they will
2002 	 * never be evicted (and therefore there's no opportunity for delayed
2003 	 * coalescing), but also because operations on retained extents are not
2004 	 * in the critical path.
2005 	 */
2006 	if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
2007 	    false)) {
2008 		goto label_error;
2009 	}
2010 
2011 	if (arena_decay_init(&arena->decay_dirty,
2012 	    arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
2013 		goto label_error;
2014 	}
2015 	if (arena_decay_init(&arena->decay_muzzy,
2016 	    arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
2017 		goto label_error;
2018 	}
2019 
2020 	arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
2021 	arena->retain_grow_limit = sz_psz2ind(SC_LARGE_MAXCLASS);
2022 	if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
2023 	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
2024 		goto label_error;
2025 	}
2026 
2027 	extent_avail_new(&arena->extent_avail);
2028 	if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
2029 	    WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
2030 		goto label_error;
2031 	}
2032 
2033 	/* Initialize bins. */
2034 	uintptr_t bin_addr = (uintptr_t)arena + sizeof(arena_t);
2035 	atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE);
2036 	for (i = 0; i < SC_NBINS; i++) {
2037 		unsigned nshards = bin_infos[i].n_shards;
2038 		arena->bins[i].bin_shards = (bin_t *)bin_addr;
2039 		bin_addr += nshards * sizeof(bin_t);
2040 		for (unsigned j = 0; j < nshards; j++) {
2041 			bool err = bin_init(&arena->bins[i].bin_shards[j]);
2042 			if (err) {
2043 				goto label_error;
2044 			}
2045 		}
2046 	}
2047 	assert(bin_addr == (uintptr_t)arena + arena_size);
2048 
2049 	arena->base = base;
2050 	/* Set arena before creating background threads. */
2051 	arena_set(ind, arena);
2052 
2053 	nstime_init(&arena->create_time, 0);
2054 	nstime_update(&arena->create_time);
2055 
2056 	/* We don't support reentrancy for arena 0 bootstrapping. */
2057 	if (ind != 0) {
2058 		/*
2059 		 * If we're here, then arena 0 already exists, so bootstrapping
2060 		 * is done enough that we should have tsd.
2061 		 */
2062 		assert(!tsdn_null(tsdn));
2063 		pre_reentrancy(tsdn_tsd(tsdn), arena);
2064 		if (test_hooks_arena_new_hook) {
2065 			test_hooks_arena_new_hook();
2066 		}
2067 		post_reentrancy(tsdn_tsd(tsdn));
2068 	}
2069 
2070 	return arena;
2071 label_error:
2072 	if (ind != 0) {
2073 		base_delete(tsdn, base);
2074 	}
2075 	return NULL;
2076 }
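/*
 * Layout sketch for the base allocation made in arena_new(): the arena header
 * and every bin shard live in a single contiguous block,
 *
 *     [ arena_t | bin 0 shards | bin 1 shards | ... | bin SC_NBINS-1 shards ]
 *
 * with bins[i].bin_shards pointing into that block; the assert after the
 * initialization loop verifies that exactly nbins_total bin_t slots were
 * carved out.
 */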
2077 
2078 arena_t *
2079 arena_choose_huge(tsd_t *tsd) {
2080 	/* huge_arena_ind can be 0 during init (will use a0). */
2081 	if (huge_arena_ind == 0) {
2082 		assert(!malloc_initialized());
2083 	}
2084 
2085 	arena_t *huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, false);
2086 	if (huge_arena == NULL) {
2087 		/* Create the huge arena on demand. */
2088 		assert(huge_arena_ind != 0);
2089 		huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, true);
2090 		if (huge_arena == NULL) {
2091 			return NULL;
2092 		}
2093 		/*
2094 		 * Purge eagerly for huge allocations, because: 1) number of
2095 		 * huge allocations is usually small, which means ticker based
2096 		 * decay is not reliable; and 2) less immediate reuse is
2097 		 * expected for huge allocations.
2098 		 */
2099 		if (arena_dirty_decay_ms_default_get() > 0) {
2100 			arena_dirty_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
2101 		}
2102 		if (arena_muzzy_decay_ms_default_get() > 0) {
2103 			arena_muzzy_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
2104 		}
2105 	}
2106 
2107 	return huge_arena;
2108 }
2109 
2110 bool
2111 arena_init_huge(void) {
2112 	bool huge_enabled;
2113 
2114 	/* The threshold should be a large size class. */
2115 	if (opt_oversize_threshold > SC_LARGE_MAXCLASS ||
2116 	    opt_oversize_threshold < SC_LARGE_MINCLASS) {
2117 		opt_oversize_threshold = 0;
2118 		oversize_threshold = SC_LARGE_MAXCLASS + PAGE;
2119 		huge_enabled = false;
2120 	} else {
2121 		/* Reserve the index for the huge arena. */
2122 		huge_arena_ind = narenas_total_get();
2123 		oversize_threshold = opt_oversize_threshold;
2124 		huge_enabled = true;
2125 	}
2126 
2127 	return huge_enabled;
2128 }
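/*
 * Threshold sketch: an opt_oversize_threshold outside
 * [SC_LARGE_MINCLASS, SC_LARGE_MAXCLASS] disables the dedicated huge arena
 * and parks oversize_threshold above SC_LARGE_MAXCLASS so that no allocation
 * can ever cross it; otherwise the next arena index is reserved for the huge
 * arena and the configured threshold takes effect.
 */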
2129 
2130 bool
2131 arena_is_huge(unsigned arena_ind) {
2132 	if (huge_arena_ind == 0) {
2133 		return false;
2134 	}
2135 	return (arena_ind == huge_arena_ind);
2136 }
2137 
2138 void
2139 arena_boot(sc_data_t *sc_data) {
2140 	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
2141 	arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
2142 	for (unsigned i = 0; i < SC_NBINS; i++) {
2143 		sc_t *sc = &sc_data->sc[i];
2144 		div_init(&arena_binind_div_info[i],
2145 		    (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta));
2146 	}
2147 }
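/*
 * div_init() precomputes the magic constants that let slab code turn a region
 * offset into a region index without a hardware divide.  The divisor rebuilt
 * here is the bin's region size; e.g. for a hypothetical class with
 * lg_base == 6, ndelta == 1, lg_delta == 4 it is (1 << 6) + (1 << 4) == 80.
 */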
2148 
2149 void
2150 arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
2151 	malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
2152 	malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
2153 }
2154 
2155 void
2156 arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
2157 	if (config_stats) {
2158 		malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
2159 	}
2160 }
2161 
2162 void
2163 arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
2164 	malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
2165 }
2166 
2167 void
2168 arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
2169 	extents_prefork(tsdn, &arena->extents_dirty);
2170 	extents_prefork(tsdn, &arena->extents_muzzy);
2171 	extents_prefork(tsdn, &arena->extents_retained);
2172 }
2173 
2174 void
2175 arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
2176 	malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
2177 }
2178 
2179 void
2180 arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
2181 	base_prefork(tsdn, arena->base);
2182 }
2183 
2184 void
2185 arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
2186 	malloc_mutex_prefork(tsdn, &arena->large_mtx);
2187 }
2188 
2189 void
2190 arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
2191 	for (unsigned i = 0; i < SC_NBINS; i++) {
2192 		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
2193 			bin_prefork(tsdn, &arena->bins[i].bin_shards[j]);
2194 		}
2195 	}
2196 }
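/*
 * Fork-handling sketch: arena_prefork0() through arena_prefork7() are meant
 * to be called in increasing order by the global fork handler, so that the
 * decay, tcache_ql, extent_grow, extents, extent_avail, base, large, and bin
 * mutexes are all held across fork().  The postfork functions below then
 * unlock them in the parent and reinitialize them in the child.
 */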
2197 
2198 void
2199 arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
2200 	unsigned i;
2201 
2202 	for (i = 0; i < SC_NBINS; i++) {
2203 		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
2204 			bin_postfork_parent(tsdn,
2205 			    &arena->bins[i].bin_shards[j]);
2206 		}
2207 	}
2208 	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
2209 	base_postfork_parent(tsdn, arena->base);
2210 	malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
2211 	extents_postfork_parent(tsdn, &arena->extents_dirty);
2212 	extents_postfork_parent(tsdn, &arena->extents_muzzy);
2213 	extents_postfork_parent(tsdn, &arena->extents_retained);
2214 	malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
2215 	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
2216 	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
2217 	if (config_stats) {
2218 		malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
2219 	}
2220 }
2221 
2222 void
2223 arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
2224 	unsigned i;
2225 
2226 	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
2227 	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
2228 	if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
2229 		arena_nthreads_inc(arena, false);
2230 	}
2231 	if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
2232 		arena_nthreads_inc(arena, true);
2233 	}
2234 	if (config_stats) {
2235 		ql_new(&arena->tcache_ql);
2236 		ql_new(&arena->cache_bin_array_descriptor_ql);
2237 		tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
2238 		if (tcache != NULL && tcache->arena == arena) {
2239 			ql_elm_new(tcache, link);
2240 			ql_tail_insert(&arena->tcache_ql, tcache, link);
2241 			cache_bin_array_descriptor_init(
2242 			    &tcache->cache_bin_array_descriptor,
2243 			    tcache->bins_small, tcache->bins_large);
2244 			ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
2245 			    &tcache->cache_bin_array_descriptor, link);
2246 		}
2247 	}
2248 
2249 	for (i = 0; i < SC_NBINS; i++) {
2250 		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
2251 			bin_postfork_child(tsdn, &arena->bins[i].bin_shards[j]);
2252 		}
2253 	}
2254 	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
2255 	base_postfork_child(tsdn, arena->base);
2256 	malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
2257 	extents_postfork_child(tsdn, &arena->extents_dirty);
2258 	extents_postfork_child(tsdn, &arena->extents_muzzy);
2259 	extents_postfork_child(tsdn, &arena->extents_retained);
2260 	malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
2261 	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
2262 	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
2263 	if (config_stats) {
2264 		malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
2265 	}
2266 }
2267