#define JEMALLOC_CTL_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/
/* Data. */

/*
 * ctl_mtx protects the following:
 * - ctl_stats->*
 */
static malloc_mutex_t	ctl_mtx;
static bool		ctl_initialized;
static ctl_stats_t	*ctl_stats;
static ctl_arenas_t	*ctl_arenas;

/******************************************************************************/
/* Helpers for named and indexed nodes. */

static const ctl_named_node_t *
ctl_named_node(const ctl_node_t *node) {
	return ((node->named) ? (const ctl_named_node_t *)node : NULL);
}

static const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t *node, size_t index) {
	const ctl_named_node_t *children = ctl_named_node(node->children);

	return (children ? &children[index] : NULL);
}

static const ctl_indexed_node_t *
ctl_indexed_node(const ctl_node_t *node) {
	return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
}

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

#define CTL_PROTO(n)							\
static int	n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,	\
    void *oldp, size_t *oldlenp, void *newp, size_t newlen);

#define INDEX_PROTO(n)							\
static const ctl_named_node_t	*n##_index(tsdn_t *tsdn,		\
    const size_t *mib, size_t miblen, size_t i);

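/*
 * For example, CTL_PROTO(version) declares:
 *
 *	static int version_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
 *	    void *oldp, size_t *oldlenp, void *newp, size_t newlen);
 */
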
CTL_PROTO(version)
CTL_PROTO(epoch)
CTL_PROTO(background_thread)
CTL_PROTO(max_background_threads)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_prof_name)
CTL_PROTO(thread_prof_active)
CTL_PROTO(thread_arena)
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_malloc_conf)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
CTL_PROTO(config_utrace)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_abort_conf)
CTL_PROTO(opt_metadata_thp)
CTL_PROTO(opt_retain)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_percpu_arena)
CTL_PROTO(opt_oversize_threshold)
CTL_PROTO(opt_background_thread)
CTL_PROTO(opt_max_background_threads)
CTL_PROTO(opt_dirty_decay_ms)
CTL_PROTO(opt_muzzy_decay_ms)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_stats_print_opts)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_thp)
CTL_PROTO(opt_lg_extent_max_active_fit)
CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
CTL_PROTO(opt_prof_thread_active_init)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
CTL_PROTO(arena_i_initialized)
CTL_PROTO(arena_i_decay)
CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_reset)
CTL_PROTO(arena_i_destroy)
CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_dirty_decay_ms)
CTL_PROTO(arena_i_muzzy_decay_ms)
CTL_PROTO(arena_i_extent_hooks)
CTL_PROTO(arena_i_retain_grow_limit)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_slab_size)
CTL_PROTO(arenas_bin_i_nshards)
INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lextent_i_size)
INDEX_PROTO(arenas_lextent_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_dirty_decay_ms)
CTL_PROTO(arenas_muzzy_decay_ms)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlextents)
CTL_PROTO(arenas_create)
CTL_PROTO(arenas_lookup)
CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_gdump)
CTL_PROTO(prof_reset)
CTL_PROTO(prof_interval)
CTL_PROTO(lg_prof_sample)
CTL_PROTO(prof_log_start)
CTL_PROTO(prof_log_stop)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
CTL_PROTO(stats_arenas_i_small_nrequests)
CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_curregs)
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nslabs)
CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
CTL_PROTO(stats_arenas_i_bins_j_curslabs)
INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
INDEX_PROTO(stats_arenas_i_lextents_j)
CTL_PROTO(stats_arenas_i_extents_j_ndirty)
CTL_PROTO(stats_arenas_i_extents_j_nmuzzy)
CTL_PROTO(stats_arenas_i_extents_j_nretained)
CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes)
CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes)
CTL_PROTO(stats_arenas_i_extents_j_retained_bytes)
INDEX_PROTO(stats_arenas_i_extents_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_uptime)
CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_dirty_decay_ms)
CTL_PROTO(stats_arenas_i_muzzy_decay_ms)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_pmuzzy)
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_retained)
CTL_PROTO(stats_arenas_i_extent_avail)
CTL_PROTO(stats_arenas_i_dirty_npurge)
CTL_PROTO(stats_arenas_i_dirty_nmadvise)
CTL_PROTO(stats_arenas_i_dirty_purged)
CTL_PROTO(stats_arenas_i_muzzy_npurge)
CTL_PROTO(stats_arenas_i_muzzy_nmadvise)
CTL_PROTO(stats_arenas_i_muzzy_purged)
CTL_PROTO(stats_arenas_i_base)
CTL_PROTO(stats_arenas_i_internal)
CTL_PROTO(stats_arenas_i_metadata_thp)
CTL_PROTO(stats_arenas_i_tcache_bytes)
CTL_PROTO(stats_arenas_i_resident)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
CTL_PROTO(stats_background_thread_num_threads)
CTL_PROTO(stats_background_thread_num_runs)
CTL_PROTO(stats_background_thread_run_interval)
CTL_PROTO(stats_metadata)
CTL_PROTO(stats_metadata_thp)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
CTL_PROTO(stats_retained)
CTL_PROTO(experimental_hooks_install)
CTL_PROTO(experimental_hooks_remove)

#define MUTEX_STATS_CTL_PROTO_GEN(n)					\
CTL_PROTO(stats_##n##_num_ops)						\
CTL_PROTO(stats_##n##_num_wait)						\
CTL_PROTO(stats_##n##_num_spin_acq)					\
CTL_PROTO(stats_##n##_num_owner_switch)					\
CTL_PROTO(stats_##n##_total_wait_time)					\
CTL_PROTO(stats_##n##_max_wait_time)					\
CTL_PROTO(stats_##n##_max_num_thds)

/* Global mutexes. */
#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx)
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP

/* Per-arena mutexes. */
#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx)
MUTEX_PROF_ARENA_MUTEXES
#undef OP

/* Arena bin mutexes. */
MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex)
#undef MUTEX_STATS_CTL_PROTO_GEN

CTL_PROTO(stats_mutexes_reset)

/******************************************************************************/
/* mallctl tree. */

#define NAME(n)	{true},	n
#define CHILD(t, c)							\
	sizeof(c##_node) / sizeof(ctl_##t##_node_t),			\
	(ctl_node_t *)c##_node,						\
	NULL
#define CTL(c)	0, NULL, c##_ctl

/*
 * Only handles internal indexed nodes, since there are currently no external
 * ones.
 */
#define INDEX(i)	{false},	i##_index
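
/*
 * In each entry below, NAME() fills in the name of a named node, CHILD()
 * links a (named or indexed) subtree and leaves the handler NULL, and CTL()
 * installs a terminal handler with no children.  INDEX() creates an unnamed
 * indexed node whose children are resolved at lookup time by the given
 * *_index() callback.
 */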

static const ctl_named_node_t	thread_tcache_node[] = {
	{NAME("enabled"),	CTL(thread_tcache_enabled)},
	{NAME("flush"),		CTL(thread_tcache_flush)}
};

static const ctl_named_node_t	thread_prof_node[] = {
	{NAME("name"),		CTL(thread_prof_name)},
	{NAME("active"),	CTL(thread_prof_active)}
};

static const ctl_named_node_t	thread_node[] = {
	{NAME("arena"),		CTL(thread_arena)},
	{NAME("allocated"),	CTL(thread_allocated)},
	{NAME("allocatedp"),	CTL(thread_allocatedp)},
	{NAME("deallocated"),	CTL(thread_deallocated)},
	{NAME("deallocatedp"),	CTL(thread_deallocatedp)},
	{NAME("tcache"),	CHILD(named, thread_tcache)},
	{NAME("prof"),		CHILD(named, thread_prof)}
};

static const ctl_named_node_t	config_node[] = {
	{NAME("cache_oblivious"), CTL(config_cache_oblivious)},
	{NAME("debug"),		CTL(config_debug)},
	{NAME("fill"),		CTL(config_fill)},
	{NAME("lazy_lock"),	CTL(config_lazy_lock)},
	{NAME("malloc_conf"),	CTL(config_malloc_conf)},
	{NAME("prof"),		CTL(config_prof)},
	{NAME("prof_libgcc"),	CTL(config_prof_libgcc)},
	{NAME("prof_libunwind"), CTL(config_prof_libunwind)},
	{NAME("stats"),		CTL(config_stats)},
	{NAME("utrace"),	CTL(config_utrace)},
	{NAME("xmalloc"),	CTL(config_xmalloc)}
};

static const ctl_named_node_t opt_node[] = {
	{NAME("abort"),		CTL(opt_abort)},
	{NAME("abort_conf"),	CTL(opt_abort_conf)},
	{NAME("metadata_thp"),	CTL(opt_metadata_thp)},
	{NAME("retain"),	CTL(opt_retain)},
	{NAME("dss"),		CTL(opt_dss)},
	{NAME("narenas"),	CTL(opt_narenas)},
	{NAME("percpu_arena"),	CTL(opt_percpu_arena)},
	{NAME("oversize_threshold"),	CTL(opt_oversize_threshold)},
	{NAME("background_thread"),	CTL(opt_background_thread)},
	{NAME("max_background_threads"),	CTL(opt_max_background_threads)},
	{NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
	{NAME("stats_print"),	CTL(opt_stats_print)},
	{NAME("stats_print_opts"),	CTL(opt_stats_print_opts)},
	{NAME("junk"),		CTL(opt_junk)},
	{NAME("zero"),		CTL(opt_zero)},
	{NAME("utrace"),	CTL(opt_utrace)},
	{NAME("xmalloc"),	CTL(opt_xmalloc)},
	{NAME("tcache"),	CTL(opt_tcache)},
	{NAME("thp"),		CTL(opt_thp)},
	{NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)},
	{NAME("lg_tcache_max"),	CTL(opt_lg_tcache_max)},
	{NAME("prof"),		CTL(opt_prof)},
	{NAME("prof_prefix"),	CTL(opt_prof_prefix)},
	{NAME("prof_active"),	CTL(opt_prof_active)},
	{NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
	{NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
	{NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
	{NAME("prof_gdump"),	CTL(opt_prof_gdump)},
	{NAME("prof_final"),	CTL(opt_prof_final)},
	{NAME("prof_leak"),	CTL(opt_prof_leak)},
	{NAME("prof_accum"),	CTL(opt_prof_accum)}
};

static const ctl_named_node_t	tcache_node[] = {
	{NAME("create"),	CTL(tcache_create)},
	{NAME("flush"),		CTL(tcache_flush)},
	{NAME("destroy"),	CTL(tcache_destroy)}
};

static const ctl_named_node_t arena_i_node[] = {
	{NAME("initialized"),	CTL(arena_i_initialized)},
	{NAME("decay"),		CTL(arena_i_decay)},
	{NAME("purge"),		CTL(arena_i_purge)},
	{NAME("reset"),		CTL(arena_i_reset)},
	{NAME("destroy"),	CTL(arena_i_destroy)},
	{NAME("dss"),		CTL(arena_i_dss)},
	{NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
	{NAME("extent_hooks"),	CTL(arena_i_extent_hooks)},
	{NAME("retain_grow_limit"),	CTL(arena_i_retain_grow_limit)}
};
static const ctl_named_node_t super_arena_i_node[] = {
	{NAME(""),		CHILD(named, arena_i)}
};

static const ctl_indexed_node_t arena_node[] = {
	{INDEX(arena_i)}
};

static const ctl_named_node_t arenas_bin_i_node[] = {
	{NAME("size"),		CTL(arenas_bin_i_size)},
	{NAME("nregs"),		CTL(arenas_bin_i_nregs)},
	{NAME("slab_size"),	CTL(arenas_bin_i_slab_size)},
	{NAME("nshards"),	CTL(arenas_bin_i_nshards)}
};
static const ctl_named_node_t super_arenas_bin_i_node[] = {
	{NAME(""),		CHILD(named, arenas_bin_i)}
};

static const ctl_indexed_node_t arenas_bin_node[] = {
	{INDEX(arenas_bin_i)}
};

static const ctl_named_node_t arenas_lextent_i_node[] = {
	{NAME("size"),		CTL(arenas_lextent_i_size)}
};
static const ctl_named_node_t super_arenas_lextent_i_node[] = {
	{NAME(""),		CHILD(named, arenas_lextent_i)}
};

static const ctl_indexed_node_t arenas_lextent_node[] = {
	{INDEX(arenas_lextent_i)}
};

static const ctl_named_node_t arenas_node[] = {
	{NAME("narenas"),	CTL(arenas_narenas)},
	{NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)},
	{NAME("quantum"),	CTL(arenas_quantum)},
	{NAME("page"),		CTL(arenas_page)},
	{NAME("tcache_max"),	CTL(arenas_tcache_max)},
	{NAME("nbins"),		CTL(arenas_nbins)},
	{NAME("nhbins"),	CTL(arenas_nhbins)},
	{NAME("bin"),		CHILD(indexed, arenas_bin)},
	{NAME("nlextents"),	CTL(arenas_nlextents)},
	{NAME("lextent"),	CHILD(indexed, arenas_lextent)},
	{NAME("create"),	CTL(arenas_create)},
	{NAME("lookup"),	CTL(arenas_lookup)}
};

static const ctl_named_node_t	prof_node[] = {
	{NAME("thread_active_init"), CTL(prof_thread_active_init)},
	{NAME("active"),	CTL(prof_active)},
	{NAME("dump"),		CTL(prof_dump)},
	{NAME("gdump"),		CTL(prof_gdump)},
	{NAME("reset"),		CTL(prof_reset)},
	{NAME("interval"),	CTL(prof_interval)},
	{NAME("lg_sample"),	CTL(lg_prof_sample)},
	{NAME("log_start"),	CTL(prof_log_start)},
	{NAME("log_stop"),	CTL(prof_log_stop)}
};

static const ctl_named_node_t stats_arenas_i_small_node[] = {
	{NAME("allocated"),	CTL(stats_arenas_i_small_allocated)},
	{NAME("nmalloc"),	CTL(stats_arenas_i_small_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_small_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_small_nrequests)}
};

static const ctl_named_node_t stats_arenas_i_large_node[] = {
	{NAME("allocated"),	CTL(stats_arenas_i_large_allocated)},
	{NAME("nmalloc"),	CTL(stats_arenas_i_large_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_large_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_large_nrequests)}
};

#define MUTEX_PROF_DATA_NODE(prefix)					\
static const ctl_named_node_t stats_##prefix##_node[] = {		\
	{NAME("num_ops"),						\
	 CTL(stats_##prefix##_num_ops)},				\
	{NAME("num_wait"),						\
	 CTL(stats_##prefix##_num_wait)},				\
	{NAME("num_spin_acq"),						\
	 CTL(stats_##prefix##_num_spin_acq)},				\
	{NAME("num_owner_switch"),					\
	 CTL(stats_##prefix##_num_owner_switch)},			\
	{NAME("total_wait_time"),					\
	 CTL(stats_##prefix##_total_wait_time)},			\
	{NAME("max_wait_time"),						\
	 CTL(stats_##prefix##_max_wait_time)},				\
	{NAME("max_num_thds"),						\
	 CTL(stats_##prefix##_max_num_thds)}				\
	/* The number of currently waiting threads is not provided. */	\
};

MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex)

static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
	{NAME("nmalloc"),	CTL(stats_arenas_i_bins_j_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_bins_j_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_bins_j_nrequests)},
	{NAME("curregs"),	CTL(stats_arenas_i_bins_j_curregs)},
	{NAME("nfills"),	CTL(stats_arenas_i_bins_j_nfills)},
	{NAME("nflushes"),	CTL(stats_arenas_i_bins_j_nflushes)},
	{NAME("nslabs"),	CTL(stats_arenas_i_bins_j_nslabs)},
	{NAME("nreslabs"),	CTL(stats_arenas_i_bins_j_nreslabs)},
	{NAME("curslabs"),	CTL(stats_arenas_i_bins_j_curslabs)},
	{NAME("mutex"),		CHILD(named, stats_arenas_i_bins_j_mutex)}
};

static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i_bins_j)}
};

static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
	{INDEX(stats_arenas_i_bins_j)}
};

static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = {
	{NAME("nmalloc"),	CTL(stats_arenas_i_lextents_j_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_lextents_j_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_lextents_j_nrequests)},
	{NAME("curlextents"),	CTL(stats_arenas_i_lextents_j_curlextents)}
};
static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i_lextents_j)}
};

static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
	{INDEX(stats_arenas_i_lextents_j)}
};

static const ctl_named_node_t stats_arenas_i_extents_j_node[] = {
	{NAME("ndirty"),	CTL(stats_arenas_i_extents_j_ndirty)},
	{NAME("nmuzzy"),	CTL(stats_arenas_i_extents_j_nmuzzy)},
	{NAME("nretained"),	CTL(stats_arenas_i_extents_j_nretained)},
	{NAME("dirty_bytes"),	CTL(stats_arenas_i_extents_j_dirty_bytes)},
	{NAME("muzzy_bytes"),	CTL(stats_arenas_i_extents_j_muzzy_bytes)},
	{NAME("retained_bytes"), CTL(stats_arenas_i_extents_j_retained_bytes)}
};

static const ctl_named_node_t super_stats_arenas_i_extents_j_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i_extents_j)}
};

static const ctl_indexed_node_t stats_arenas_i_extents_node[] = {
	{INDEX(stats_arenas_i_extents_j)}
};

#define OP(mtx)  MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx)
MUTEX_PROF_ARENA_MUTEXES
#undef OP

static const ctl_named_node_t stats_arenas_i_mutexes_node[] = {
#define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)},
MUTEX_PROF_ARENA_MUTEXES
#undef OP
};

static const ctl_named_node_t stats_arenas_i_node[] = {
	{NAME("nthreads"),	CTL(stats_arenas_i_nthreads)},
	{NAME("uptime"),	CTL(stats_arenas_i_uptime)},
	{NAME("dss"),		CTL(stats_arenas_i_dss)},
	{NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)},
	{NAME("pactive"),	CTL(stats_arenas_i_pactive)},
	{NAME("pdirty"),	CTL(stats_arenas_i_pdirty)},
	{NAME("pmuzzy"),	CTL(stats_arenas_i_pmuzzy)},
	{NAME("mapped"),	CTL(stats_arenas_i_mapped)},
	{NAME("retained"),	CTL(stats_arenas_i_retained)},
	{NAME("extent_avail"),	CTL(stats_arenas_i_extent_avail)},
	{NAME("dirty_npurge"),	CTL(stats_arenas_i_dirty_npurge)},
	{NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)},
	{NAME("dirty_purged"),	CTL(stats_arenas_i_dirty_purged)},
	{NAME("muzzy_npurge"),	CTL(stats_arenas_i_muzzy_npurge)},
	{NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)},
	{NAME("muzzy_purged"),	CTL(stats_arenas_i_muzzy_purged)},
	{NAME("base"),		CTL(stats_arenas_i_base)},
	{NAME("internal"),	CTL(stats_arenas_i_internal)},
	{NAME("metadata_thp"),	CTL(stats_arenas_i_metadata_thp)},
	{NAME("tcache_bytes"),	CTL(stats_arenas_i_tcache_bytes)},
	{NAME("resident"),	CTL(stats_arenas_i_resident)},
	{NAME("small"),		CHILD(named, stats_arenas_i_small)},
	{NAME("large"),		CHILD(named, stats_arenas_i_large)},
	{NAME("bins"),		CHILD(indexed, stats_arenas_i_bins)},
	{NAME("lextents"),	CHILD(indexed, stats_arenas_i_lextents)},
	{NAME("extents"),	CHILD(indexed, stats_arenas_i_extents)},
	{NAME("mutexes"),	CHILD(named, stats_arenas_i_mutexes)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i)}
};

static const ctl_indexed_node_t stats_arenas_node[] = {
	{INDEX(stats_arenas_i)}
};

static const ctl_named_node_t stats_background_thread_node[] = {
	{NAME("num_threads"),	CTL(stats_background_thread_num_threads)},
	{NAME("num_runs"),	CTL(stats_background_thread_num_runs)},
	{NAME("run_interval"),	CTL(stats_background_thread_run_interval)}
};

#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx)
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP

static const ctl_named_node_t stats_mutexes_node[] = {
#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)},
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
	{NAME("reset"),		CTL(stats_mutexes_reset)}
};
#undef MUTEX_PROF_DATA_NODE

static const ctl_named_node_t stats_node[] = {
	{NAME("allocated"),	CTL(stats_allocated)},
	{NAME("active"),	CTL(stats_active)},
	{NAME("metadata"),	CTL(stats_metadata)},
	{NAME("metadata_thp"),	CTL(stats_metadata_thp)},
	{NAME("resident"),	CTL(stats_resident)},
	{NAME("mapped"),	CTL(stats_mapped)},
	{NAME("retained"),	CTL(stats_retained)},
	{NAME("background_thread"),
	 CHILD(named, stats_background_thread)},
	{NAME("mutexes"),	CHILD(named, stats_mutexes)},
	{NAME("arenas"),	CHILD(indexed, stats_arenas)}
};

static const ctl_named_node_t hooks_node[] = {
	{NAME("install"),	CTL(experimental_hooks_install)},
	{NAME("remove"),	CTL(experimental_hooks_remove)},
};

static const ctl_named_node_t experimental_node[] = {
	{NAME("hooks"),		CHILD(named, hooks)}
};

static const ctl_named_node_t	root_node[] = {
	{NAME("version"),	CTL(version)},
	{NAME("epoch"),		CTL(epoch)},
	{NAME("background_thread"),	CTL(background_thread)},
	{NAME("max_background_threads"),	CTL(max_background_threads)},
	{NAME("thread"),	CHILD(named, thread)},
	{NAME("config"),	CHILD(named, config)},
	{NAME("opt"),		CHILD(named, opt)},
	{NAME("tcache"),	CHILD(named, tcache)},
	{NAME("arena"),		CHILD(indexed, arena)},
	{NAME("arenas"),	CHILD(named, arenas)},
	{NAME("prof"),		CHILD(named, prof)},
	{NAME("stats"),		CHILD(named, stats)},
	{NAME("experimental"),	CHILD(named, experimental)}
};
static const ctl_named_node_t super_root_node[] = {
	{NAME(""),		CHILD(named, root)}
};
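
/*
 * For example, the name "stats.arenas.0.nthreads" resolves through
 * root_node's "stats" child, then stats_node's indexed "arenas" child
 * (stats_arenas_i_index maps the "0" component), and finally
 * stats_arenas_i_node's "nthreads" entry, whose handler is
 * stats_arenas_i_nthreads_ctl.
 */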

#undef NAME
#undef CHILD
#undef CTL
#undef INDEX

/******************************************************************************/

/*
 * Adds *src into *dst non-atomically.  This is safe, since everything is
 * synchronized by the ctl mutex.
 */
static void
ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
#ifdef JEMALLOC_ATOMIC_U64
	uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
	uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
	atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED);
#else
	*dst += *src;
#endif
}

/* Likewise: with ctl mutex synchronization, reading is simple. */
static uint64_t
ctl_arena_stats_read_u64(arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_u64(p, ATOMIC_RELAXED);
#else
	return *p;
#endif
}

static void
accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
	size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
	size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
	atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
}

/******************************************************************************/

static unsigned
arenas_i2a_impl(size_t i, bool compat, bool validate) {
	unsigned a;

	switch (i) {
	case MALLCTL_ARENAS_ALL:
		a = 0;
		break;
	case MALLCTL_ARENAS_DESTROYED:
		a = 1;
		break;
	default:
		if (compat && i == ctl_arenas->narenas) {
			/*
			 * Provide deprecated backward compatibility for
			 * accessing the merged stats at index narenas rather
			 * than via MALLCTL_ARENAS_ALL.  This is scheduled for
			 * removal in 6.0.0.
			 */
			a = 0;
		} else if (validate && i >= ctl_arenas->narenas) {
			a = UINT_MAX;
		} else {
			/*
			 * This function should never be called for an index
			 * more than one past the range of indices that have
			 * initialized ctl data.
			 */
			assert(i < ctl_arenas->narenas || (!validate && i ==
			    ctl_arenas->narenas));
			a = (unsigned)i + 2;
		}
		break;
	}

	return a;
}
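
/*
 * In other words, the slot assignment within ctl_arenas->arenas[] is:
 *   MALLCTL_ARENAS_ALL       -> 0 (merged "sum" stats)
 *   MALLCTL_ARENAS_DESTROYED -> 1
 *   real arena index i       -> i + 2
 */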

static unsigned
arenas_i2a(size_t i) {
	return arenas_i2a_impl(i, true, false);
}

static ctl_arena_t *
arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) {
	ctl_arena_t *ret;

	assert(!compat || !init);

	ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)];
	if (init && ret == NULL) {
		if (config_stats) {
			struct container_s {
				ctl_arena_t		ctl_arena;
				ctl_arena_stats_t	astats;
			};
			struct container_s *cont =
			    (struct container_s *)base_alloc(tsd_tsdn(tsd),
			    b0get(), sizeof(struct container_s), QUANTUM);
			if (cont == NULL) {
				return NULL;
			}
			ret = &cont->ctl_arena;
			ret->astats = &cont->astats;
		} else {
			ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(),
			    sizeof(ctl_arena_t), QUANTUM);
			if (ret == NULL) {
				return NULL;
			}
		}
		ret->arena_ind = (unsigned)i;
		ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret;
	}

	assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i));
	return ret;
}

static ctl_arena_t *
arenas_i(size_t i) {
	ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false);
	assert(ret != NULL);
	return ret;
}

static void
ctl_arena_clear(ctl_arena_t *ctl_arena) {
	ctl_arena->nthreads = 0;
	ctl_arena->dss = dss_prec_names[dss_prec_limit];
	ctl_arena->dirty_decay_ms = -1;
	ctl_arena->muzzy_decay_ms = -1;
	ctl_arena->pactive = 0;
	ctl_arena->pdirty = 0;
	ctl_arena->pmuzzy = 0;
	if (config_stats) {
		memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t));
		ctl_arena->astats->allocated_small = 0;
		ctl_arena->astats->nmalloc_small = 0;
		ctl_arena->astats->ndalloc_small = 0;
		ctl_arena->astats->nrequests_small = 0;
		memset(ctl_arena->astats->bstats, 0, SC_NBINS *
		    sizeof(bin_stats_t));
		memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) *
		    sizeof(arena_stats_large_t));
		memset(ctl_arena->astats->estats, 0, SC_NPSIZES *
		    sizeof(arena_stats_extents_t));
	}
}

static void
ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
	unsigned i;

	if (config_stats) {
		arena_stats_merge(tsdn, arena, &ctl_arena->nthreads,
		    &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
		    &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
		    &ctl_arena->pdirty, &ctl_arena->pmuzzy,
		    &ctl_arena->astats->astats, ctl_arena->astats->bstats,
		    ctl_arena->astats->lstats, ctl_arena->astats->estats);

		for (i = 0; i < SC_NBINS; i++) {
			ctl_arena->astats->allocated_small +=
			    ctl_arena->astats->bstats[i].curregs *
			    sz_index2size(i);
			ctl_arena->astats->nmalloc_small +=
			    ctl_arena->astats->bstats[i].nmalloc;
			ctl_arena->astats->ndalloc_small +=
			    ctl_arena->astats->bstats[i].ndalloc;
			ctl_arena->astats->nrequests_small +=
			    ctl_arena->astats->bstats[i].nrequests;
		}
	} else {
		arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
		    &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
		    &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
		    &ctl_arena->pdirty, &ctl_arena->pmuzzy);
	}
}

static void
ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
    bool destroyed) {
	unsigned i;

	if (!destroyed) {
		ctl_sdarena->nthreads += ctl_arena->nthreads;
		ctl_sdarena->pactive += ctl_arena->pactive;
		ctl_sdarena->pdirty += ctl_arena->pdirty;
		ctl_sdarena->pmuzzy += ctl_arena->pmuzzy;
	} else {
		assert(ctl_arena->nthreads == 0);
		assert(ctl_arena->pactive == 0);
		assert(ctl_arena->pdirty == 0);
		assert(ctl_arena->pmuzzy == 0);
	}

	if (config_stats) {
		ctl_arena_stats_t *sdstats = ctl_sdarena->astats;
		ctl_arena_stats_t *astats = ctl_arena->astats;

		if (!destroyed) {
			accum_atomic_zu(&sdstats->astats.mapped,
			    &astats->astats.mapped);
			accum_atomic_zu(&sdstats->astats.retained,
			    &astats->astats.retained);
			accum_atomic_zu(&sdstats->astats.extent_avail,
			    &astats->astats.extent_avail);
		}

		ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
		    &astats->astats.decay_dirty.npurge);
		ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
		    &astats->astats.decay_dirty.nmadvise);
		ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
		    &astats->astats.decay_dirty.purged);

		ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
		    &astats->astats.decay_muzzy.npurge);
		ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
		    &astats->astats.decay_muzzy.nmadvise);
		ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
		    &astats->astats.decay_muzzy.purged);

#define OP(mtx) malloc_mutex_prof_merge(				\
		    &(sdstats->astats.mutex_prof_data[			\
		        arena_prof_mutex_##mtx]),			\
		    &(astats->astats.mutex_prof_data[			\
		        arena_prof_mutex_##mtx]));
MUTEX_PROF_ARENA_MUTEXES
#undef OP
		if (!destroyed) {
			accum_atomic_zu(&sdstats->astats.base,
			    &astats->astats.base);
			accum_atomic_zu(&sdstats->astats.internal,
			    &astats->astats.internal);
			accum_atomic_zu(&sdstats->astats.resident,
			    &astats->astats.resident);
			accum_atomic_zu(&sdstats->astats.metadata_thp,
			    &astats->astats.metadata_thp);
		} else {
			assert(atomic_load_zu(
			    &astats->astats.internal, ATOMIC_RELAXED) == 0);
		}

		if (!destroyed) {
			sdstats->allocated_small += astats->allocated_small;
		} else {
			assert(astats->allocated_small == 0);
		}
		sdstats->nmalloc_small += astats->nmalloc_small;
		sdstats->ndalloc_small += astats->ndalloc_small;
		sdstats->nrequests_small += astats->nrequests_small;

		if (!destroyed) {
			accum_atomic_zu(&sdstats->astats.allocated_large,
			    &astats->astats.allocated_large);
		} else {
			assert(atomic_load_zu(&astats->astats.allocated_large,
			    ATOMIC_RELAXED) == 0);
		}
		ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
		    &astats->astats.nmalloc_large);
		ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
		    &astats->astats.ndalloc_large);
		ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large,
		    &astats->astats.nrequests_large);

		accum_atomic_zu(&sdstats->astats.tcache_bytes,
		    &astats->astats.tcache_bytes);

		if (ctl_arena->arena_ind == 0) {
			sdstats->astats.uptime = astats->astats.uptime;
		}

		/* Merge bin stats. */
		for (i = 0; i < SC_NBINS; i++) {
			sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
			sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
			sdstats->bstats[i].nrequests +=
			    astats->bstats[i].nrequests;
			if (!destroyed) {
				sdstats->bstats[i].curregs +=
				    astats->bstats[i].curregs;
			} else {
				assert(astats->bstats[i].curregs == 0);
			}
			sdstats->bstats[i].nfills += astats->bstats[i].nfills;
			sdstats->bstats[i].nflushes +=
			    astats->bstats[i].nflushes;
			sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
			sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
			if (!destroyed) {
				sdstats->bstats[i].curslabs +=
				    astats->bstats[i].curslabs;
			} else {
				assert(astats->bstats[i].curslabs == 0);
			}
			malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
			    &astats->bstats[i].mutex_data);
		}

		/* Merge stats for large allocations. */
		for (i = 0; i < SC_NSIZES - SC_NBINS; i++) {
			ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
			    &astats->lstats[i].nmalloc);
			ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
			    &astats->lstats[i].ndalloc);
			ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
			    &astats->lstats[i].nrequests);
			if (!destroyed) {
				sdstats->lstats[i].curlextents +=
				    astats->lstats[i].curlextents;
			} else {
				assert(astats->lstats[i].curlextents == 0);
			}
		}

		/* Merge extents stats. */
		for (i = 0; i < SC_NPSIZES; i++) {
			accum_atomic_zu(&sdstats->estats[i].ndirty,
			    &astats->estats[i].ndirty);
			accum_atomic_zu(&sdstats->estats[i].nmuzzy,
			    &astats->estats[i].nmuzzy);
			accum_atomic_zu(&sdstats->estats[i].nretained,
			    &astats->estats[i].nretained);
			accum_atomic_zu(&sdstats->estats[i].dirty_bytes,
			    &astats->estats[i].dirty_bytes);
			accum_atomic_zu(&sdstats->estats[i].muzzy_bytes,
			    &astats->estats[i].muzzy_bytes);
			accum_atomic_zu(&sdstats->estats[i].retained_bytes,
			    &astats->estats[i].retained_bytes);
		}
	}
}

static void
ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
    unsigned i, bool destroyed) {
	ctl_arena_t *ctl_arena = arenas_i(i);

	ctl_arena_clear(ctl_arena);
	ctl_arena_stats_amerge(tsdn, ctl_arena, arena);
	/* Merge into sum stats as well. */
	ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed);
}

static unsigned
ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
	unsigned arena_ind;
	ctl_arena_t *ctl_arena;

	if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) !=
	    NULL) {
		ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
		arena_ind = ctl_arena->arena_ind;
	} else {
		arena_ind = ctl_arenas->narenas;
	}

	/* Trigger stats allocation. */
	if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) {
		return UINT_MAX;
	}

	/* Initialize new arena. */
	if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) {
		return UINT_MAX;
	}

	if (arena_ind == ctl_arenas->narenas) {
		ctl_arenas->narenas++;
	}

	return arena_ind;
}

static void
ctl_background_thread_stats_read(tsdn_t *tsdn) {
	background_thread_stats_t *stats = &ctl_stats->background_thread;
	if (!have_background_thread ||
	    background_thread_stats_read(tsdn, stats)) {
		memset(stats, 0, sizeof(background_thread_stats_t));
		nstime_init(&stats->run_interval, 0);
	}
}

static void
ctl_refresh(tsdn_t *tsdn) {
	unsigned i;
	ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
	VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);

	/*
	 * Clear sum stats, since they will be merged into by
	 * ctl_arena_refresh().
	 */
	ctl_arena_clear(ctl_sarena);

	for (i = 0; i < ctl_arenas->narenas; i++) {
		tarenas[i] = arena_get(tsdn, i, false);
	}

	for (i = 0; i < ctl_arenas->narenas; i++) {
		ctl_arena_t *ctl_arena = arenas_i(i);
		bool initialized = (tarenas[i] != NULL);

		ctl_arena->initialized = initialized;
		if (initialized) {
			ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i,
			    false);
		}
	}

	if (config_stats) {
		ctl_stats->allocated = ctl_sarena->astats->allocated_small +
		    atomic_load_zu(&ctl_sarena->astats->astats.allocated_large,
			ATOMIC_RELAXED);
		ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
		ctl_stats->metadata = atomic_load_zu(
		    &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
		    atomic_load_zu(&ctl_sarena->astats->astats.internal,
			ATOMIC_RELAXED);
		ctl_stats->metadata_thp = atomic_load_zu(
		    &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED);
		ctl_stats->resident = atomic_load_zu(
		    &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
		ctl_stats->mapped = atomic_load_zu(
		    &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED);
		ctl_stats->retained = atomic_load_zu(
		    &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED);

		ctl_background_thread_stats_read(tsdn);

#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx)				\
    malloc_mutex_lock(tsdn, &mtx);					\
    malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx);	\
    malloc_mutex_unlock(tsdn, &mtx);

		if (config_prof && opt_prof) {
			READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof,
			    bt2gctx_mtx);
		}
		if (have_background_thread) {
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_background_thread,
			    background_thread_lock);
		} else {
			memset(&ctl_stats->mutex_prof_data[
			    global_prof_mutex_background_thread], 0,
			    sizeof(mutex_prof_data_t));
		}
		/* We own ctl mutex already. */
		malloc_mutex_prof_read(tsdn,
		    &ctl_stats->mutex_prof_data[global_prof_mutex_ctl],
		    &ctl_mtx);
#undef READ_GLOBAL_MUTEX_PROF_DATA
	}
	ctl_arenas->epoch++;
}

static bool
ctl_init(tsd_t *tsd) {
	bool ret;
	tsdn_t *tsdn = tsd_tsdn(tsd);

	malloc_mutex_lock(tsdn, &ctl_mtx);
	if (!ctl_initialized) {
		ctl_arena_t *ctl_sarena, *ctl_darena;
		unsigned i;

		/*
		 * Allocate demand-zeroed space for pointers to the full
		 * range of supported arena indices.
		 */
		if (ctl_arenas == NULL) {
			ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn,
			    b0get(), sizeof(ctl_arenas_t), QUANTUM);
			if (ctl_arenas == NULL) {
				ret = true;
				goto label_return;
			}
		}

		if (config_stats && ctl_stats == NULL) {
			ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(),
			    sizeof(ctl_stats_t), QUANTUM);
			if (ctl_stats == NULL) {
				ret = true;
				goto label_return;
			}
		}

		/*
		 * Allocate space for the current full range of arenas
		 * here rather than doing it lazily elsewhere, in order
		 * to limit when OOM-caused errors can occur.
		 */
		if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false,
		    true)) == NULL) {
			ret = true;
			goto label_return;
		}
		ctl_sarena->initialized = true;

		if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED,
		    false, true)) == NULL) {
			ret = true;
			goto label_return;
		}
		ctl_arena_clear(ctl_darena);
		/*
		 * Don't toggle ctl_darena to initialized until an arena is
		 * actually destroyed, so that arena.<i>.initialized can be used
		 * to query whether the stats are relevant.
		 */

		ctl_arenas->narenas = narenas_total_get();
		for (i = 0; i < ctl_arenas->narenas; i++) {
			if (arenas_i_impl(tsd, i, false, true) == NULL) {
				ret = true;
				goto label_return;
			}
		}

		ql_new(&ctl_arenas->destroyed);
		ctl_refresh(tsdn);

		ctl_initialized = true;
	}

	ret = false;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}

static int
ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
    size_t *mibp, size_t *depthp) {
	int ret;
	const char *elm, *tdot, *dot;
	size_t elen, i, j;
	const ctl_named_node_t *node;

	elm = name;
	/* Equivalent to strchrnul(). */
	dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
	elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	if (elen == 0) {
		ret = ENOENT;
		goto label_return;
	}
	node = super_root_node;
	for (i = 0; i < *depthp; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			const ctl_named_node_t *pnode = node;

			/* Children are named. */
			for (j = 0; j < node->nchildren; j++) {
				const ctl_named_node_t *child =
				    ctl_named_children(node, j);
				if (strlen(child->name) == elen &&
				    strncmp(elm, child->name, elen) == 0) {
					node = child;
					if (nodesp != NULL) {
						nodesp[i] =
						    (const ctl_node_t *)node;
					}
					mibp[i] = j;
					break;
				}
			}
			if (node == pnode) {
				ret = ENOENT;
				goto label_return;
			}
		} else {
			uintmax_t index;
			const ctl_indexed_node_t *inode;

			/* Children are indexed. */
			index = malloc_strtoumax(elm, NULL, 10);
			if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
				ret = ENOENT;
				goto label_return;
			}

			inode = ctl_indexed_node(node->children);
			node = inode->index(tsdn, mibp, *depthp, (size_t)index);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}

			if (nodesp != NULL) {
				nodesp[i] = (const ctl_node_t *)node;
			}
			mibp[i] = (size_t)index;
		}

		if (node->ctl != NULL) {
			/* Terminal node. */
			if (*dot != '\0') {
				/*
				 * The name contains more elements than are
				 * in this path through the tree.
				 */
				ret = ENOENT;
				goto label_return;
			}
			/* Complete lookup successful. */
			*depthp = i + 1;
			break;
		}

		/* Update elm. */
		if (*dot == '\0') {
			/* No more elements. */
			ret = ENOENT;
			goto label_return;
		}
		elm = &dot[1];
		dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
		    strchr(elm, '\0');
		elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	}

	ret = 0;
label_return:
	return ret;
}
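
/*
 * A sketch of how this lookup is typically driven from the public API (per
 * the mallctl*() manual; mib[2] can be rewritten to visit other bins without
 * repeating the string lookup):
 *
 *	size_t mib[4], miblen = 4, len;
 *	size_t bin_size;
 *
 *	mallctlnametomib("arenas.bin.0.size", mib, &miblen);
 *	mib[2] = 1;
 *	len = sizeof(bin_size);
 *	mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
 */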

int
ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen) {
	int ret;
	size_t depth;
	ctl_node_t const *nodes[CTL_MAX_DEPTH];
	size_t mib[CTL_MAX_DEPTH];
	const ctl_named_node_t *node;

	if (!ctl_initialized && ctl_init(tsd)) {
		ret = EAGAIN;
		goto label_return;
	}

	depth = CTL_MAX_DEPTH;
	ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
	if (ret != 0) {
		goto label_return;
	}

	node = ctl_named_node(nodes[depth-1]);
	if (node != NULL && node->ctl) {
		ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
	} else {
		/* The name refers to a partial path through the ctl tree. */
		ret = ENOENT;
	}

label_return:
	return ret;
}

int
ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
	int ret;

	if (!ctl_initialized && ctl_init(tsd)) {
		ret = EAGAIN;
		goto label_return;
	}

	ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp);
label_return:
	return ret;
}

int
ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	const ctl_named_node_t *node;
	size_t i;

	if (!ctl_initialized && ctl_init(tsd)) {
		ret = EAGAIN;
		goto label_return;
	}

	/* Iterate down the tree. */
	node = super_root_node;
	for (i = 0; i < miblen; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			/* Children are named. */
			if (node->nchildren <= mib[i]) {
				ret = ENOENT;
				goto label_return;
			}
			node = ctl_named_children(node, mib[i]);
		} else {
			const ctl_indexed_node_t *inode;

			/* Indexed element. */
			inode = ctl_indexed_node(node->children);
			node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}
		}
	}

	/* Call the ctl function. */
	if (node && node->ctl) {
		ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
	} else {
		/* Partial MIB. */
		ret = ENOENT;
	}

label_return:
	return ret;
}

bool
ctl_boot(void) {
	if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}

	ctl_initialized = false;

	return false;
}

void
ctl_prefork(tsdn_t *tsdn) {
	malloc_mutex_prefork(tsdn, &ctl_mtx);
}

void
ctl_postfork_parent(tsdn_t *tsdn) {
	malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
}

void
ctl_postfork_child(tsdn_t *tsdn) {
	malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}

/******************************************************************************/
/* *_ctl() functions. */

#define READONLY()	do {						\
	if (newp != NULL || newlen != 0) {				\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

#define WRITEONLY()	do {						\
	if (oldp != NULL || oldlenp != NULL) {				\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

#define READ_XOR_WRITE()	do {					\
	if ((oldp != NULL && oldlenp != NULL) && (newp != NULL ||	\
	    newlen != 0)) {						\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

#define READ(v, t)	do {						\
	if (oldp != NULL && oldlenp != NULL) {				\
		if (*oldlenp != sizeof(t)) {				\
			size_t	copylen = (sizeof(t) <= *oldlenp)	\
			    ? sizeof(t) : *oldlenp;			\
			memcpy(oldp, (void *)&(v), copylen);		\
			ret = EINVAL;					\
			goto label_return;				\
		}							\
		*(t *)oldp = (v);					\
	}								\
} while (0)

#define WRITE(v, t)	do {						\
	if (newp != NULL) {						\
		if (newlen != sizeof(t)) {				\
			ret = EINVAL;					\
			goto label_return;				\
		}							\
		(v) = *(t *)newp;					\
	}								\
} while (0)
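
/*
 * Note that on a size mismatch READ() still copies min(sizeof(t), *oldlenp)
 * bytes out before failing with EINVAL, whereas WRITE() rejects a mismatched
 * newlen outright.
 */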

#define MIB_UNSIGNED(v, i) do {						\
	if (mib[i] > UINT_MAX) {					\
		ret = EFAULT;						\
		goto label_return;					\
	}								\
	v = (unsigned)mib[i];						\
} while (0)

/*
 * There's a lot of code duplication in the following macros due to limitations
 * in how nested cpp macros are expanded.
 */
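/*
 * The generator variants below differ along a few axes: the *_CGEN forms
 * fail with ENOENT unless condition c holds; the *_NL_* forms take no lock,
 * while the others hold ctl_mtx (CTL_RO_CLGEN locks only when l is true);
 * CTL_TSD_* reads its value through a tsd accessor m(tsd); and
 * CTL_RO_CONFIG_GEN exposes a compile-time configuration constant.
 */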
#define CTL_RO_CLGEN(c, l, n, v, t)					\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen) {			\
	int ret;							\
	t oldval;							\
									\
	if (!(c)) {							\
		return ENOENT;						\
	}								\
	if (l) {							\
		malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);		\
	}								\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	if (l) {							\
		malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);		\
	}								\
	return ret;							\
}

#define CTL_RO_CGEN(c, n, v, t)						\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen) {			\
	int ret;							\
	t oldval;							\
									\
	if (!(c)) {							\
		return ENOENT;						\
	}								\
	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);			\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);			\
	return ret;							\
}

#define CTL_RO_GEN(n, v, t)						\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen) {			\
	int ret;							\
	t oldval;							\
									\
	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);			\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);			\
	return ret;							\
}

/*
 * ctl_mtx is not acquired, under the assumption that no pertinent data will
 * mutate during the call.
 */
#define CTL_RO_NL_CGEN(c, n, v, t)					\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen) {			\
	int ret;							\
	t oldval;							\
									\
	if (!(c)) {							\
		return ENOENT;						\
	}								\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	return ret;							\
}

#define CTL_RO_NL_GEN(n, v, t)						\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen) {			\
	int ret;							\
	t oldval;							\
									\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	return ret;							\
}

#define CTL_TSD_RO_NL_CGEN(c, n, m, t)					\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen) {			\
	int ret;							\
	t oldval;							\
									\
	if (!(c)) {							\
		return ENOENT;						\
	}								\
	READONLY();							\
	oldval = (m(tsd));						\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	return ret;							\
}

#define CTL_RO_CONFIG_GEN(n, t)						\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen) {			\
	int ret;							\
	t oldval;							\
									\
	READONLY();							\
	oldval = n;							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	return ret;							\
}

/******************************************************************************/

CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)

static int
epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	UNUSED uint64_t newval;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	WRITE(newval, uint64_t);
	if (newp != NULL) {
		ctl_refresh(tsd_tsdn(tsd));
	}
	READ(ctl_arenas->epoch, uint64_t);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
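
/*
 * Stats consumers are expected to write to "epoch" so that the cached
 * snapshot is refreshed before reading, e.g.:
 *
 *	uint64_t epoch = 1;
 *	size_t sz = sizeof(epoch);
 *	mallctl("epoch", &epoch, &sz, &epoch, sz);
 */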

static int
background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	bool oldval;

	if (!have_background_thread) {
		return ENOENT;
	}
	background_thread_ctl_init(tsd_tsdn(tsd));

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
	if (newp == NULL) {
		oldval = background_thread_enabled();
		READ(oldval, bool);
	} else {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = background_thread_enabled();
		READ(oldval, bool);

		bool newval = *(bool *)newp;
		if (newval == oldval) {
			ret = 0;
			goto label_return;
		}

		background_thread_enabled_set(tsd_tsdn(tsd), newval);
		if (newval) {
			if (background_threads_enable(tsd)) {
				ret = EFAULT;
				goto label_return;
			}
		} else {
			if (background_threads_disable(tsd)) {
				ret = EFAULT;
				goto label_return;
			}
		}
	}
	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);

	return ret;
}

static int
max_background_threads_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	size_t oldval;

	if (!have_background_thread) {
		return ENOENT;
	}
	background_thread_ctl_init(tsd_tsdn(tsd));

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
	if (newp == NULL) {
		oldval = max_background_threads;
		READ(oldval, size_t);
	} else {
		if (newlen != sizeof(size_t)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = max_background_threads;
		READ(oldval, size_t);

		size_t newval = *(size_t *)newp;
		if (newval == oldval) {
			ret = 0;
			goto label_return;
		}
		if (newval > opt_max_background_threads) {
			ret = EINVAL;
			goto label_return;
		}

		if (background_thread_enabled()) {
			background_thread_enabled_set(tsd_tsdn(tsd), false);
			if (background_threads_disable(tsd)) {
				ret = EFAULT;
				goto label_return;
			}
			max_background_threads = newval;
			background_thread_enabled_set(tsd_tsdn(tsd), true);
			if (background_threads_enable(tsd)) {
				ret = EFAULT;
				goto label_return;
			}
		} else {
			max_background_threads = newval;
		}
	}
	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);

	return ret;
}
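
/*
 * Illustrative usage sketch (not part of this file): lowering the thread
 * cap; writes above the opt.max_background_threads limit fail with EINVAL
 * per the check above.
 *
 *	size_t max = 1;
 *	mallctl("max_background_threads", NULL, NULL, &max, sizeof(max));
 */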

/******************************************************************************/

CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
CTL_RO_CONFIG_GEN(config_debug, bool)
CTL_RO_CONFIG_GEN(config_fill, bool)
CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
CTL_RO_CONFIG_GEN(config_prof, bool)
CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
CTL_RO_CONFIG_GEN(config_stats, bool)
CTL_RO_CONFIG_GEN(config_utrace, bool)
CTL_RO_CONFIG_GEN(config_xmalloc, bool)

/******************************************************************************/

CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
    const char *)
CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
    const char *)
CTL_RO_NL_GEN(opt_oversize_threshold, opt_oversize_threshold, size_t)
CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *)
CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit,
    size_t)
CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
    opt_prof_thread_active_init, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
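
/*
 * Illustrative usage sketch (not part of this file): opt.* nodes are
 * read-only snapshots of the option values frozen at initialization, e.g.
 *
 *	unsigned narenas;
 *	size_t sz = sizeof(narenas);
 *	mallctl("opt.narenas", &narenas, &sz, NULL, 0);
 */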

/******************************************************************************/

static int
thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	arena_t *oldarena;
	unsigned newind, oldind;

	oldarena = arena_choose(tsd, NULL);
	if (oldarena == NULL) {
		return EAGAIN;
	}
	newind = oldind = arena_ind_get(oldarena);
	WRITE(newind, unsigned);
	READ(oldind, unsigned);

	if (newind != oldind) {
		arena_t *newarena;

		if (newind >= narenas_total_get()) {
			/* New arena index is out of range. */
			ret = EFAULT;
			goto label_return;
		}

		if (have_percpu_arena &&
		    PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
			if (newind < percpu_arena_ind_limit(opt_percpu_arena)) {
				/*
				 * If percpu arena is enabled, thread_arena
				 * control is not allowed for the auto arena
				 * range.
				 */
				ret = EPERM;
				goto label_return;
			}
		}

		/* Initialize arena if necessary. */
		newarena = arena_get(tsd_tsdn(tsd), newind, true);
		if (newarena == NULL) {
			ret = EAGAIN;
			goto label_return;
		}
		/* Set new arena/tcache associations. */
		arena_migrate(tsd, oldind, newind);
		if (tcache_available(tsd)) {
			tcache_arena_reassociate(tsd_tsdn(tsd),
			    tsd_tcachep_get(tsd), newarena);
		}
	}

	ret = 0;
label_return:
	return ret;
}
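
/*
 * Illustrative usage sketch (not part of this file): pinning the calling
 * thread to a specific arena (index 3 here is arbitrary); per the check
 * above, this fails with EPERM for the automatic per-CPU arena range when
 * percpu_arena is enabled.
 *
 *	unsigned ind = 3;
 *	mallctl("thread.arena", NULL, NULL, &ind, sizeof(ind));
 */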

CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
    uint64_t)
CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
    uint64_t *)
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
    uint64_t)
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
    tsd_thread_deallocatedp_get, uint64_t *)

static int
thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	bool oldval;

	oldval = tcache_enabled_get(tsd);
	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		tcache_enabled_set(tsd, *(bool *)newp);
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}

static int
thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	if (!tcache_available(tsd)) {
		ret = EFAULT;
		goto label_return;
	}

	READONLY();
	WRITEONLY();

	tcache_flush(tsd);

	ret = 0;
label_return:
	return ret;
}
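
/*
 * Illustrative usage sketch (not part of this file): flushing is a pure
 * trigger (neither read nor write), while enabled is a read/write bool
 * that returns the previous setting through oldp.
 *
 *	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
 *
 *	bool was, off = false;
 *	size_t sz = sizeof(was);
 *	mallctl("thread.tcache.enabled", &was, &sz, &off, sizeof(off));
 */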

static int
thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	if (!config_prof) {
		return ENOENT;
	}

	READ_XOR_WRITE();

	if (newp != NULL) {
		if (newlen != sizeof(const char *)) {
			ret = EINVAL;
			goto label_return;
		}

		if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
		    0) {
			goto label_return;
		}
	} else {
		const char *oldname = prof_thread_name_get(tsd);
		READ(oldname, const char *);
	}

	ret = 0;
label_return:
	return ret;
}

static int
thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	bool oldval;

	if (!config_prof) {
		return ENOENT;
	}

	oldval = prof_thread_active_get(tsd);
	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		if (prof_thread_active_set(tsd, *(bool *)newp)) {
			ret = EAGAIN;
			goto label_return;
		}
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}
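
/*
 * Illustrative usage sketch (not part of this file): naming the calling
 * thread for heap profile output.  The node is read xor write, so pass
 * either oldp or newp but not both.
 *
 *	const char *name = "worker-0";
 *	mallctl("thread.prof.name", NULL, NULL, &name, sizeof(name));
 */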

/******************************************************************************/

static int
tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned tcache_ind;

	READONLY();
	if (tcaches_create(tsd, &tcache_ind)) {
		ret = EFAULT;
		goto label_return;
	}
	READ(tcache_ind, unsigned);

	ret = 0;
label_return:
	return ret;
}

static int
tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned tcache_ind;

	WRITEONLY();
	tcache_ind = UINT_MAX;
	WRITE(tcache_ind, unsigned);
	if (tcache_ind == UINT_MAX) {
		ret = EFAULT;
		goto label_return;
	}
	tcaches_flush(tsd, tcache_ind);

	ret = 0;
label_return:
	return ret;
}

static int
tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned tcache_ind;

	WRITEONLY();
	tcache_ind = UINT_MAX;
	WRITE(tcache_ind, unsigned);
	if (tcache_ind == UINT_MAX) {
		ret = EFAULT;
		goto label_return;
	}
	tcaches_destroy(tsd, tcache_ind);

	ret = 0;
label_return:
	return ret;
}
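
/*
 * Illustrative usage sketch (not part of this file): an explicit tcache
 * lifecycle.  The returned index is passed to allocation calls via
 * MALLOCX_TCACHE(), and eventually released through tcache.destroy.
 *
 *	unsigned ind;
 *	size_t sz = sizeof(ind);
 *	mallctl("tcache.create", &ind, &sz, NULL, 0);
 *	void *p = mallocx(64, MALLOCX_TCACHE(ind));
 *	dallocx(p, MALLOCX_TCACHE(ind));
 *	mallctl("tcache.destroy", NULL, NULL, &ind, sizeof(ind));
 */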

/******************************************************************************/

static int
arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	tsdn_t *tsdn = tsd_tsdn(tsd);
	unsigned arena_ind;
	bool initialized;

	READONLY();
	MIB_UNSIGNED(arena_ind, 1);

	malloc_mutex_lock(tsdn, &ctl_mtx);
	initialized = arenas_i(arena_ind)->initialized;
	malloc_mutex_unlock(tsdn, &ctl_mtx);

	READ(initialized, bool);

	ret = 0;
label_return:
	return ret;
}

static void
arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) {
	malloc_mutex_lock(tsdn, &ctl_mtx);
	{
		unsigned narenas = ctl_arenas->narenas;

		/*
		 * Access via index narenas is deprecated, and scheduled for
		 * removal in 6.0.0.
		 */
		if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) {
			unsigned i;
			VARIABLE_ARRAY(arena_t *, tarenas, narenas);

			for (i = 0; i < narenas; i++) {
				tarenas[i] = arena_get(tsdn, i, false);
			}

			/*
			 * No further need to hold ctl_mtx, since narenas and
			 * tarenas contain everything needed below.
			 */
			malloc_mutex_unlock(tsdn, &ctl_mtx);

			for (i = 0; i < narenas; i++) {
				if (tarenas[i] != NULL) {
					arena_decay(tsdn, tarenas[i], false,
					    all);
				}
			}
		} else {
			arena_t *tarena;

			assert(arena_ind < narenas);

			tarena = arena_get(tsdn, arena_ind, false);

			/* No further need to hold ctl_mtx. */
			malloc_mutex_unlock(tsdn, &ctl_mtx);

			if (tarena != NULL) {
				arena_decay(tsdn, tarena, false, all);
			}
		}
	}
}

static int
arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;

	READONLY();
	WRITEONLY();
	MIB_UNSIGNED(arena_ind, 1);
	arena_i_decay(tsd_tsdn(tsd), arena_ind, false);

	ret = 0;
label_return:
	return ret;
}

static int
arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;

	READONLY();
	WRITEONLY();
	MIB_UNSIGNED(arena_ind, 1);
	arena_i_decay(tsd_tsdn(tsd), arena_ind, true);

	ret = 0;
label_return:
	return ret;
}
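
/*
 * Illustrative usage sketch (not part of this file): purging every arena
 * at once by formatting MALLCTL_ARENAS_ALL into the node name.  "decay"
 * purges per the decay curves; "purge" discards all unused dirty/muzzy
 * pages immediately.
 *
 *	char name[64];
 *	snprintf(name, sizeof(name), "arena.%u.purge",
 *	    (unsigned)MALLCTL_ARENAS_ALL);
 *	mallctl(name, NULL, NULL, NULL, 0);
 */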

static int
arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind,
    arena_t **arena) {
	int ret;

	READONLY();
	WRITEONLY();
	MIB_UNSIGNED(*arena_ind, 1);

	*arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
	if (*arena == NULL || arena_is_auto(*arena)) {
		ret = EFAULT;
		goto label_return;
	}

	ret = 0;
label_return:
	return ret;
}

static void
arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) {
	/* Temporarily disable the background thread during arena reset. */
	if (have_background_thread) {
		malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
		if (background_thread_enabled()) {
			background_thread_info_t *info =
			    background_thread_info_get(arena_ind);
			assert(info->state == background_thread_started);
			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
			info->state = background_thread_paused;
			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		}
	}
}

static void
arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) {
	if (have_background_thread) {
		if (background_thread_enabled()) {
			background_thread_info_t *info =
			    background_thread_info_get(arena_ind);
			assert(info->state == background_thread_paused);
			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
			info->state = background_thread_started;
			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
	}
}

static int
arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;

	ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
	    newp, newlen, &arena_ind, &arena);
	if (ret != 0) {
		return ret;
	}

	arena_reset_prepare_background_thread(tsd, arena_ind);
	arena_reset(tsd, arena);
	arena_reset_finish_background_thread(tsd, arena_ind);

	return ret;
}

static int
arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;
	ctl_arena_t *ctl_darena, *ctl_arena;

	ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
	    newp, newlen, &arena_ind, &arena);
	if (ret != 0) {
		goto label_return;
	}

	if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena,
	    true) != 0) {
		ret = EFAULT;
		goto label_return;
	}

	arena_reset_prepare_background_thread(tsd, arena_ind);
	/* Merge stats after resetting and purging arena. */
	arena_reset(tsd, arena);
	arena_decay(tsd_tsdn(tsd), arena, false, true);
	ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED);
	ctl_darena->initialized = true;
	ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true);
	/* Destroy arena. */
	arena_destroy(tsd, arena);
	ctl_arena = arenas_i(arena_ind);
	ctl_arena->initialized = false;
	/* Record arena index for later recycling via arenas.create. */
	ql_elm_new(ctl_arena, destroyed_link);
	ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
	arena_reset_finish_background_thread(tsd, arena_ind);

	assert(ret == 0);
label_return:
	return ret;
}

static int
arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	const char *dss = NULL;
	unsigned arena_ind;
	dss_prec_t dss_prec_old = dss_prec_limit;
	dss_prec_t dss_prec = dss_prec_limit;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	WRITE(dss, const char *);
	MIB_UNSIGNED(arena_ind, 1);
	if (dss != NULL) {
		int i;
		bool match = false;

		for (i = 0; i < dss_prec_limit; i++) {
			if (strcmp(dss_prec_names[i], dss) == 0) {
				dss_prec = i;
				match = true;
				break;
			}
		}

		if (!match) {
			ret = EINVAL;
			goto label_return;
		}
	}

	/*
	 * Access via index narenas is deprecated, and scheduled for removal in
	 * 6.0.0.
	 */
	if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind ==
	    ctl_arenas->narenas) {
		if (dss_prec != dss_prec_limit &&
		    extent_dss_prec_set(dss_prec)) {
			ret = EFAULT;
			goto label_return;
		}
		dss_prec_old = extent_dss_prec_get();
	} else {
		arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
		if (arena == NULL || (dss_prec != dss_prec_limit &&
		    arena_dss_prec_set(arena, dss_prec))) {
			ret = EFAULT;
			goto label_return;
		}
		dss_prec_old = arena_dss_prec_get(arena);
	}

	dss = dss_prec_names[dss_prec_old];
	READ(dss, const char *);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}

static int
arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;

	MIB_UNSIGNED(arena_ind, 1);
	arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
	if (arena == NULL) {
		ret = EFAULT;
		goto label_return;
	}

	if (oldp != NULL && oldlenp != NULL) {
		size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) :
		    arena_muzzy_decay_ms_get(arena);
		READ(oldval, ssize_t);
	}
	if (newp != NULL) {
		if (newlen != sizeof(ssize_t)) {
			ret = EINVAL;
			goto label_return;
		}
		if (arena_is_huge(arena_ind) && *(ssize_t *)newp > 0) {
			/*
			 * By default the huge arena purges eagerly.  If it is
			 * set to non-zero decay time afterwards, background
			 * thread might be needed.
			 */
			if (background_thread_create(tsd, arena_ind)) {
				ret = EFAULT;
				goto label_return;
			}
		}
		if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena,
		    *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd),
		    arena, *(ssize_t *)newp)) {
			ret = EFAULT;
			goto label_return;
		}
	}

	ret = 0;
label_return:
	return ret;
}

static int
arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
	    newlen, true);
}

static int
arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
	    newlen, false);
}
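
/*
 * Illustrative usage sketch (not part of this file): putting arena 0 on a
 * 10-second dirty decay curve; writing -1 disables decay-based purging
 * entirely.
 *
 *	ssize_t ms = 10000;
 *	mallctl("arena.0.dirty_decay_ms", NULL, NULL, &ms, sizeof(ms));
 */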

static int
arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	MIB_UNSIGNED(arena_ind, 1);
	if (arena_ind < narenas_total_get()) {
		extent_hooks_t *old_extent_hooks;
		arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
		if (arena == NULL) {
			if (arena_ind >= narenas_auto) {
				ret = EFAULT;
				goto label_return;
			}
			old_extent_hooks =
			    (extent_hooks_t *)&extent_hooks_default;
			READ(old_extent_hooks, extent_hooks_t *);
			if (newp != NULL) {
				/* Initialize a new arena as a side effect. */
				extent_hooks_t *new_extent_hooks
				    JEMALLOC_CC_SILENCE_INIT(NULL);
				WRITE(new_extent_hooks, extent_hooks_t *);
				arena = arena_init(tsd_tsdn(tsd), arena_ind,
				    new_extent_hooks);
				if (arena == NULL) {
					ret = EFAULT;
					goto label_return;
				}
			}
		} else {
			if (newp != NULL) {
				extent_hooks_t *new_extent_hooks
				    JEMALLOC_CC_SILENCE_INIT(NULL);
				WRITE(new_extent_hooks, extent_hooks_t *);
				old_extent_hooks = extent_hooks_set(tsd, arena,
				    new_extent_hooks);
				READ(old_extent_hooks, extent_hooks_t *);
			} else {
				old_extent_hooks = extent_hooks_get(arena);
				READ(old_extent_hooks, extent_hooks_t *);
			}
		}
	} else {
		ret = EFAULT;
		goto label_return;
	}
	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
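
/*
 * Illustrative usage sketch (not part of this file): reading an arena's
 * current extent hooks.  Installing custom hooks works the same way with
 * newp/newlen, but the hooks table must remain valid for the arena's
 * lifetime.
 *
 *	extent_hooks_t *hooks;
 *	size_t sz = sizeof(hooks);
 *	mallctl("arena.0.extent_hooks", &hooks, &sz, NULL, 0);
 */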

static int
arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;

	if (!opt_retain) {
		/* Only relevant when retain is enabled. */
		return ENOENT;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	MIB_UNSIGNED(arena_ind, 1);
	if (arena_ind < narenas_total_get() && (arena =
	    arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
		size_t old_limit, new_limit;
		if (newp != NULL) {
			WRITE(new_limit, size_t);
		}
		bool err = arena_retain_grow_limit_get_set(tsd, arena,
		    &old_limit, newp != NULL ? &new_limit : NULL);
		if (!err) {
			READ(old_limit, size_t);
			ret = 0;
		} else {
			ret = EFAULT;
		}
	} else {
		ret = EFAULT;
	}
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}

static const ctl_named_node_t *
arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
	const ctl_named_node_t *ret;

	malloc_mutex_lock(tsdn, &ctl_mtx);
	switch (i) {
	case MALLCTL_ARENAS_ALL:
	case MALLCTL_ARENAS_DESTROYED:
		break;
	default:
		if (i > ctl_arenas->narenas) {
			ret = NULL;
			goto label_return;
		}
		break;
	}

	ret = super_arena_i_node;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}

/******************************************************************************/

static int
arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned narenas;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	READONLY();
	if (*oldlenp != sizeof(unsigned)) {
		ret = EINVAL;
		goto label_return;
	}
	narenas = ctl_arenas->narenas;
	READ(narenas, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}

static int
arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
	int ret;

	if (oldp != NULL && oldlenp != NULL) {
		size_t oldval = (dirty ? arena_dirty_decay_ms_default_get() :
		    arena_muzzy_decay_ms_default_get());
		READ(oldval, ssize_t);
	}
	if (newp != NULL) {
		if (newlen != sizeof(ssize_t)) {
			ret = EINVAL;
			goto label_return;
		}
		if (dirty ? arena_dirty_decay_ms_default_set(*(ssize_t *)newp)
		    : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) {
			ret = EFAULT;
			goto label_return;
		}
	}

	ret = 0;
label_return:
	return ret;
}

static int
arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
	    newlen, true);
}

static int
arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
	    newlen, false);
}

CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, SC_NBINS, unsigned)
CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nshards, bin_infos[mib[2]].n_shards, uint32_t)
static const ctl_named_node_t *
arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
	if (i > SC_NBINS) {
		return NULL;
	}
	return super_arenas_bin_i_node;
}

CTL_RO_NL_GEN(arenas_nlextents, SC_NSIZES - SC_NBINS, unsigned)
CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(SC_NBINS+(szind_t)mib[2]),
    size_t)
static const ctl_named_node_t *
arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
    size_t i) {
	if (i > SC_NSIZES - SC_NBINS) {
		return NULL;
	}
	return super_arenas_lextent_i_node;
}

static int
arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	extent_hooks_t *extent_hooks;
	unsigned arena_ind;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);

	extent_hooks = (extent_hooks_t *)&extent_hooks_default;
	WRITE(extent_hooks, extent_hooks_t *);
	if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) {
		ret = EAGAIN;
		goto label_return;
	}
	READ(arena_ind, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
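
/*
 * Illustrative usage sketch (not part of this file): creating a fresh
 * arena with the default extent hooks and allocating from it explicitly.
 *
 *	unsigned ind;
 *	size_t sz = sizeof(ind);
 *	mallctl("arenas.create", &ind, &sz, NULL, 0);
 *	void *p = mallocx(4096, MALLOCX_ARENA(ind));
 */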

static int
arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;
	void *ptr;
	extent_t *extent;
	arena_t *arena;

	ptr = NULL;
	ret = EINVAL;
	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	WRITE(ptr, void *);
	extent = iealloc(tsd_tsdn(tsd), ptr);
	if (extent == NULL) {
		goto label_return;
	}

	arena = extent_arena_get(extent);
	if (arena == NULL) {
		goto label_return;
	}

	arena_ind = arena_ind_get(arena);
	READ(arena_ind, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
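
/*
 * Illustrative usage sketch (not part of this file): mapping an
 * allocation back to the index of the arena that owns it.
 *
 *	void *p = mallocx(64, 0);
 *	unsigned ind;
 *	size_t sz = sizeof(ind);
 *	mallctl("arenas.lookup", &ind, &sz, &p, sizeof(p));
 */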

/******************************************************************************/

static int
prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	bool oldval;

	if (!config_prof) {
		return ENOENT;
	}

	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
		    *(bool *)newp);
	} else {
		oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}

static int
prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	bool oldval;

	if (!config_prof) {
		return ENOENT;
	}

	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
	} else {
		oldval = prof_active_get(tsd_tsdn(tsd));
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}

static int
prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	const char *filename = NULL;

	if (!config_prof) {
		return ENOENT;
	}

	WRITEONLY();
	WRITE(filename, const char *);

	if (prof_mdump(tsd, filename)) {
		ret = EFAULT;
		goto label_return;
	}

	ret = 0;
label_return:
	return ret;
}
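
/*
 * Illustrative usage sketch (not part of this file): forcing a heap
 * profile dump to an explicit path, assuming profiling (opt.prof) is
 * enabled; writing with newp == NULL instead uses the default dump
 * filename scheme.
 *
 *	const char *path = "/tmp/jeprof.out";
 *	mallctl("prof.dump", NULL, NULL, &path, sizeof(path));
 */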

static int
prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	bool oldval;

	if (!config_prof) {
		return ENOENT;
	}

	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
	} else {
		oldval = prof_gdump_get(tsd_tsdn(tsd));
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}

static int
prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	size_t lg_sample = lg_prof_sample;

	if (!config_prof) {
		return ENOENT;
	}

	WRITEONLY();
	WRITE(lg_sample, size_t);
	if (lg_sample >= (sizeof(uint64_t) << 3)) {
		lg_sample = (sizeof(uint64_t) << 3) - 1;
	}

	prof_reset(tsd, lg_sample);

	ret = 0;
label_return:
	return ret;
}

CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)

static int
prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	const char *filename = NULL;

	if (!config_prof) {
		return ENOENT;
	}

	WRITEONLY();
	WRITE(filename, const char *);

	if (prof_log_start(tsd_tsdn(tsd), filename)) {
		ret = EFAULT;
		goto label_return;
	}

	ret = 0;
label_return:
	return ret;
}

static int
prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	if (!config_prof) {
		return ENOENT;
	}

	if (prof_log_stop(tsd_tsdn(tsd))) {
		return EFAULT;
	}

	return 0;
}

/******************************************************************************/

CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t)
CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t)
CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t)
CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t)

CTL_RO_CGEN(config_stats, stats_background_thread_num_threads,
    ctl_stats->background_thread.num_threads, size_t)
CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
    ctl_stats->background_thread.num_runs, uint64_t)
CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
    nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)

CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
    ssize_t)
CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms,
    ssize_t)
CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_uptime,
    nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t)
CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.extent_avail,
    ATOMIC_RELAXED), size_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_base,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp,
    ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes,
    ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED),
    size_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
    arenas_i(mib[2])->astats->allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
    arenas_i(mib[2])->astats->nmalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
    arenas_i(mib[2])->astats->ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
    arenas_i(mib[2])->astats->nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large,
    ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t)
/*
 * Note: "nmalloc" here instead of "nrequests" in the read.  This is
 * intentional.
 */
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)

/* Lock profiling related APIs below. */
#define RO_MUTEX_CTL_GEN(n, l)						\
CTL_RO_CGEN(config_stats, stats_##n##_num_ops,				\
    l.n_lock_ops, uint64_t)						\
CTL_RO_CGEN(config_stats, stats_##n##_num_wait,				\
    l.n_wait_times, uint64_t)						\
CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq,			\
    l.n_spin_acquired, uint64_t)					\
CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch,			\
    l.n_owner_switches, uint64_t)					\
CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time,			\
    nstime_ns(&l.tot_wait_time), uint64_t)				\
CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time,			\
    nstime_ns(&l.max_wait_time), uint64_t)				\
CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds,			\
    l.max_n_thds, uint32_t)

/* Global mutexes. */
#define OP(mtx)								\
    RO_MUTEX_CTL_GEN(mutexes_##mtx,					\
        ctl_stats->mutex_prof_data[global_prof_mutex_##mtx])
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP

/* Per arena mutexes. */
#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx,		\
    arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx])
MUTEX_PROF_ARENA_MUTEXES
#undef OP

/* tcache bin mutex. */
RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex,
    arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data)
#undef RO_MUTEX_CTL_GEN

/* Resets all mutex stats, including global, arena and bin mutexes. */
static int
stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	if (!config_stats) {
		return ENOENT;
	}

	tsdn_t *tsdn = tsd_tsdn(tsd);

#define MUTEX_PROF_RESET(mtx)						\
    malloc_mutex_lock(tsdn, &mtx);					\
    malloc_mutex_prof_data_reset(tsdn, &mtx);				\
    malloc_mutex_unlock(tsdn, &mtx);

	/* Global mutexes: ctl and prof. */
	MUTEX_PROF_RESET(ctl_mtx);
	if (have_background_thread) {
		MUTEX_PROF_RESET(background_thread_lock);
	}
	if (config_prof && opt_prof) {
		MUTEX_PROF_RESET(bt2gctx_mtx);
	}

	/* Per arena mutexes. */
	unsigned n = narenas_total_get();

	for (unsigned i = 0; i < n; i++) {
		arena_t *arena = arena_get(tsdn, i, false);
		if (!arena) {
			continue;
		}
		MUTEX_PROF_RESET(arena->large_mtx);
		MUTEX_PROF_RESET(arena->extent_avail_mtx);
		MUTEX_PROF_RESET(arena->extents_dirty.mtx);
		MUTEX_PROF_RESET(arena->extents_muzzy.mtx);
		MUTEX_PROF_RESET(arena->extents_retained.mtx);
		MUTEX_PROF_RESET(arena->decay_dirty.mtx);
		MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
		MUTEX_PROF_RESET(arena->tcache_ql_mtx);
		MUTEX_PROF_RESET(arena->base->mtx);

		for (szind_t i = 0; i < SC_NBINS; i++) {
			for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
				bin_t *bin = &arena->bins[i].bin_shards[j];
				MUTEX_PROF_RESET(bin->lock);
			}
		}
	}
#undef MUTEX_PROF_RESET
	return 0;
}
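
/*
 * Illustrative usage sketch (not part of this file): zeroing all mutex
 * profiling counters before a measurement window, so subsequent
 * stats.mutexes.* reads cover only that window.
 *
 *	mallctl("stats.mutexes.reset", NULL, NULL, NULL, 0);
 */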

CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
    arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
    arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
    arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
    arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
    arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
    arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)

static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
    size_t j) {
	if (j > SC_NBINS) {
		return NULL;
	}
	return super_stats_arenas_i_bins_j_node;
}

CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
    arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)

static const ctl_named_node_t *
stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
    size_t j) {
	if (j > SC_NSIZES - SC_NBINS) {
		return NULL;
	}
	return super_stats_arenas_i_lextents_j_node;
}

CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_ndirty,
    atomic_load_zu(&arenas_i(mib[2])->astats->estats[mib[4]].ndirty,
    ATOMIC_RELAXED), size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nmuzzy,
    atomic_load_zu(&arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy,
    ATOMIC_RELAXED), size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nretained,
    atomic_load_zu(&arenas_i(mib[2])->astats->estats[mib[4]].nretained,
    ATOMIC_RELAXED), size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_dirty_bytes,
    atomic_load_zu(&arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes,
    ATOMIC_RELAXED), size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_muzzy_bytes,
    atomic_load_zu(&arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes,
    ATOMIC_RELAXED), size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_retained_bytes,
    atomic_load_zu(&arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes,
    ATOMIC_RELAXED), size_t);

static const ctl_named_node_t *
stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
    size_t j) {
	if (j >= SC_NPSIZES) {
		return NULL;
	}
	return super_stats_arenas_i_extents_j_node;
}

static const ctl_named_node_t *
stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
    size_t i) {
	const ctl_named_node_t *ret;
	size_t a;

	malloc_mutex_lock(tsdn, &ctl_mtx);
	a = arenas_i2a_impl(i, true, true);
	if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
		ret = NULL;
		goto label_return;
	}

	ret = super_stats_arenas_i_node;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}

static int
experimental_hooks_install_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	if (oldp == NULL || oldlenp == NULL || newp == NULL) {
		ret = EINVAL;
		goto label_return;
	}
	/*
	 * Note: this is a *private* struct.  This is an experimental interface;
	 * forcing the user to know the jemalloc internals well enough to
	 * extract the ABI hopefully ensures nobody gets too comfortable with
	 * this API, which can change at a moment's notice.
	 */
	hooks_t hooks;
	WRITE(hooks, hooks_t);
	void *handle = hook_install(tsd_tsdn(tsd), &hooks);
	if (handle == NULL) {
		ret = EAGAIN;
		goto label_return;
	}
	READ(handle, void *);

	ret = 0;
label_return:
	return ret;
}

static int
experimental_hooks_remove_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	WRITEONLY();
	void *handle = NULL;
	WRITE(handle, void *);
	if (handle == NULL) {
		ret = EINVAL;
		goto label_return;
	}
	hook_remove(tsd_tsdn(tsd), handle);
	ret = 0;
label_return:
	return ret;
}