1 #define JEMALLOC_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
4
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/atomic.h"
7 #include "jemalloc/internal/ctl.h"
8 #include "jemalloc/internal/extent_dss.h"
9 #include "jemalloc/internal/extent_mmap.h"
10 #include "jemalloc/internal/jemalloc_internal_types.h"
11 #include "jemalloc/internal/log.h"
12 #include "jemalloc/internal/malloc_io.h"
13 #include "jemalloc/internal/mutex.h"
14 #include "jemalloc/internal/rtree.h"
15 #include "jemalloc/internal/size_classes.h"
16 #include "jemalloc/internal/spin.h"
17 #include "jemalloc/internal/sz.h"
18 #include "jemalloc/internal/ticker.h"
19 #include "jemalloc/internal/util.h"
20
21 /******************************************************************************/
22 /* Data. */
23
24 /* Runtime configuration options. */
25 const char *je_malloc_conf
26 #ifndef _WIN32
27 JEMALLOC_ATTR(weak)
28 #endif
29 ;
30 bool opt_abort =
31 #ifdef JEMALLOC_DEBUG
32 true
33 #else
34 false
35 #endif
36 ;
37 bool opt_abort_conf =
38 #ifdef JEMALLOC_DEBUG
39 true
40 #else
41 false
42 #endif
43 ;
44 const char *opt_junk =
45 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
46 "true"
47 #else
48 "false"
49 #endif
50 ;
51 bool opt_junk_alloc =
52 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
53 true
54 #else
55 false
56 #endif
57 ;
58 bool opt_junk_free =
59 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
60 true
61 #else
62 false
63 #endif
64 ;
65
66 bool opt_utrace = false;
67 bool opt_xmalloc = false;
68 bool opt_zero = false;
69 unsigned opt_narenas = 0;
70
71 unsigned ncpus;
72
73 /* Protects arenas initialization. */
74 malloc_mutex_t arenas_lock;
75 /*
76 * Arenas that are used to service external requests. Not all elements of the
77 * arenas array are necessarily used; arenas are created lazily as needed.
78 *
79 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
80 * arenas. arenas[narenas_auto..narenas_total) are only used if the application
81 * takes some action to create them and allocate from them.
82 *
83 * Points to an arena_t.
84 */
85 JEMALLOC_ALIGNED(CACHELINE)
86 atomic_p_t arenas[MALLOCX_ARENA_LIMIT];
87 static atomic_u_t narenas_total; /* Use narenas_total_*(). */
88 static arena_t *a0; /* arenas[0]; read-only after initialization. */
89 unsigned narenas_auto; /* Read-only after initialization. */
90
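/*
 * Initialization progresses uninitialized -> a0_initialized -> recursible ->
 * initialized (see malloc_init_hard()); the enum values are ordered so that
 * the common fully-initialized check is a comparison against 0.
 */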
91 typedef enum {
92 malloc_init_uninitialized = 3,
93 malloc_init_a0_initialized = 2,
94 malloc_init_recursible = 1,
95 malloc_init_initialized = 0 /* Common case --> jnz. */
96 } malloc_init_t;
97 static malloc_init_t malloc_init_state = malloc_init_uninitialized;
98
99 /* False should be the common case. Set to true to trigger initialization. */
100 bool malloc_slow = true;
101
102 /* When malloc_slow is true, set the corresponding bits for sanity check. */
103 enum {
104 flag_opt_junk_alloc = (1U),
105 flag_opt_junk_free = (1U << 1),
106 flag_opt_zero = (1U << 2),
107 flag_opt_utrace = (1U << 3),
108 flag_opt_xmalloc = (1U << 4)
109 };
110 static uint8_t malloc_slow_flags;
111
112 #ifdef JEMALLOC_THREADED_INIT
113 /* Used to let the initializing thread recursively allocate. */
114 # define NO_INITIALIZER ((unsigned long)0)
115 # define INITIALIZER pthread_self()
116 # define IS_INITIALIZER (malloc_initializer == pthread_self())
117 static pthread_t malloc_initializer = NO_INITIALIZER;
118 #else
119 # define NO_INITIALIZER false
120 # define INITIALIZER true
121 # define IS_INITIALIZER malloc_initializer
122 static bool malloc_initializer = NO_INITIALIZER;
123 #endif
124
125 /* Used to avoid initialization races. */
126 #ifdef _WIN32
127 #if _WIN32_WINNT >= 0x0600
128 static malloc_mutex_t init_lock = SRWLOCK_INIT;
129 #else
130 static malloc_mutex_t init_lock;
131 static bool init_lock_initialized = false;
132
133 JEMALLOC_ATTR(constructor)
134 static void WINAPI
135 _init_init_lock(void) {
136 /*
137 * If another constructor in the same binary is using mallctl to e.g.
138 * set up extent hooks, it may end up running before this one, and
139 * malloc_init_hard will crash trying to lock the uninitialized lock. So
140 * we force an initialization of the lock in malloc_init_hard as well.
141 * We don't try to care about atomicity of the accesses to the
142 * init_lock_initialized boolean, since it really only matters early in
143 * the process creation, before any separate thread normally starts
144 * doing anything.
145 */
146 if (!init_lock_initialized) {
147 malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
148 malloc_mutex_rank_exclusive);
149 }
150 init_lock_initialized = true;
151 }
152
153 #ifdef _MSC_VER
154 # pragma section(".CRT$XCU", read)
155 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
156 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
157 #endif
158 #endif
159 #else
160 static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
161 #endif
162
163 typedef struct {
164 void *p; /* Input pointer (as in realloc(p, s)). */
165 size_t s; /* Request size. */
166 void *r; /* Result pointer. */
167 } malloc_utrace_t;
168
169 #ifdef JEMALLOC_UTRACE
170 # define UTRACE(a, b, c) do { \
171 if (unlikely(opt_utrace)) { \
172 int utrace_serrno = errno; \
173 malloc_utrace_t ut; \
174 ut.p = (a); \
175 ut.s = (b); \
176 ut.r = (c); \
177 utrace(&ut, sizeof(ut)); \
178 errno = utrace_serrno; \
179 } \
180 } while (0)
181 #else
182 # define UTRACE(a, b, c)
183 #endif
184
185 /* Whether encountered any invalid config options. */
186 static bool had_conf_error = false;
187
188 /******************************************************************************/
189 /*
190 * Function prototypes for static functions that are referenced prior to
191 * definition.
192 */
193
194 static bool malloc_init_hard_a0(void);
195 static bool malloc_init_hard(void);
196
197 /******************************************************************************/
198 /*
199 * Begin miscellaneous support functions.
200 */
201
202 bool
203 malloc_initialized(void) {
204 return (malloc_init_state == malloc_init_initialized);
205 }
206
207 JEMALLOC_ALWAYS_INLINE bool
208 malloc_init_a0(void) {
209 if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
210 return malloc_init_hard_a0();
211 }
212 return false;
213 }
214
215 JEMALLOC_ALWAYS_INLINE bool
216 malloc_init(void) {
217 if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
218 return true;
219 }
220 return false;
221 }
222
223 /*
224 * The a0*() functions are used instead of i{d,}alloc() in situations that
225 * cannot tolerate TLS variable access.
226 */
227
228 static void *
229 a0ialloc(size_t size, bool zero, bool is_internal) {
230 if (unlikely(malloc_init_a0())) {
231 return NULL;
232 }
233
234 return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
235 is_internal, arena_get(TSDN_NULL, 0, true), true);
236 }
237
238 static void
239 a0idalloc(void *ptr, bool is_internal) {
240 idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
241 }
242
243 void *
244 a0malloc(size_t size) {
245 return a0ialloc(size, false, true);
246 }
247
248 void
249 a0dalloc(void *ptr) {
250 a0idalloc(ptr, true);
251 }
252
253 /*
254 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
255 * situations that cannot tolerate TLS variable access (TLS allocation and very
256 * early internal data structure initialization).
257 */
258
259 void *
260 bootstrap_malloc(size_t size) {
261 if (unlikely(size == 0)) {
262 size = 1;
263 }
264
265 return a0ialloc(size, false, false);
266 }
267
268 void *
269 bootstrap_calloc(size_t num, size_t size) {
270 size_t num_size;
271
272 num_size = num * size;
273 if (unlikely(num_size == 0)) {
274 assert(num == 0 || size == 0);
275 num_size = 1;
276 }
277
278 return a0ialloc(num_size, true, false);
279 }
280
281 void
282 bootstrap_free(void *ptr) {
283 if (unlikely(ptr == NULL)) {
284 return;
285 }
286
287 a0idalloc(ptr, false);
288 }
289
290 void
291 arena_set(unsigned ind, arena_t *arena) {
292 atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
293 }
294
295 static void
296 narenas_total_set(unsigned narenas) {
297 atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
298 }
299
300 static void
301 narenas_total_inc(void) {
302 atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
303 }
304
305 unsigned
306 narenas_total_get(void) {
307 return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
308 }
309
310 /* Create a new arena and insert it into the arenas array at index ind. */
311 static arena_t *
312 arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
313 arena_t *arena;
314
315 assert(ind <= narenas_total_get());
316 if (ind >= MALLOCX_ARENA_LIMIT) {
317 return NULL;
318 }
319 if (ind == narenas_total_get()) {
320 narenas_total_inc();
321 }
322
323 /*
324 * Another thread may have already initialized arenas[ind] if it's an
325 * auto arena.
326 */
327 arena = arena_get(tsdn, ind, false);
328 if (arena != NULL) {
329 assert(ind < narenas_auto);
330 return arena;
331 }
332
333 /* Actually initialize the arena. */
334 arena = arena_new(tsdn, ind, extent_hooks);
335
336 return arena;
337 }
338
339 static void
340 arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
341 if (ind == 0) {
342 return;
343 }
344 if (have_background_thread) {
345 bool err;
346 malloc_mutex_lock(tsdn, &background_thread_lock);
347 err = background_thread_create(tsdn_tsd(tsdn), ind);
348 malloc_mutex_unlock(tsdn, &background_thread_lock);
349 if (err) {
350 malloc_printf("<jemalloc>: error in background thread "
351 "creation for arena %u. Abort.\n", ind);
352 abort();
353 }
354 }
355 }
356
357 arena_t *
358 arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
359 arena_t *arena;
360
361 malloc_mutex_lock(tsdn, &arenas_lock);
362 arena = arena_init_locked(tsdn, ind, extent_hooks);
363 malloc_mutex_unlock(tsdn, &arenas_lock);
364
365 arena_new_create_background_thread(tsdn, ind);
366
367 return arena;
368 }
369
370 static void
371 arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
372 arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
373 arena_nthreads_inc(arena, internal);
374
375 if (internal) {
376 tsd_iarena_set(tsd, arena);
377 } else {
378 tsd_arena_set(tsd, arena);
379 }
380 }
381
382 void
383 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
384 arena_t *oldarena, *newarena;
385
386 oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
387 newarena = arena_get(tsd_tsdn(tsd), newind, false);
388 arena_nthreads_dec(oldarena, false);
389 arena_nthreads_inc(newarena, false);
390 tsd_arena_set(tsd, newarena);
391 }
392
393 static void
394 arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
395 arena_t *arena;
396
397 arena = arena_get(tsd_tsdn(tsd), ind, false);
398 arena_nthreads_dec(arena, internal);
399
400 if (internal) {
401 tsd_iarena_set(tsd, NULL);
402 } else {
403 tsd_arena_set(tsd, NULL);
404 }
405 }
406
407 arena_tdata_t *
408 arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
409 arena_tdata_t *tdata, *arenas_tdata_old;
410 arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
411 unsigned narenas_tdata_old, i;
412 unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
413 unsigned narenas_actual = narenas_total_get();
414
415 /*
416 * Dissociate old tdata array (and set up for deallocation upon return)
417 * if it's too small.
418 */
419 if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
420 arenas_tdata_old = arenas_tdata;
421 narenas_tdata_old = narenas_tdata;
422 arenas_tdata = NULL;
423 narenas_tdata = 0;
424 tsd_arenas_tdata_set(tsd, arenas_tdata);
425 tsd_narenas_tdata_set(tsd, narenas_tdata);
426 } else {
427 arenas_tdata_old = NULL;
428 narenas_tdata_old = 0;
429 }
430
431 /* Allocate tdata array if it's missing. */
432 if (arenas_tdata == NULL) {
433 bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
434 narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
435
436 if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
437 *arenas_tdata_bypassp = true;
438 arenas_tdata = (arena_tdata_t *)a0malloc(
439 sizeof(arena_tdata_t) * narenas_tdata);
440 *arenas_tdata_bypassp = false;
441 }
442 if (arenas_tdata == NULL) {
443 tdata = NULL;
444 goto label_return;
445 }
446 assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
447 tsd_arenas_tdata_set(tsd, arenas_tdata);
448 tsd_narenas_tdata_set(tsd, narenas_tdata);
449 }
450
451 /*
452 * Copy to tdata array. It's possible that the actual number of arenas
453 * has increased since narenas_total_get() was called above, but that
454 * causes no correctness issues unless two threads concurrently execute
455 * the arenas.create mallctl, which we trust mallctl synchronization to
456 * prevent.
457 */
458
459 /* Copy/initialize tickers. */
460 for (i = 0; i < narenas_actual; i++) {
461 if (i < narenas_tdata_old) {
462 ticker_copy(&arenas_tdata[i].decay_ticker,
463 &arenas_tdata_old[i].decay_ticker);
464 } else {
465 ticker_init(&arenas_tdata[i].decay_ticker,
466 DECAY_NTICKS_PER_UPDATE);
467 }
468 }
469 if (narenas_tdata > narenas_actual) {
470 memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
471 * (narenas_tdata - narenas_actual));
472 }
473
474 /* Read the refreshed tdata array. */
475 tdata = &arenas_tdata[ind];
476 label_return:
477 if (arenas_tdata_old != NULL) {
478 a0dalloc(arenas_tdata_old);
479 }
480 return tdata;
481 }
482
483 /* Slow path, called only by arena_choose(). */
484 arena_t *
485 arena_choose_hard(tsd_t *tsd, bool internal) {
486 arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
487
488 if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
489 unsigned choose = percpu_arena_choose();
490 ret = arena_get(tsd_tsdn(tsd), choose, true);
491 assert(ret != NULL);
492 arena_bind(tsd, arena_ind_get(ret), false);
493 arena_bind(tsd, arena_ind_get(ret), true);
494
495 return ret;
496 }
497
498 if (narenas_auto > 1) {
499 unsigned i, j, choose[2], first_null;
500 bool is_new_arena[2];
501
502 /*
503 * Determine binding for both non-internal and internal
504 * allocation.
505 *
506 * choose[0]: For application allocation.
507 * choose[1]: For internal metadata allocation.
508 */
509
510 for (j = 0; j < 2; j++) {
511 choose[j] = 0;
512 is_new_arena[j] = false;
513 }
514
515 first_null = narenas_auto;
516 malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
517 assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
518 for (i = 1; i < narenas_auto; i++) {
519 if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
520 /*
521 * Choose the first arena that has the lowest
522 * number of threads assigned to it.
523 */
524 for (j = 0; j < 2; j++) {
525 if (arena_nthreads_get(arena_get(
526 tsd_tsdn(tsd), i, false), !!j) <
527 arena_nthreads_get(arena_get(
528 tsd_tsdn(tsd), choose[j], false),
529 !!j)) {
530 choose[j] = i;
531 }
532 }
533 } else if (first_null == narenas_auto) {
534 /*
535 * Record the index of the first uninitialized
536 * arena, in case all extant arenas are in use.
537 *
538 * NB: It is possible for there to be
539 * discontinuities in terms of initialized
540 * versus uninitialized arenas, due to the
541 * "thread.arena" mallctl.
542 */
543 first_null = i;
544 }
545 }
546
547 for (j = 0; j < 2; j++) {
548 if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
549 choose[j], false), !!j) == 0 || first_null ==
550 narenas_auto) {
551 /*
552 * Use an unloaded arena, or the least loaded
553 * arena if all arenas are already initialized.
554 */
555 if (!!j == internal) {
556 ret = arena_get(tsd_tsdn(tsd),
557 choose[j], false);
558 }
559 } else {
560 arena_t *arena;
561
562 /* Initialize a new arena. */
563 choose[j] = first_null;
564 arena = arena_init_locked(tsd_tsdn(tsd),
565 choose[j],
566 (extent_hooks_t *)&extent_hooks_default);
567 if (arena == NULL) {
568 malloc_mutex_unlock(tsd_tsdn(tsd),
569 &arenas_lock);
570 return NULL;
571 }
572 is_new_arena[j] = true;
573 if (!!j == internal) {
574 ret = arena;
575 }
576 }
577 arena_bind(tsd, choose[j], !!j);
578 }
579 malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
580
581 for (j = 0; j < 2; j++) {
582 if (is_new_arena[j]) {
583 assert(choose[j] > 0);
584 arena_new_create_background_thread(
585 tsd_tsdn(tsd), choose[j]);
586 }
587 }
588
589 } else {
590 ret = arena_get(tsd_tsdn(tsd), 0, false);
591 arena_bind(tsd, 0, false);
592 arena_bind(tsd, 0, true);
593 }
594
595 return ret;
596 }
597
598 void
599 iarena_cleanup(tsd_t *tsd) {
600 arena_t *iarena;
601
602 iarena = tsd_iarena_get(tsd);
603 if (iarena != NULL) {
604 arena_unbind(tsd, arena_ind_get(iarena), true);
605 }
606 }
607
608 void
609 arena_cleanup(tsd_t *tsd) {
610 arena_t *arena;
611
612 arena = tsd_arena_get(tsd);
613 if (arena != NULL) {
614 arena_unbind(tsd, arena_ind_get(arena), false);
615 }
616 }
617
618 void
619 arenas_tdata_cleanup(tsd_t *tsd) {
620 arena_tdata_t *arenas_tdata;
621
622 /* Prevent tsd->arenas_tdata from being (re)created. */
623 *tsd_arenas_tdata_bypassp_get(tsd) = true;
624
625 arenas_tdata = tsd_arenas_tdata_get(tsd);
626 if (arenas_tdata != NULL) {
627 tsd_arenas_tdata_set(tsd, NULL);
628 a0dalloc(arenas_tdata);
629 }
630 }
631
632 static void
633 stats_print_atexit(void) {
634 if (config_stats) {
635 tsdn_t *tsdn;
636 unsigned narenas, i;
637
638 tsdn = tsdn_fetch();
639
640 /*
641 * Merge stats from extant threads. This is racy, since
642 * individual threads do not lock when recording tcache stats
643 * events. As a consequence, the final stats may be slightly
644 * out of date by the time they are reported, if other threads
645 * continue to allocate.
646 */
647 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
648 arena_t *arena = arena_get(tsdn, i, false);
649 if (arena != NULL) {
650 tcache_t *tcache;
651
652 malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
653 ql_foreach(tcache, &arena->tcache_ql, link) {
654 tcache_stats_merge(tsdn, tcache, arena);
655 }
656 malloc_mutex_unlock(tsdn,
657 &arena->tcache_ql_mtx);
658 }
659 }
660 }
661 je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
662 }
663
664 /*
665 * Ensure that we don't hold any locks upon entry to or exit from allocator
666 * code (in a "broad" sense that doesn't count a reentrant allocation as an
667 * entrance or exit).
668 */
669 JEMALLOC_ALWAYS_INLINE void
670 check_entry_exit_locking(tsdn_t *tsdn) {
671 if (!config_debug) {
672 return;
673 }
674 if (tsdn_null(tsdn)) {
675 return;
676 }
677 tsd_t *tsd = tsdn_tsd(tsdn);
678 /*
679 * It's possible we hold locks at entry/exit if we're in a nested
680 * allocation.
681 */
682 int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
683 if (reentrancy_level != 0) {
684 return;
685 }
686 witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
687 }
688
689 /*
690 * End miscellaneous support functions.
691 */
692 /******************************************************************************/
693 /*
694 * Begin initialization functions.
695 */
696
697 static char *
698 jemalloc_secure_getenv(const char *name) {
699 #ifdef JEMALLOC_HAVE_SECURE_GETENV
700 return secure_getenv(name);
701 #else
702 # ifdef JEMALLOC_HAVE_ISSETUGID
703 if (issetugid() != 0) {
704 return NULL;
705 }
706 # endif
707 return getenv(name);
708 #endif
709 }
710
711 static unsigned
712 malloc_ncpus(void) {
713 long result;
714
715 #ifdef _WIN32
716 SYSTEM_INFO si;
717 GetSystemInfo(&si);
718 result = si.dwNumberOfProcessors;
719 #elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
720 /*
721 * glibc >= 2.6 has the CPU_COUNT macro.
722 *
723 * glibc's sysconf() uses isspace(). glibc allocates for the first time
724 * *before* setting up the isspace tables. Therefore we need a
725 * different method to get the number of CPUs.
726 */
727 {
728 cpu_set_t set;
729
730 pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
731 result = CPU_COUNT(&set);
732 }
733 #else
734 result = sysconf(_SC_NPROCESSORS_ONLN);
735 #endif
736 return ((result == -1) ? 1 : (unsigned)result);
737 }
738
739 static void
740 init_opt_stats_print_opts(const char *v, size_t vlen) {
741 size_t opts_len = strlen(opt_stats_print_opts);
742 assert(opts_len <= stats_print_tot_num_options);
743
744 for (size_t i = 0; i < vlen; i++) {
745 switch (v[i]) {
746 #define OPTION(o, v, d, s) case o: break;
747 STATS_PRINT_OPTIONS
748 #undef OPTION
749 default: continue;
750 }
751
752 if (strchr(opt_stats_print_opts, v[i]) != NULL) {
753 /* Ignore repeated. */
754 continue;
755 }
756
757 opt_stats_print_opts[opts_len++] = v[i];
758 opt_stats_print_opts[opts_len] = '\0';
759 assert(opts_len <= stats_print_tot_num_options);
760 }
761 assert(opts_len == strlen(opt_stats_print_opts));
762 }
763
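/*
 * Parse the next "key:value" pair from a conf string. For illustration, a
 * string such as "narenas:4,dirty_decay_ms:5000,stats_print:true" (all keys
 * handled in malloc_conf_init() below) yields one key/value pair per call;
 * a malformed pair or a trailing key/comma produces a diagnostic.
 */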
764 static bool
765 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
766 char const **v_p, size_t *vlen_p) {
767 bool accept;
768 const char *opts = *opts_p;
769
770 *k_p = opts;
771
772 for (accept = false; !accept;) {
773 switch (*opts) {
774 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
775 case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
776 case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
777 case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
778 case 'Y': case 'Z':
779 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
780 case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
781 case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
782 case 's': case 't': case 'u': case 'v': case 'w': case 'x':
783 case 'y': case 'z':
784 case '0': case '1': case '2': case '3': case '4': case '5':
785 case '6': case '7': case '8': case '9':
786 case '_':
787 opts++;
788 break;
789 case ':':
790 opts++;
791 *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
792 *v_p = opts;
793 accept = true;
794 break;
795 case '\0':
796 if (opts != *opts_p) {
797 malloc_write("<jemalloc>: Conf string ends "
798 "with key\n");
799 }
800 return true;
801 default:
802 malloc_write("<jemalloc>: Malformed conf string\n");
803 return true;
804 }
805 }
806
807 for (accept = false; !accept;) {
808 switch (*opts) {
809 case ',':
810 opts++;
811 /*
812 * Look ahead one character here, because the next time
813 * this function is called, it will assume that end of
814 * input has been cleanly reached if no input remains,
815 * but we have optimistically already consumed the
816 * comma if one exists.
817 */
818 if (*opts == '\0') {
819 malloc_write("<jemalloc>: Conf string ends "
820 "with comma\n");
821 }
822 *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
823 accept = true;
824 break;
825 case '\0':
826 *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
827 accept = true;
828 break;
829 default:
830 opts++;
831 break;
832 }
833 }
834
835 *opts_p = opts;
836 return false;
837 }
838
839 static void
840 malloc_abort_invalid_conf(void) {
841 assert(opt_abort_conf);
842 malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
843 "value (see above).\n");
844 abort();
845 }
846
847 static void
848 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
849 size_t vlen) {
850 malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
851 (int)vlen, v);
852 /* If abort_conf is set, error out after processing all options. */
853 had_conf_error = true;
854 }
855
856 static void
857 malloc_slow_flag_init(void) {
858 /*
859 * Combine the runtime options into malloc_slow for fast path. Called
860 * after processing all the options.
861 */
862 malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
863 | (opt_junk_free ? flag_opt_junk_free : 0)
864 | (opt_zero ? flag_opt_zero : 0)
865 | (opt_utrace ? flag_opt_utrace : 0)
866 | (opt_xmalloc ? flag_opt_xmalloc : 0);
867
868 malloc_slow = (malloc_slow_flags != 0);
869 }
870
871 static void
872 malloc_conf_init(void) {
873 unsigned i;
874 char buf[PATH_MAX + 1];
875 const char *opts, *k, *v;
876 size_t klen, vlen;
877
878 for (i = 0; i < 4; i++) {
879 /* Get runtime configuration. */
880 switch (i) {
881 case 0:
882 opts = config_malloc_conf;
883 break;
884 case 1:
885 if (je_malloc_conf != NULL) {
886 /*
887 * Use options that were compiled into the
888 * program.
889 */
890 opts = je_malloc_conf;
891 } else {
892 /* No configuration specified. */
893 buf[0] = '\0';
894 opts = buf;
895 }
896 break;
897 case 2: {
898 ssize_t linklen = 0;
899 #ifndef _WIN32
900 int saved_errno = errno;
901 const char *linkname =
902 # ifdef JEMALLOC_PREFIX
903 "/etc/"JEMALLOC_PREFIX"malloc.conf"
904 # else
905 "/etc/malloc.conf"
906 # endif
907 ;
908
909 /*
910 * Try to use the contents of the "/etc/malloc.conf"
911 * symbolic link's name.
912 */
913 linklen = readlink(linkname, buf, sizeof(buf) - 1);
914 if (linklen == -1) {
915 /* No configuration specified. */
916 linklen = 0;
917 /* Restore errno. */
918 set_errno(saved_errno);
919 }
920 #endif
921 buf[linklen] = '\0';
922 opts = buf;
923 break;
924 } case 3: {
925 const char *envname =
926 #ifdef JEMALLOC_PREFIX
927 JEMALLOC_CPREFIX"MALLOC_CONF"
928 #else
929 "MALLOC_CONF"
930 #endif
931 ;
932
933 if ((opts = jemalloc_secure_getenv(envname)) != NULL) {
934 /*
935 * Do nothing; opts is already initialized to
936 * the value of the MALLOC_CONF environment
937 * variable.
938 */
939 } else {
940 /* No configuration specified. */
941 buf[0] = '\0';
942 opts = buf;
943 }
944 break;
945 } default:
946 not_reached();
947 buf[0] = '\0';
948 opts = buf;
949 }
950
951 while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
952 &vlen)) {
953 #define CONF_MATCH(n) \
954 (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
955 #define CONF_MATCH_VALUE(n) \
956 (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
957 #define CONF_HANDLE_BOOL(o, n) \
958 if (CONF_MATCH(n)) { \
959 if (CONF_MATCH_VALUE("true")) { \
960 o = true; \
961 } else if (CONF_MATCH_VALUE("false")) { \
962 o = false; \
963 } else { \
964 malloc_conf_error( \
965 "Invalid conf value", \
966 k, klen, v, vlen); \
967 } \
968 continue; \
969 }
970 #define CONF_MIN_no(um, min) false
971 #define CONF_MIN_yes(um, min) ((um) < (min))
972 #define CONF_MAX_no(um, max) false
973 #define CONF_MAX_yes(um, max) ((um) > (max))
974 #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
975 if (CONF_MATCH(n)) { \
976 uintmax_t um; \
977 char *end; \
978 \
979 set_errno(0); \
980 um = malloc_strtoumax(v, &end, 0); \
981 if (get_errno() != 0 || (uintptr_t)end -\
982 (uintptr_t)v != vlen) { \
983 malloc_conf_error( \
984 "Invalid conf value", \
985 k, klen, v, vlen); \
986 } else if (clip) { \
987 if (CONF_MIN_##check_min(um, \
988 (t)(min))) { \
989 o = (t)(min); \
990 } else if ( \
991 CONF_MAX_##check_max(um, \
992 (t)(max))) { \
993 o = (t)(max); \
994 } else { \
995 o = (t)um; \
996 } \
997 } else { \
998 if (CONF_MIN_##check_min(um, \
999 (t)(min)) || \
1000 CONF_MAX_##check_max(um, \
1001 (t)(max))) { \
1002 malloc_conf_error( \
1003 "Out-of-range " \
1004 "conf value", \
1005 k, klen, v, vlen); \
1006 } else { \
1007 o = (t)um; \
1008 } \
1009 } \
1010 continue; \
1011 }
1012 #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
1013 clip) \
1014 CONF_HANDLE_T_U(unsigned, o, n, min, max, \
1015 check_min, check_max, clip)
1016 #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
1017 CONF_HANDLE_T_U(size_t, o, n, min, max, \
1018 check_min, check_max, clip)
1019 #define CONF_HANDLE_SSIZE_T(o, n, min, max) \
1020 if (CONF_MATCH(n)) { \
1021 long l; \
1022 char *end; \
1023 \
1024 set_errno(0); \
1025 l = strtol(v, &end, 0); \
1026 if (get_errno() != 0 || (uintptr_t)end -\
1027 (uintptr_t)v != vlen) { \
1028 malloc_conf_error( \
1029 "Invalid conf value", \
1030 k, klen, v, vlen); \
1031 } else if (l < (ssize_t)(min) || l > \
1032 (ssize_t)(max)) { \
1033 malloc_conf_error( \
1034 "Out-of-range conf value", \
1035 k, klen, v, vlen); \
1036 } else { \
1037 o = l; \
1038 } \
1039 continue; \
1040 }
1041 #define CONF_HANDLE_CHAR_P(o, n, d) \
1042 if (CONF_MATCH(n)) { \
1043 size_t cpylen = (vlen <= \
1044 sizeof(o)-1) ? vlen : \
1045 sizeof(o)-1; \
1046 strncpy(o, v, cpylen); \
1047 o[cpylen] = '\0'; \
1048 continue; \
1049 }
1050
1051 CONF_HANDLE_BOOL(opt_abort, "abort")
1052 CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
1053 if (strncmp("metadata_thp", k, klen) == 0) {
1054 int i;
1055 bool match = false;
1056 for (i = 0; i < metadata_thp_mode_limit; i++) {
1057 if (strncmp(metadata_thp_mode_names[i],
1058 v, vlen) == 0) {
1059 opt_metadata_thp = i;
1060 match = true;
1061 break;
1062 }
1063 }
1064 if (!match) {
1065 malloc_conf_error("Invalid conf value",
1066 k, klen, v, vlen);
1067 }
1068 continue;
1069 }
1070 CONF_HANDLE_BOOL(opt_retain, "retain")
1071 if (strncmp("dss", k, klen) == 0) {
1072 int i;
1073 bool match = false;
1074 for (i = 0; i < dss_prec_limit; i++) {
1075 if (strncmp(dss_prec_names[i], v, vlen)
1076 == 0) {
1077 if (extent_dss_prec_set(i)) {
1078 malloc_conf_error(
1079 "Error setting dss",
1080 k, klen, v, vlen);
1081 } else {
1082 opt_dss =
1083 dss_prec_names[i];
1084 match = true;
1085 break;
1086 }
1087 }
1088 }
1089 if (!match) {
1090 malloc_conf_error("Invalid conf value",
1091 k, klen, v, vlen);
1092 }
1093 continue;
1094 }
1095 CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
1096 UINT_MAX, yes, no, false)
1097 CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
1098 "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1099 QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1100 SSIZE_MAX);
1101 CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
1102 "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1103 QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1104 SSIZE_MAX);
1105 CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
1106 if (CONF_MATCH("stats_print_opts")) {
1107 init_opt_stats_print_opts(v, vlen);
1108 continue;
1109 }
1110 if (config_fill) {
1111 if (CONF_MATCH("junk")) {
1112 if (CONF_MATCH_VALUE("true")) {
1113 opt_junk = "true";
1114 opt_junk_alloc = opt_junk_free =
1115 true;
1116 } else if (CONF_MATCH_VALUE("false")) {
1117 opt_junk = "false";
1118 opt_junk_alloc = opt_junk_free =
1119 false;
1120 } else if (CONF_MATCH_VALUE("alloc")) {
1121 opt_junk = "alloc";
1122 opt_junk_alloc = true;
1123 opt_junk_free = false;
1124 } else if (CONF_MATCH_VALUE("free")) {
1125 opt_junk = "free";
1126 opt_junk_alloc = false;
1127 opt_junk_free = true;
1128 } else {
1129 malloc_conf_error(
1130 "Invalid conf value", k,
1131 klen, v, vlen);
1132 }
1133 continue;
1134 }
1135 CONF_HANDLE_BOOL(opt_zero, "zero")
1136 }
1137 if (config_utrace) {
1138 CONF_HANDLE_BOOL(opt_utrace, "utrace")
1139 }
1140 if (config_xmalloc) {
1141 CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
1142 }
1143 CONF_HANDLE_BOOL(opt_tcache, "tcache")
1144 CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
1145 "lg_extent_max_active_fit", 0,
1146 (sizeof(size_t) << 3), yes, yes, false)
1147 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
1148 -1, (sizeof(size_t) << 3) - 1)
1149 if (strncmp("percpu_arena", k, klen) == 0) {
1150 bool match = false;
1151 for (int i = percpu_arena_mode_names_base; i <
1152 percpu_arena_mode_names_limit; i++) {
1153 if (strncmp(percpu_arena_mode_names[i],
1154 v, vlen) == 0) {
1155 if (!have_percpu_arena) {
1156 malloc_conf_error(
1157 "No getcpu support",
1158 k, klen, v, vlen);
1159 }
1160 opt_percpu_arena = i;
1161 match = true;
1162 break;
1163 }
1164 }
1165 if (!match) {
1166 malloc_conf_error("Invalid conf value",
1167 k, klen, v, vlen);
1168 }
1169 continue;
1170 }
1171 CONF_HANDLE_BOOL(opt_background_thread,
1172 "background_thread");
1173 CONF_HANDLE_SIZE_T(opt_max_background_threads,
1174 "max_background_threads", 1,
1175 opt_max_background_threads, yes, yes,
1176 true);
1177 if (config_prof) {
1178 CONF_HANDLE_BOOL(opt_prof, "prof")
1179 CONF_HANDLE_CHAR_P(opt_prof_prefix,
1180 "prof_prefix", "jeprof")
1181 CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
1182 CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1183 "prof_thread_active_init")
1184 CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1185 "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
1186 - 1, no, yes, true)
1187 CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
1188 CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1189 "lg_prof_interval", -1,
1190 (sizeof(uint64_t) << 3) - 1)
1191 CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
1192 CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
1193 CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
1194 }
1195 if (config_log) {
1196 if (CONF_MATCH("log")) {
1197 size_t cpylen = (
1198 vlen <= sizeof(log_var_names) ?
1199 vlen : sizeof(log_var_names) - 1);
1200 strncpy(log_var_names, v, cpylen);
1201 log_var_names[cpylen] = '\0';
1202 continue;
1203 }
1204 }
1205 if (CONF_MATCH("thp")) {
1206 bool match = false;
1207 for (int i = 0; i < thp_mode_names_limit; i++) {
1208 if (strncmp(thp_mode_names[i],v, vlen)
1209 == 0) {
1210 if (!have_madvise_huge) {
1211 malloc_conf_error(
1212 "No THP support",
1213 k, klen, v, vlen);
1214 }
1215 opt_thp = i;
1216 match = true;
1217 break;
1218 }
1219 }
1220 if (!match) {
1221 malloc_conf_error("Invalid conf value",
1222 k, klen, v, vlen);
1223 }
1224 continue;
1225 }
1226 malloc_conf_error("Invalid conf pair", k, klen, v,
1227 vlen);
1228 #undef CONF_MATCH
1229 #undef CONF_MATCH_VALUE
1230 #undef CONF_HANDLE_BOOL
1231 #undef CONF_MIN_no
1232 #undef CONF_MIN_yes
1233 #undef CONF_MAX_no
1234 #undef CONF_MAX_yes
1235 #undef CONF_HANDLE_T_U
1236 #undef CONF_HANDLE_UNSIGNED
1237 #undef CONF_HANDLE_SIZE_T
1238 #undef CONF_HANDLE_SSIZE_T
1239 #undef CONF_HANDLE_CHAR_P
1240 }
1241 if (opt_abort_conf && had_conf_error) {
1242 malloc_abort_invalid_conf();
1243 }
1244 }
1245 atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
1246 }
1247
1248 static bool
1249 malloc_init_hard_needed(void) {
1250 if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1251 malloc_init_recursible)) {
1252 /*
1253 * Another thread initialized the allocator before this one
1254 * acquired init_lock, or this thread is the initializing
1255 * thread, and it is recursively allocating.
1256 */
1257 return false;
1258 }
1259 #ifdef JEMALLOC_THREADED_INIT
1260 if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1261 /* Busy-wait until the initializing thread completes. */
1262 spin_t spinner = SPIN_INITIALIZER;
1263 do {
1264 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1265 spin_adaptive(&spinner);
1266 malloc_mutex_lock(TSDN_NULL, &init_lock);
1267 } while (!malloc_initialized());
1268 return false;
1269 }
1270 #endif
1271 return true;
1272 }
1273
1274 static bool
1275 malloc_init_hard_a0_locked() {
1276 malloc_initializer = INITIALIZER;
1277
1278 if (config_prof) {
1279 prof_boot0();
1280 }
1281 malloc_conf_init();
1282 if (opt_stats_print) {
1283 /* Print statistics at exit. */
1284 if (atexit(stats_print_atexit) != 0) {
1285 malloc_write("<jemalloc>: Error in atexit()\n");
1286 if (opt_abort) {
1287 abort();
1288 }
1289 }
1290 }
1291 if (pages_boot()) {
1292 return true;
1293 }
1294 if (base_boot(TSDN_NULL)) {
1295 return true;
1296 }
1297 if (extent_boot()) {
1298 return true;
1299 }
1300 if (ctl_boot()) {
1301 return true;
1302 }
1303 if (config_prof) {
1304 prof_boot1();
1305 }
1306 arena_boot();
1307 if (tcache_boot(TSDN_NULL)) {
1308 return true;
1309 }
1310 if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
1311 malloc_mutex_rank_exclusive)) {
1312 return true;
1313 }
1314 /*
1315 * Create enough scaffolding to allow recursive allocation in
1316 * malloc_ncpus().
1317 */
1318 narenas_auto = 1;
1319 memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1320 /*
1321 * Initialize one arena here. The rest are lazily created in
1322 * arena_choose_hard().
1323 */
1324 if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default)
1325 == NULL) {
1326 return true;
1327 }
1328 a0 = arena_get(TSDN_NULL, 0, false);
1329 malloc_init_state = malloc_init_a0_initialized;
1330
1331 return false;
1332 }
1333
1334 static bool
1335 malloc_init_hard_a0(void) {
1336 bool ret;
1337
1338 malloc_mutex_lock(TSDN_NULL, &init_lock);
1339 ret = malloc_init_hard_a0_locked();
1340 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1341 return ret;
1342 }
1343
1344 /* Initialize data structures which may trigger recursive allocation. */
1345 static bool
1346 malloc_init_hard_recursible(void) {
1347 malloc_init_state = malloc_init_recursible;
1348
1349 ncpus = malloc_ncpus();
1350
1351 #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
1352 && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
1353 !defined(__native_client__))
1354 /* LinuxThreads' pthread_atfork() allocates. */
1355 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1356 jemalloc_postfork_child) != 0) {
1357 malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1358 if (opt_abort) {
1359 abort();
1360 }
1361 return true;
1362 }
1363 #endif
1364
1365 if (background_thread_boot0()) {
1366 return true;
1367 }
1368
1369 return false;
1370 }
1371
1372 static unsigned
1373 malloc_narenas_default(void) {
1374 assert(ncpus > 0);
1375 /*
1376 * For SMP systems, create more than one arena per CPU by
1377 * default.
1378 */
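/* For illustration: 8 CPUs -> 8 << 2 == 32 arenas; a single CPU -> 1 arena. */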
1379 if (ncpus > 1) {
1380 return ncpus << 2;
1381 } else {
1382 return 1;
1383 }
1384 }
1385
1386 static percpu_arena_mode_t
1387 percpu_arena_as_initialized(percpu_arena_mode_t mode) {
1388 assert(!malloc_initialized());
1389 assert(mode <= percpu_arena_disabled);
1390
1391 if (mode != percpu_arena_disabled) {
1392 mode += percpu_arena_mode_enabled_base;
1393 }
1394
1395 return mode;
1396 }
1397
1398 static bool
1399 malloc_init_narenas(void) {
1400 assert(ncpus > 0);
1401
1402 if (opt_percpu_arena != percpu_arena_disabled) {
1403 if (!have_percpu_arena || malloc_getcpu() < 0) {
1404 opt_percpu_arena = percpu_arena_disabled;
1405 malloc_printf("<jemalloc>: perCPU arena getcpu() not "
1406 "available. Setting narenas to %u.\n", opt_narenas ?
1407 opt_narenas : malloc_narenas_default());
1408 if (opt_abort) {
1409 abort();
1410 }
1411 } else {
1412 if (ncpus >= MALLOCX_ARENA_LIMIT) {
1413 malloc_printf("<jemalloc>: narenas w/ percpu "
1414 "arena beyond limit (%d)\n", ncpus);
1415 if (opt_abort) {
1416 abort();
1417 }
1418 return true;
1419 }
1420 /* NB: opt_percpu_arena isn't fully initialized yet. */
1421 if (percpu_arena_as_initialized(opt_percpu_arena) ==
1422 per_phycpu_arena && ncpus % 2 != 0) {
1423 malloc_printf("<jemalloc>: invalid "
1424 "configuration -- per physical CPU arena "
1425 "with odd number (%u) of CPUs (no hyper "
1426 "threading?).\n", ncpus);
1427 if (opt_abort)
1428 abort();
1429 }
1430 unsigned n = percpu_arena_ind_limit(
1431 percpu_arena_as_initialized(opt_percpu_arena));
1432 if (opt_narenas < n) {
1433 /*
1434 * If narenas is specified with percpu_arena
1435 * enabled, actual narenas is set as the greater
1436 * of the two. percpu_arena_choose will be free
1437 * to use any of the arenas based on CPU
1438 * id. This is conservative (at a small cost)
1439 * but ensures correctness.
1440 *
1441 * If for some reason the ncpus determined at
1442 * boot is not the actual number (e.g. because
1443 * of affinity setting from numactl), reserving
1444 * narenas this way provides a workaround for
1445 * percpu_arena.
1446 */
1447 opt_narenas = n;
1448 }
1449 }
1450 }
1451 if (opt_narenas == 0) {
1452 opt_narenas = malloc_narenas_default();
1453 }
1454 assert(opt_narenas > 0);
1455
1456 narenas_auto = opt_narenas;
1457 /*
1458 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
1459 */
1460 if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
1461 narenas_auto = MALLOCX_ARENA_LIMIT - 1;
1462 malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
1463 narenas_auto);
1464 }
1465 narenas_total_set(narenas_auto);
1466
1467 return false;
1468 }
1469
1470 static void
1471 malloc_init_percpu(void) {
1472 opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
1473 }
1474
1475 static bool
1476 malloc_init_hard_finish(void) {
1477 if (malloc_mutex_boot()) {
1478 return true;
1479 }
1480
1481 malloc_init_state = malloc_init_initialized;
1482 malloc_slow_flag_init();
1483
1484 return false;
1485 }
1486
1487 static void
1488 malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
1489 malloc_mutex_assert_owner(tsdn, &init_lock);
1490 malloc_mutex_unlock(tsdn, &init_lock);
1491 if (reentrancy_set) {
1492 assert(!tsdn_null(tsdn));
1493 tsd_t *tsd = tsdn_tsd(tsdn);
1494 assert(tsd_reentrancy_level_get(tsd) > 0);
1495 post_reentrancy(tsd);
1496 }
1497 }
1498
1499 static bool
1500 malloc_init_hard(void) {
1501 tsd_t *tsd;
1502
1503 #if defined(_WIN32) && _WIN32_WINNT < 0x0600
1504 _init_init_lock();
1505 #endif
1506 malloc_mutex_lock(TSDN_NULL, &init_lock);
1507
1508 #define UNLOCK_RETURN(tsdn, ret, reentrancy) \
1509 malloc_init_hard_cleanup(tsdn, reentrancy); \
1510 return ret;
1511
1512 if (!malloc_init_hard_needed()) {
1513 UNLOCK_RETURN(TSDN_NULL, false, false)
1514 }
1515
1516 if (malloc_init_state != malloc_init_a0_initialized &&
1517 malloc_init_hard_a0_locked()) {
1518 UNLOCK_RETURN(TSDN_NULL, true, false)
1519 }
1520
1521 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1522 /* Recursive allocation relies on functional tsd. */
1523 tsd = malloc_tsd_boot0();
1524 if (tsd == NULL) {
1525 return true;
1526 }
1527 if (malloc_init_hard_recursible()) {
1528 return true;
1529 }
1530
1531 malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
1532 /* Set reentrancy level to 1 during init. */
1533 pre_reentrancy(tsd, NULL);
1534 /* Initialize narenas before prof_boot2 (for allocation). */
1535 if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
1536 UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1537 }
1538 if (config_prof && prof_boot2(tsd)) {
1539 UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1540 }
1541
1542 malloc_init_percpu();
1543
1544 if (malloc_init_hard_finish()) {
1545 UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1546 }
1547 post_reentrancy(tsd);
1548 malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
1549
1550 witness_assert_lockless(witness_tsd_tsdn(
1551 tsd_witness_tsdp_get_unsafe(tsd)));
1552 malloc_tsd_boot1();
1553 /* Update TSD after tsd_boot1. */
1554 tsd = tsd_fetch();
1555 if (opt_background_thread) {
1556 assert(have_background_thread);
1557 /*
1558 * Need to finish init & unlock first before creating background
1559 * threads (pthread_create depends on malloc). ctl_init (which
1560 * sets isthreaded) needs to be called without holding any lock.
1561 */
1562 background_thread_ctl_init(tsd_tsdn(tsd));
1563
1564 malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
1565 bool err = background_thread_create(tsd, 0);
1566 malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
1567 if (err) {
1568 return true;
1569 }
1570 }
1571 #undef UNLOCK_RETURN
1572 return false;
1573 }
1574
1575 /*
1576 * End initialization functions.
1577 */
1578 /******************************************************************************/
1579 /*
1580 * Begin allocation-path internal functions and data structures.
1581 */
1582
1583 /*
1584 * Settings determined by the documented behavior of the allocation functions.
1585 */
1586 typedef struct static_opts_s static_opts_t;
1587 struct static_opts_s {
1588 /* Whether or not allocation size may overflow. */
1589 bool may_overflow;
1590 /* Whether or not allocations of size 0 should be treated as size 1. */
1591 bool bump_empty_alloc;
1592 /*
1593 * Whether to assert that allocations are not of size 0 (after any
1594 * bumping).
1595 */
1596 bool assert_nonempty_alloc;
1597
1598 /*
1599 * Whether or not to modify the 'result' argument to malloc in case of
1600 * error.
1601 */
1602 bool null_out_result_on_error;
1603 /* Whether to set errno when we encounter an error condition. */
1604 bool set_errno_on_error;
1605
1606 /*
1607 * The minimum valid alignment for functions requesting aligned storage.
1608 */
1609 size_t min_alignment;
1610
1611 /* The error string to use if we oom. */
1612 const char *oom_string;
1613 /* The error string to use if the passed-in alignment is invalid. */
1614 const char *invalid_alignment_string;
1615
1616 /*
1617 * False if we're configured to skip some time-consuming operations.
1618 *
1619 * This isn't really a malloc "behavior", but it acts as a useful
1620 * summary of several other static (or at least, static after program
1621 * initialization) options.
1622 */
1623 bool slow;
1624 };
1625
1626 JEMALLOC_ALWAYS_INLINE void
1627 static_opts_init(static_opts_t *static_opts) {
1628 static_opts->may_overflow = false;
1629 static_opts->bump_empty_alloc = false;
1630 static_opts->assert_nonempty_alloc = false;
1631 static_opts->null_out_result_on_error = false;
1632 static_opts->set_errno_on_error = false;
1633 static_opts->min_alignment = 0;
1634 static_opts->oom_string = "";
1635 static_opts->invalid_alignment_string = "";
1636 static_opts->slow = false;
1637 }
1638
1639 /*
1640 * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we
1641 * should have one constant here per magic value there. Note however that the
1642 * representations need not be related.
1643 */
1644 #define TCACHE_IND_NONE ((unsigned)-1)
1645 #define TCACHE_IND_AUTOMATIC ((unsigned)-2)
1646 #define ARENA_IND_AUTOMATIC ((unsigned)-1)
1647
1648 typedef struct dynamic_opts_s dynamic_opts_t;
1649 struct dynamic_opts_s {
1650 void **result;
1651 size_t num_items;
1652 size_t item_size;
1653 size_t alignment;
1654 bool zero;
1655 unsigned tcache_ind;
1656 unsigned arena_ind;
1657 };
1658
1659 JEMALLOC_ALWAYS_INLINE void
1660 dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
1661 dynamic_opts->result = NULL;
1662 dynamic_opts->num_items = 0;
1663 dynamic_opts->item_size = 0;
1664 dynamic_opts->alignment = 0;
1665 dynamic_opts->zero = false;
1666 dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
1667 dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
1668 }
1669
1670 /* ind is ignored if dopts->alignment > 0. */
1671 JEMALLOC_ALWAYS_INLINE void *
1672 imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1673 size_t size, size_t usize, szind_t ind) {
1674 tcache_t *tcache;
1675 arena_t *arena;
1676
1677 /* Fill in the tcache. */
1678 if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
1679 if (likely(!sopts->slow)) {
1680 /* Getting tcache ptr unconditionally. */
1681 tcache = tsd_tcachep_get(tsd);
1682 assert(tcache == tcache_get(tsd));
1683 } else {
1684 tcache = tcache_get(tsd);
1685 }
1686 } else if (dopts->tcache_ind == TCACHE_IND_NONE) {
1687 tcache = NULL;
1688 } else {
1689 tcache = tcaches_get(tsd, dopts->tcache_ind);
1690 }
1691
1692 /* Fill in the arena. */
1693 if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
1694 /*
1695 * In case of automatic arena management, we defer arena
1696 * computation until as late as we can, hoping to fill the
1697 * allocation out of the tcache.
1698 */
1699 arena = NULL;
1700 } else {
1701 arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
1702 }
1703
1704 if (unlikely(dopts->alignment != 0)) {
1705 return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
1706 dopts->zero, tcache, arena);
1707 }
1708
1709 return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
1710 arena, sopts->slow);
1711 }
1712
1713 JEMALLOC_ALWAYS_INLINE void *
1714 imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1715 size_t usize, szind_t ind) {
1716 void *ret;
1717
1718 /*
1719 * For small allocations, sampling bumps the usize. If so, we allocate
1720 * from the ind_large bucket.
1721 */
1722 szind_t ind_large;
1723 size_t bumped_usize = usize;
1724
1725 if (usize <= SMALL_MAXCLASS) {
1726 assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) :
1727 sz_sa2u(LARGE_MINCLASS, dopts->alignment))
1728 == LARGE_MINCLASS);
1729 ind_large = sz_size2index(LARGE_MINCLASS);
1730 bumped_usize = sz_s2u(LARGE_MINCLASS);
1731 ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
1732 bumped_usize, ind_large);
1733 if (unlikely(ret == NULL)) {
1734 return NULL;
1735 }
1736 arena_prof_promote(tsd_tsdn(tsd), ret, usize);
1737 } else {
1738 ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
1739 }
1740
1741 return ret;
1742 }
1743
1744 /*
1745 * Returns true if the allocation will overflow, and false otherwise. Sets
1746 * *size to the product either way.
1747 */
1748 JEMALLOC_ALWAYS_INLINE bool
1749 compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
1750 size_t *size) {
1751 /*
1752 * This function is just num_items * item_size, except that we may have
1753 * to check for overflow.
1754 */
1755
1756 if (!may_overflow) {
1757 assert(dopts->num_items == 1);
1758 *size = dopts->item_size;
1759 return false;
1760 }
1761
1762 /* A size_t with its high-half bits all set to 1. */
1763 static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
1764
1765 *size = dopts->item_size * dopts->num_items;
1766
1767 if (unlikely(*size == 0)) {
1768 return (dopts->num_items != 0 && dopts->item_size != 0);
1769 }
1770
1771 /*
1772 * We got a non-zero size, but we don't know if we overflowed to get
1773 * there. To avoid having to do a divide, we'll be clever and note that
1774 * if both A and B can be represented in N/2 bits, then their product
1775 * can be represented in N bits (without the possibility of overflow).
1776 */
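/*
 * For illustration, on a 64-bit system: any num_items and item_size that are
 * each below 2^32 multiply without overflow, so only the rare case where an
 * operand has high-half bits set falls through to the division check below.
 */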
1777 if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
1778 return false;
1779 }
1780 if (likely(*size / dopts->item_size == dopts->num_items)) {
1781 return false;
1782 }
1783 return true;
1784 }
1785
1786 JEMALLOC_ALWAYS_INLINE int
1787 imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
1788 /* Where the actual allocated memory will live. */
1789 void *allocation = NULL;
1790 /* Filled in by compute_size_with_overflow below. */
1791 size_t size = 0;
1792 /*
1793 * For unaligned allocations, we need only ind. For aligned
1794 * allocations, or in case of stats or profiling we need usize.
1795 *
1796 * These are actually dead stores, in that their values are reset before
1797 * any branch on their value is taken. Sometimes though, it's
1798 * convenient to pass them as arguments before this point. To avoid
1799 * undefined behavior then, we initialize them with dummy stores.
1800 */
1801 szind_t ind = 0;
1802 size_t usize = 0;
1803
1804 /* Reentrancy is only checked on slow path. */
1805 int8_t reentrancy_level;
1806
1807 /* Compute the amount of memory the user wants. */
1808 if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
1809 &size))) {
1810 goto label_oom;
1811 }
1812
1813 /* Validate the user input. */
1814 if (sopts->bump_empty_alloc) {
1815 if (unlikely(size == 0)) {
1816 size = 1;
1817 }
1818 }
1819
1820 if (sopts->assert_nonempty_alloc) {
1821 assert (size != 0);
1822 }
1823
1824 if (unlikely(dopts->alignment < sopts->min_alignment
1825 || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
1826 goto label_invalid_alignment;
1827 }
1828
1829 /* This is the beginning of the "core" algorithm. */
1830
1831 if (dopts->alignment == 0) {
1832 ind = sz_size2index(size);
1833 if (unlikely(ind >= NSIZES)) {
1834 goto label_oom;
1835 }
1836 if (config_stats || (config_prof && opt_prof)) {
1837 usize = sz_index2size(ind);
1838 assert(usize > 0 && usize <= LARGE_MAXCLASS);
1839 }
1840 } else {
1841 usize = sz_sa2u(size, dopts->alignment);
1842 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
1843 goto label_oom;
1844 }
1845 }
1846
1847 check_entry_exit_locking(tsd_tsdn(tsd));
1848
1849 /*
1850 * If we need to handle reentrancy, we can do it out of a
1851 * known-initialized arena (i.e. arena 0).
1852 */
1853 reentrancy_level = tsd_reentrancy_level_get(tsd);
1854 if (sopts->slow && unlikely(reentrancy_level > 0)) {
1855 /*
1856 * We should never specify particular arenas or tcaches from
1857 * within our internal allocations.
1858 */
1859 assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
1860 dopts->tcache_ind == TCACHE_IND_NONE);
1861 assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
1862 dopts->tcache_ind = TCACHE_IND_NONE;
1863 /* We know that arena 0 has already been initialized. */
1864 dopts->arena_ind = 0;
1865 }
1866
1867 /* If profiling is on, get our profiling context. */
1868 if (config_prof && opt_prof) {
1869 /*
1870 * Note that if we're going down this path, usize must have been
1871 * initialized in the previous if statement.
1872 */
1873 prof_tctx_t *tctx = prof_alloc_prep(
1874 tsd, usize, prof_active_get_unlocked(), true);
1875
1876 alloc_ctx_t alloc_ctx;
1877 if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
1878 alloc_ctx.slab = (usize <= SMALL_MAXCLASS);
1879 allocation = imalloc_no_sample(
1880 sopts, dopts, tsd, usize, usize, ind);
1881 } else if ((uintptr_t)tctx > (uintptr_t)1U) {
1882 /*
1883 * Note that ind might still be 0 here. This is fine;
1884 * imalloc_sample ignores ind if dopts->alignment > 0.
1885 */
1886 allocation = imalloc_sample(
1887 sopts, dopts, tsd, usize, ind);
1888 alloc_ctx.slab = false;
1889 } else {
1890 allocation = NULL;
1891 }
1892
1893 if (unlikely(allocation == NULL)) {
1894 prof_alloc_rollback(tsd, tctx, true);
1895 goto label_oom;
1896 }
1897 prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx);
1898 } else {
1899 /*
1900 * If dopts->alignment > 0, then ind is still 0, but usize was
1901 * computed in the previous if statement. Down the positive
1902 * alignment path, imalloc_no_sample ignores ind and size
1903 * (relying only on usize).
1904 */
1905 allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
1906 ind);
1907 if (unlikely(allocation == NULL)) {
1908 goto label_oom;
1909 }
1910 }
1911
1912 /*
1913 * Allocation has been done at this point. We still have some
1914 * post-allocation work to do though.
1915 */
1916 assert(dopts->alignment == 0
1917 || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
1918
1919 if (config_stats) {
1920 assert(usize == isalloc(tsd_tsdn(tsd), allocation));
1921 *tsd_thread_allocatedp_get(tsd) += usize;
1922 }
1923
1924 if (sopts->slow) {
1925 UTRACE(0, size, allocation);
1926 }
1927
1928 /* Success! */
1929 check_entry_exit_locking(tsd_tsdn(tsd));
1930 *dopts->result = allocation;
1931 return 0;
1932
1933 label_oom:
1934 if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
1935 malloc_write(sopts->oom_string);
1936 abort();
1937 }
1938
1939 if (sopts->slow) {
1940 UTRACE(NULL, size, NULL);
1941 }
1942
1943 check_entry_exit_locking(tsd_tsdn(tsd));
1944
1945 if (sopts->set_errno_on_error) {
1946 set_errno(ENOMEM);
1947 }
1948
1949 if (sopts->null_out_result_on_error) {
1950 *dopts->result = NULL;
1951 }
1952
1953 return ENOMEM;
1954
1955 /*
1956 * This label is only jumped to by one goto; we move it out of line
1957 * anyways to avoid obscuring the non-error paths, and for symmetry with
1958 * the oom case.
1959 */
1960 label_invalid_alignment:
1961 if (config_xmalloc && unlikely(opt_xmalloc)) {
1962 malloc_write(sopts->invalid_alignment_string);
1963 abort();
1964 }
1965
1966 if (sopts->set_errno_on_error) {
1967 set_errno(EINVAL);
1968 }
1969
1970 if (sopts->slow) {
1971 UTRACE(NULL, size, NULL);
1972 }
1973
1974 check_entry_exit_locking(tsd_tsdn(tsd));
1975
1976 if (sopts->null_out_result_on_error) {
1977 *dopts->result = NULL;
1978 }
1979
1980 return EINVAL;
1981 }
1982
1983 /* Returns the errno-style error code of the allocation. */
1984 JEMALLOC_ALWAYS_INLINE int
1985 imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
1986 if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
1987 if (config_xmalloc && unlikely(opt_xmalloc)) {
1988 malloc_write(sopts->oom_string);
1989 abort();
1990 }
1991 UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
1992 set_errno(ENOMEM);
1993 *dopts->result = NULL;
1994
1995 return ENOMEM;
1996 }
1997
1998 /* We always need the tsd. Let's grab it right away. */
1999 tsd_t *tsd = tsd_fetch();
2000 assert(tsd);
2001 if (likely(tsd_fast(tsd))) {
2002 /* Fast and common path. */
2003 tsd_assert_fast(tsd);
2004 sopts->slow = false;
2005 return imalloc_body(sopts, dopts, tsd);
2006 } else {
2007 sopts->slow = true;
2008 return imalloc_body(sopts, dopts, tsd);
2009 }
2010 }
2011 /******************************************************************************/
2012 /*
2013 * Begin malloc(3)-compatible functions.
2014 */
2015
2016 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2017 void JEMALLOC_NOTHROW *
2018 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2019 je_malloc(size_t size) {
2020 void *ret;
2021 static_opts_t sopts;
2022 dynamic_opts_t dopts;
2023
2024 LOG("core.malloc.entry", "size: %zu", size);
2025
2026 static_opts_init(&sopts);
2027 dynamic_opts_init(&dopts);
2028
2029 sopts.bump_empty_alloc = true;
2030 sopts.null_out_result_on_error = true;
2031 sopts.set_errno_on_error = true;
2032 sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
2033
2034 dopts.result = &ret;
2035 dopts.num_items = 1;
2036 dopts.item_size = size;
2037
2038 imalloc(&sopts, &dopts);
2039
2040 LOG("core.malloc.exit", "result: %p", ret);
2041
2042 return ret;
2043 }
2044
2045 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2046 JEMALLOC_ATTR(nonnull(1))
2047 je_posix_memalign(void **memptr, size_t alignment, size_t size) {
2048 int ret;
2049 static_opts_t sopts;
2050 dynamic_opts_t dopts;
2051
2052 LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, "
2053 "size: %zu", memptr, alignment, size);
2054
2055 static_opts_init(&sopts);
2056 dynamic_opts_init(&dopts);
2057
2058 sopts.bump_empty_alloc = true;
2059 sopts.min_alignment = sizeof(void *);
2060 sopts.oom_string =
2061 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2062 sopts.invalid_alignment_string =
2063 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2064
2065 dopts.result = memptr;
2066 dopts.num_items = 1;
2067 dopts.item_size = size;
2068 dopts.alignment = alignment;
2069
2070 ret = imalloc(&sopts, &dopts);
2071
2072 LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
2073 *memptr);
2074
2075 return ret;
2076 }
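/*
 * Illustrative usage sketch (not part of the jemalloc sources): callers see
 * this entry point as posix_memalign(3). The alignment must be a power of two
 * and a multiple of sizeof(void *), and failure is reported through the return
 * value rather than errno:
 *
 *	void *buf;
 *	int err = posix_memalign(&buf, 64, 1024);	// 64-byte aligned
 *	if (err != 0) {
 *		// err is EINVAL (bad alignment) or ENOMEM (out of memory).
 *	}
 */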
2077
2078 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2079 void JEMALLOC_NOTHROW *
2080 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
2081 je_aligned_alloc(size_t alignment, size_t size) {
2082 void *ret;
2083
2084 static_opts_t sopts;
2085 dynamic_opts_t dopts;
2086
2087 LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n",
2088 alignment, size);
2089
2090 static_opts_init(&sopts);
2091 dynamic_opts_init(&dopts);
2092
2093 sopts.bump_empty_alloc = true;
2094 sopts.null_out_result_on_error = true;
2095 sopts.set_errno_on_error = true;
2096 sopts.min_alignment = 1;
2097 sopts.oom_string =
2098 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2099 sopts.invalid_alignment_string =
2100 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2101
2102 dopts.result = &ret;
2103 dopts.num_items = 1;
2104 dopts.item_size = size;
2105 dopts.alignment = alignment;
2106
2107 imalloc(&sopts, &dopts);
2108
2109 LOG("core.aligned_alloc.exit", "result: %p", ret);
2110
2111 return ret;
2112 }
2113
2114 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2115 void JEMALLOC_NOTHROW *
2116 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
2117 je_calloc(size_t num, size_t size) {
2118 void *ret;
2119 static_opts_t sopts;
2120 dynamic_opts_t dopts;
2121
2122 LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size);
2123
2124 static_opts_init(&sopts);
2125 dynamic_opts_init(&dopts);
2126
2127 sopts.may_overflow = true;
2128 sopts.bump_empty_alloc = true;
2129 sopts.null_out_result_on_error = true;
2130 sopts.set_errno_on_error = true;
2131 sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
2132
2133 dopts.result = &ret;
2134 dopts.num_items = num;
2135 dopts.item_size = size;
2136 dopts.zero = true;
2137
2138 imalloc(&sopts, &dopts);
2139
2140 LOG("core.calloc.exit", "result: %p", ret);
2141
2142 return ret;
2143 }
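/*
 * Illustrative usage sketch (not part of the jemalloc sources): because
 * sopts.may_overflow is set above, a num * size product that overflows is
 * caught internally and treated as an allocation failure:
 *
 *	void *p = calloc(SIZE_MAX / 2, 4);	// product overflows
 *	if (p == NULL) {
 *		// errno has been set to ENOMEM.
 *	}
 */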
2144
2145 static void *
2146 irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2147 prof_tctx_t *tctx) {
2148 void *p;
2149
2150 if (tctx == NULL) {
2151 return NULL;
2152 }
2153 if (usize <= SMALL_MAXCLASS) {
2154 p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
2155 if (p == NULL) {
2156 return NULL;
2157 }
2158 arena_prof_promote(tsd_tsdn(tsd), p, usize);
2159 } else {
2160 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2161 }
2162
2163 return p;
2164 }
2165
2166 JEMALLOC_ALWAYS_INLINE void *
2167 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2168 alloc_ctx_t *alloc_ctx) {
2169 void *p;
2170 bool prof_active;
2171 prof_tctx_t *old_tctx, *tctx;
2172
2173 prof_active = prof_active_get_unlocked();
2174 old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2175 tctx = prof_alloc_prep(tsd, usize, prof_active, true);
2176 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2177 p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
2178 } else {
2179 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2180 }
2181 if (unlikely(p == NULL)) {
2182 prof_alloc_rollback(tsd, tctx, true);
2183 return NULL;
2184 }
2185 prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
2186 old_tctx);
2187
2188 return p;
2189 }
2190
2191 JEMALLOC_ALWAYS_INLINE void
2192 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
2193 if (!slow_path) {
2194 tsd_assert_fast(tsd);
2195 }
2196 check_entry_exit_locking(tsd_tsdn(tsd));
2197 if (tsd_reentrancy_level_get(tsd) != 0) {
2198 assert(slow_path);
2199 }
2200
2201 assert(ptr != NULL);
2202 assert(malloc_initialized() || IS_INITIALIZER);
2203
2204 alloc_ctx_t alloc_ctx;
2205 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2206 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2207 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2208 assert(alloc_ctx.szind != NSIZES);
2209
2210 size_t usize;
2211 if (config_prof && opt_prof) {
2212 usize = sz_index2size(alloc_ctx.szind);
2213 prof_free(tsd, ptr, usize, &alloc_ctx);
2214 } else if (config_stats) {
2215 usize = sz_index2size(alloc_ctx.szind);
2216 }
2217 if (config_stats) {
2218 *tsd_thread_deallocatedp_get(tsd) += usize;
2219 }
2220
2221 if (likely(!slow_path)) {
2222 idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2223 false);
2224 } else {
2225 idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2226 true);
2227 }
2228 }
2229
2230 JEMALLOC_ALWAYS_INLINE void
2231 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
2232 if (!slow_path) {
2233 tsd_assert_fast(tsd);
2234 }
2235 check_entry_exit_locking(tsd_tsdn(tsd));
2236 if (tsd_reentrancy_level_get(tsd) != 0) {
2237 assert(slow_path);
2238 }
2239
2240 assert(ptr != NULL);
2241 assert(malloc_initialized() || IS_INITIALIZER);
2242
2243 alloc_ctx_t alloc_ctx, *ctx;
2244 if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) {
2245 /*
2246 * When cache_oblivious is disabled and ptr is not page aligned,
2247 * the allocation was not sampled -- usize can be used to
2248 * determine szind directly.
2249 */
2250 alloc_ctx.szind = sz_size2index(usize);
2251 alloc_ctx.slab = true;
2252 ctx = &alloc_ctx;
2253 if (config_debug) {
2254 alloc_ctx_t dbg_ctx;
2255 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2256 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree,
2257 rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind,
2258 &dbg_ctx.slab);
2259 assert(dbg_ctx.szind == alloc_ctx.szind);
2260 assert(dbg_ctx.slab == alloc_ctx.slab);
2261 }
2262 } else if (config_prof && opt_prof) {
2263 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2264 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2265 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2266 assert(alloc_ctx.szind == sz_size2index(usize));
2267 ctx = &alloc_ctx;
2268 } else {
2269 ctx = NULL;
2270 }
2271
2272 if (config_prof && opt_prof) {
2273 prof_free(tsd, ptr, usize, ctx);
2274 }
2275 if (config_stats) {
2276 *tsd_thread_deallocatedp_get(tsd) += usize;
2277 }
2278
2279 if (likely(!slow_path)) {
2280 isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false);
2281 } else {
2282 isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true);
2283 }
2284 }
2285
2286 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2287 void JEMALLOC_NOTHROW *
2288 JEMALLOC_ALLOC_SIZE(2)
2289 je_realloc(void *ptr, size_t size) {
2290 void *ret;
2291 tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
2292 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
2293 size_t old_usize = 0;
2294
2295 LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);
2296
2297 if (unlikely(size == 0)) {
2298 if (ptr != NULL) {
2299 /* realloc(ptr, 0) is equivalent to free(ptr). */
2300 UTRACE(ptr, 0, 0);
2301 tcache_t *tcache;
2302 tsd_t *tsd = tsd_fetch();
2303 if (tsd_reentrancy_level_get(tsd) == 0) {
2304 tcache = tcache_get(tsd);
2305 } else {
2306 tcache = NULL;
2307 }
2308 ifree(tsd, ptr, tcache, true);
2309
2310 LOG("core.realloc.exit", "result: %p", NULL);
2311 return NULL;
2312 }
2313 size = 1;
2314 }
2315
2316 if (likely(ptr != NULL)) {
2317 assert(malloc_initialized() || IS_INITIALIZER);
2318 tsd_t *tsd = tsd_fetch();
2319
2320 check_entry_exit_locking(tsd_tsdn(tsd));
2321
2322 alloc_ctx_t alloc_ctx;
2323 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2324 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2325 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2326 assert(alloc_ctx.szind != NSIZES);
2327 old_usize = sz_index2size(alloc_ctx.szind);
2328 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2329 if (config_prof && opt_prof) {
2330 usize = sz_s2u(size);
2331 ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
2332 NULL : irealloc_prof(tsd, ptr, old_usize, usize,
2333 &alloc_ctx);
2334 } else {
2335 if (config_stats) {
2336 usize = sz_s2u(size);
2337 }
2338 ret = iralloc(tsd, ptr, old_usize, size, 0, false);
2339 }
2340 tsdn = tsd_tsdn(tsd);
2341 } else {
2342 /* realloc(NULL, size) is equivalent to malloc(size). */
2343 void *ret = je_malloc(size);
2344 LOG("core.realloc.exit", "result: %p", ret);
2345 return ret;
2346 }
2347
2348 if (unlikely(ret == NULL)) {
2349 if (config_xmalloc && unlikely(opt_xmalloc)) {
2350 malloc_write("<jemalloc>: Error in realloc(): "
2351 "out of memory\n");
2352 abort();
2353 }
2354 set_errno(ENOMEM);
2355 }
2356 if (config_stats && likely(ret != NULL)) {
2357 tsd_t *tsd;
2358
2359 assert(usize == isalloc(tsdn, ret));
2360 tsd = tsdn_tsd(tsdn);
2361 *tsd_thread_allocatedp_get(tsd) += usize;
2362 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2363 }
2364 UTRACE(ptr, size, ret);
2365 check_entry_exit_locking(tsdn);
2366
2367 LOG("core.realloc.exit", "result: %p", ret);
2368 return ret;
2369 }
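/*
 * Illustrative usage sketch (not part of the jemalloc sources), summarizing
 * the edge cases handled above:
 *
 *	void *p = realloc(NULL, 100);	// behaves like malloc(100)
 *	p = realloc(p, 200);		// may move; old contents are preserved
 *	p = realloc(p, 0);		// behaves like free(p) and returns NULL
 */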
2370
2371 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2372 je_free(void *ptr) {
2373 LOG("core.free.entry", "ptr: %p", ptr);
2374
2375 UTRACE(ptr, 0, 0);
2376 if (likely(ptr != NULL)) {
2377 /*
2378 * We avoid setting up tsd fully (e.g. tcache, arena binding)
2379 * based on only free() calls -- other activities trigger the
2380 * minimal to full transition. This is because free() may
2381 * happen during thread shutdown after tls deallocation: if a
2382 * thread never had any malloc activities until then, a
2383 * fully-setup tsd won't be destructed properly.
2384 */
2385 tsd_t *tsd = tsd_fetch_min();
2386 check_entry_exit_locking(tsd_tsdn(tsd));
2387
2388 tcache_t *tcache;
2389 if (likely(tsd_fast(tsd))) {
2390 tsd_assert_fast(tsd);
2391 /* Unconditionally get tcache ptr on fast path. */
2392 tcache = tsd_tcachep_get(tsd);
2393 ifree(tsd, ptr, tcache, false);
2394 } else {
2395 if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2396 tcache = tcache_get(tsd);
2397 } else {
2398 tcache = NULL;
2399 }
2400 ifree(tsd, ptr, tcache, true);
2401 }
2402 check_entry_exit_locking(tsd_tsdn(tsd));
2403 }
2404 LOG("core.free.exit", "");
2405 }
2406
2407 /*
2408 * End malloc(3)-compatible functions.
2409 */
2410 /******************************************************************************/
2411 /*
2412 * Begin non-standard override functions.
2413 */
2414
2415 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
2416 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2417 void JEMALLOC_NOTHROW *
2418 JEMALLOC_ATTR(malloc)
2419 je_memalign(size_t alignment, size_t size) {
2420 void *ret;
2421 static_opts_t sopts;
2422 dynamic_opts_t dopts;
2423
2424 LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment,
2425 size);
2426
2427 static_opts_init(&sopts);
2428 dynamic_opts_init(&dopts);
2429
2430 sopts.bump_empty_alloc = true;
2431 sopts.min_alignment = 1;
2432 sopts.oom_string =
2433 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2434 sopts.invalid_alignment_string =
2435 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2436 sopts.null_out_result_on_error = true;
2437
2438 dopts.result = &ret;
2439 dopts.num_items = 1;
2440 dopts.item_size = size;
2441 dopts.alignment = alignment;
2442
2443 imalloc(&sopts, &dopts);
2444
2445 LOG("core.memalign.exit", "result: %p", ret);
2446 return ret;
2447 }
2448 #endif
2449
2450 #ifdef JEMALLOC_OVERRIDE_VALLOC
2451 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2452 void JEMALLOC_NOTHROW *
2453 JEMALLOC_ATTR(malloc)
2454 je_valloc(size_t size) {
2455 void *ret;
2456
2457 static_opts_t sopts;
2458 dynamic_opts_t dopts;
2459
2460 LOG("core.valloc.entry", "size: %zu\n", size);
2461
2462 static_opts_init(&sopts);
2463 dynamic_opts_init(&dopts);
2464
2465 sopts.bump_empty_alloc = true;
2466 sopts.null_out_result_on_error = true;
2467 sopts.min_alignment = PAGE;
2468 sopts.oom_string =
2469 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2470 sopts.invalid_alignment_string =
2471 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2472
2473 dopts.result = &ret;
2474 dopts.num_items = 1;
2475 dopts.item_size = size;
2476 dopts.alignment = PAGE;
2477
2478 imalloc(&sopts, &dopts);
2479
2480 LOG("core.valloc.exit", "result: %p\n", ret);
2481 return ret;
2482 }
2483 #endif
2484
2485 #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
2486 /*
2487 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
2488 * to inconsistently reference libc's malloc(3)-compatible functions
2489 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
2490 *
2491 * These definitions interpose hooks in glibc. The functions are actually
2492 * passed an extra argument for the caller return address, which will be
2493 * ignored.
2494 */
2495 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
2496 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
2497 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
2498 # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
2499 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
2500 je_memalign;
2501 # endif
2502
2503 # ifdef CPU_COUNT
2504 /*
2505 * To enable static linking with glibc, the libc specific malloc interface must
2506 * be implemented also, so none of glibc's malloc.o functions are added to the
2507 * link.
2508 */
2509 # define ALIAS(je_fn) __attribute__((alias (#je_fn), used))
2510 /* To force macro expansion of je_ prefix before stringification. */
2511 # define PREALIAS(je_fn) ALIAS(je_fn)
2512 # ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
2513 void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
2514 # endif
2515 # ifdef JEMALLOC_OVERRIDE___LIBC_FREE
2516 void __libc_free(void* ptr) PREALIAS(je_free);
2517 # endif
2518 # ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
2519 void *__libc_malloc(size_t size) PREALIAS(je_malloc);
2520 # endif
2521 # ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
2522 void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
2523 # endif
2524 # ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
2525 void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
2526 # endif
2527 # ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
2528 void *__libc_valloc(size_t size) PREALIAS(je_valloc);
2529 # endif
2530 # ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
2531 int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
2532 # endif
2533 # undef PREALIAS
2534 # undef ALIAS
2535 # endif
2536 #endif
2537
2538 /*
2539 * End non-standard override functions.
2540 */
2541 /******************************************************************************/
2542 /*
2543 * Begin non-standard functions.
2544 */
2545
2546 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2547 void JEMALLOC_NOTHROW *
2548 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2549 je_mallocx(size_t size, int flags) {
2550 void *ret;
2551 static_opts_t sopts;
2552 dynamic_opts_t dopts;
2553
2554 LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags);
2555
2556 static_opts_init(&sopts);
2557 dynamic_opts_init(&dopts);
2558
2559 sopts.assert_nonempty_alloc = true;
2560 sopts.null_out_result_on_error = true;
2561 sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
2562
2563 dopts.result = &ret;
2564 dopts.num_items = 1;
2565 dopts.item_size = size;
2566 if (unlikely(flags != 0)) {
2567 if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
2568 dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
2569 }
2570
2571 dopts.zero = MALLOCX_ZERO_GET(flags);
2572
2573 if ((flags & MALLOCX_TCACHE_MASK) != 0) {
2574 if ((flags & MALLOCX_TCACHE_MASK)
2575 == MALLOCX_TCACHE_NONE) {
2576 dopts.tcache_ind = TCACHE_IND_NONE;
2577 } else {
2578 dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
2579 }
2580 } else {
2581 dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
2582 }
2583
2584 if ((flags & MALLOCX_ARENA_MASK) != 0)
2585 dopts.arena_ind = MALLOCX_ARENA_GET(flags);
2586 }
2587
2588 imalloc(&sopts, &dopts);
2589
2590 LOG("core.mallocx.exit", "result: %p", ret);
2591 return ret;
2592 }
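/*
 * Illustrative usage sketch (not part of the jemalloc sources): the flags
 * decoded above are normally composed from the public MALLOCX_* macros, e.g.
 *
 *	void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
 *	if (p != NULL) {
 *		// 64-byte aligned, zeroed allocation of at least 4096 bytes.
 *		dallocx(p, 0);
 *	}
 *
 * MALLOCX_TCACHE(tc), MALLOCX_TCACHE_NONE, and MALLOCX_ARENA(a) select the
 * tcache and arena indices extracted by the MALLOCX_*_GET() macros above.
 */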
2593
2594 static void *
2595 irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
2596 size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2597 prof_tctx_t *tctx) {
2598 void *p;
2599
2600 if (tctx == NULL) {
2601 return NULL;
2602 }
2603 if (usize <= SMALL_MAXCLASS) {
2604 p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS,
2605 alignment, zero, tcache, arena);
2606 if (p == NULL) {
2607 return NULL;
2608 }
2609 arena_prof_promote(tsdn, p, usize);
2610 } else {
2611 p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
2612 tcache, arena);
2613 }
2614
2615 return p;
2616 }
2617
2618 JEMALLOC_ALWAYS_INLINE void *
2619 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2620 size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2621 arena_t *arena, alloc_ctx_t *alloc_ctx) {
2622 void *p;
2623 bool prof_active;
2624 prof_tctx_t *old_tctx, *tctx;
2625
2626 prof_active = prof_active_get_unlocked();
2627 old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2628 tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
2629 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2630 p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
2631 *usize, alignment, zero, tcache, arena, tctx);
2632 } else {
2633 p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
2634 zero, tcache, arena);
2635 }
2636 if (unlikely(p == NULL)) {
2637 prof_alloc_rollback(tsd, tctx, false);
2638 return NULL;
2639 }
2640
2641 if (p == old_ptr && alignment != 0) {
2642 /*
2643 * The allocation did not move, so it is possible that the size
2644 * class is smaller than would guarantee the requested
2645 * alignment, and that the alignment constraint was
2646 * serendipitously satisfied. Additionally, old_usize may not
2647 * be the same as the current usize because of in-place large
2648 * reallocation. Therefore, query the actual value of usize.
2649 */
2650 *usize = isalloc(tsd_tsdn(tsd), p);
2651 }
2652 prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
2653 old_usize, old_tctx);
2654
2655 return p;
2656 }
2657
2658 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2659 void JEMALLOC_NOTHROW *
2660 JEMALLOC_ALLOC_SIZE(2)
2661 je_rallocx(void *ptr, size_t size, int flags) {
2662 void *p;
2663 tsd_t *tsd;
2664 size_t usize;
2665 size_t old_usize;
2666 size_t alignment = MALLOCX_ALIGN_GET(flags);
2667 bool zero = flags & MALLOCX_ZERO;
2668 arena_t *arena;
2669 tcache_t *tcache;
2670
2671 LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
2672 size, flags);
2673
2674
2675 assert(ptr != NULL);
2676 assert(size != 0);
2677 assert(malloc_initialized() || IS_INITIALIZER);
2678 tsd = tsd_fetch();
2679 check_entry_exit_locking(tsd_tsdn(tsd));
2680
2681 if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2682 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2683 arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
2684 if (unlikely(arena == NULL)) {
2685 goto label_oom;
2686 }
2687 } else {
2688 arena = NULL;
2689 }
2690
2691 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2692 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2693 tcache = NULL;
2694 } else {
2695 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2696 }
2697 } else {
2698 tcache = tcache_get(tsd);
2699 }
2700
2701 alloc_ctx_t alloc_ctx;
2702 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2703 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2704 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2705 assert(alloc_ctx.szind != NSIZES);
2706 old_usize = sz_index2size(alloc_ctx.szind);
2707 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2708 if (config_prof && opt_prof) {
2709 usize = (alignment == 0) ?
2710 sz_s2u(size) : sz_sa2u(size, alignment);
2711 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
2712 goto label_oom;
2713 }
2714 p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2715 zero, tcache, arena, &alloc_ctx);
2716 if (unlikely(p == NULL)) {
2717 goto label_oom;
2718 }
2719 } else {
2720 p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
2721 zero, tcache, arena);
2722 if (unlikely(p == NULL)) {
2723 goto label_oom;
2724 }
2725 if (config_stats) {
2726 usize = isalloc(tsd_tsdn(tsd), p);
2727 }
2728 }
2729 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2730
2731 if (config_stats) {
2732 *tsd_thread_allocatedp_get(tsd) += usize;
2733 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2734 }
2735 UTRACE(ptr, size, p);
2736 check_entry_exit_locking(tsd_tsdn(tsd));
2737
2738 LOG("core.rallocx.exit", "result: %p", p);
2739 return p;
2740 label_oom:
2741 if (config_xmalloc && unlikely(opt_xmalloc)) {
2742 malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2743 abort();
2744 }
2745 UTRACE(ptr, size, 0);
2746 check_entry_exit_locking(tsd_tsdn(tsd));
2747
2748 LOG("core.rallocx.exit", "result: %p", NULL);
2749 return NULL;
2750 }
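/*
 * Illustrative usage sketch (not part of the jemalloc sources): unlike
 * xallocx(), rallocx() may move the allocation to satisfy the requested size
 * and flags, and leaves the original untouched on failure:
 *
 *	void *p = mallocx(100, 0);
 *	void *q = rallocx(p, 1000, MALLOCX_ZERO);	// new trailing bytes zeroed
 *	if (q == NULL) {
 *		// p is still valid and unchanged.
 *	} else {
 *		p = q;
 *	}
 */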
2751
2752 JEMALLOC_ALWAYS_INLINE size_t
2753 ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2754 size_t extra, size_t alignment, bool zero) {
2755 size_t usize;
2756
2757 if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) {
2758 return old_usize;
2759 }
2760 usize = isalloc(tsdn, ptr);
2761
2762 return usize;
2763 }
2764
2765 static size_t
2766 ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2767 size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
2768 size_t usize;
2769
2770 if (tctx == NULL) {
2771 return old_usize;
2772 }
2773 usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
2774 zero);
2775
2776 return usize;
2777 }
2778
2779 JEMALLOC_ALWAYS_INLINE size_t
2780 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2781 size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
2782 size_t usize_max, usize;
2783 bool prof_active;
2784 prof_tctx_t *old_tctx, *tctx;
2785
2786 prof_active = prof_active_get_unlocked();
2787 old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
2788 /*
2789 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2790 * Therefore, compute its maximum possible value and use that in
2791 * prof_alloc_prep() to decide whether to capture a backtrace.
2792 * prof_realloc() will use the actual usize to decide whether to sample.
2793 */
2794 if (alignment == 0) {
2795 usize_max = sz_s2u(size+extra);
2796 assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
2797 } else {
2798 usize_max = sz_sa2u(size+extra, alignment);
2799 if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
2800 /*
2801 * usize_max is out of range, and chances are that
2802 * allocation will fail, but use the maximum possible
2803 * value and carry on with prof_alloc_prep(), just in
2804 * case allocation succeeds.
2805 */
2806 usize_max = LARGE_MAXCLASS;
2807 }
2808 }
2809 tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
2810
2811 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2812 usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
2813 size, extra, alignment, zero, tctx);
2814 } else {
2815 usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2816 extra, alignment, zero);
2817 }
2818 if (usize == old_usize) {
2819 prof_alloc_rollback(tsd, tctx, false);
2820 return usize;
2821 }
2822 prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
2823 old_tctx);
2824
2825 return usize;
2826 }
2827
2828 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2829 je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
2830 tsd_t *tsd;
2831 size_t usize, old_usize;
2832 size_t alignment = MALLOCX_ALIGN_GET(flags);
2833 bool zero = flags & MALLOCX_ZERO;
2834
2835 LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
2836 "flags: %d", ptr, size, extra, flags);
2837
2838 assert(ptr != NULL);
2839 assert(size != 0);
2840 assert(SIZE_T_MAX - size >= extra);
2841 assert(malloc_initialized() || IS_INITIALIZER);
2842 tsd = tsd_fetch();
2843 check_entry_exit_locking(tsd_tsdn(tsd));
2844
2845 alloc_ctx_t alloc_ctx;
2846 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2847 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2848 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2849 assert(alloc_ctx.szind != NSIZES);
2850 old_usize = sz_index2size(alloc_ctx.szind);
2851 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2852 /*
2853 * The API explicitly absolves itself of protecting against (size +
2854 * extra) numerical overflow, but we may need to clamp extra to avoid
2855 * exceeding LARGE_MAXCLASS.
2856 *
2857 * Ordinarily, size limit checking is handled deeper down, but here we
2858 * have to check as part of (size + extra) clamping, since we need the
2859 * clamped value in the above helper functions.
2860 */
2861 if (unlikely(size > LARGE_MAXCLASS)) {
2862 usize = old_usize;
2863 goto label_not_resized;
2864 }
2865 if (unlikely(LARGE_MAXCLASS - size < extra)) {
2866 extra = LARGE_MAXCLASS - size;
2867 }
2868
2869 if (config_prof && opt_prof) {
2870 usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2871 alignment, zero, &alloc_ctx);
2872 } else {
2873 usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2874 extra, alignment, zero);
2875 }
2876 if (unlikely(usize == old_usize)) {
2877 goto label_not_resized;
2878 }
2879
2880 if (config_stats) {
2881 *tsd_thread_allocatedp_get(tsd) += usize;
2882 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2883 }
2884 label_not_resized:
2885 UTRACE(ptr, size, ptr);
2886 check_entry_exit_locking(tsd_tsdn(tsd));
2887
2888 LOG("core.xallocx.exit", "result: %zu", usize);
2889 return usize;
2890 }
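/*
 * Illustrative usage sketch (not part of the jemalloc sources): xallocx()
 * never moves the allocation; it returns the resulting usable size, which the
 * caller compares against the requested size to see whether the in-place
 * resize succeeded:
 *
 *	void *p = mallocx(4096, 0);
 *	size_t newsize = xallocx(p, 8192, 0, 0);
 *	if (newsize >= 8192) {
 *		// Grown in place.
 *	} else {
 *		// Still only newsize usable bytes; fall back to rallocx() if needed.
 *	}
 */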
2891
2892 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2893 JEMALLOC_ATTR(pure)
2894 je_sallocx(const void *ptr, UNUSED int flags) {
2895 size_t usize;
2896 tsdn_t *tsdn;
2897
2898 LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags);
2899
2900 assert(malloc_initialized() || IS_INITIALIZER);
2901 assert(ptr != NULL);
2902
2903 tsdn = tsdn_fetch();
2904 check_entry_exit_locking(tsdn);
2905
2906 if (config_debug || force_ivsalloc) {
2907 usize = ivsalloc(tsdn, ptr);
2908 assert(force_ivsalloc || usize != 0);
2909 } else {
2910 usize = isalloc(tsdn, ptr);
2911 }
2912
2913 check_entry_exit_locking(tsdn);
2914
2915 LOG("core.sallocx.exit", "result: %zu", usize);
2916 return usize;
2917 }
2918
2919 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2920 je_dallocx(void *ptr, int flags) {
2921 LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags);
2922
2923 assert(ptr != NULL);
2924 assert(malloc_initialized() || IS_INITIALIZER);
2925
2926 tsd_t *tsd = tsd_fetch();
2927 bool fast = tsd_fast(tsd);
2928 check_entry_exit_locking(tsd_tsdn(tsd));
2929
2930 tcache_t *tcache;
2931 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2932 /* Not allowed to be reentrant and specify a custom tcache. */
2933 assert(tsd_reentrancy_level_get(tsd) == 0);
2934 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2935 tcache = NULL;
2936 } else {
2937 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2938 }
2939 } else {
2940 if (likely(fast)) {
2941 tcache = tsd_tcachep_get(tsd);
2942 assert(tcache == tcache_get(tsd));
2943 } else {
2944 if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2945 tcache = tcache_get(tsd);
2946 } else {
2947 tcache = NULL;
2948 }
2949 }
2950 }
2951
2952 UTRACE(ptr, 0, 0);
2953 if (likely(fast)) {
2954 tsd_assert_fast(tsd);
2955 ifree(tsd, ptr, tcache, false);
2956 } else {
2957 ifree(tsd, ptr, tcache, true);
2958 }
2959 check_entry_exit_locking(tsd_tsdn(tsd));
2960
2961 LOG("core.dallocx.exit", "");
2962 }
2963
2964 JEMALLOC_ALWAYS_INLINE size_t
2965 inallocx(tsdn_t *tsdn, size_t size, int flags) {
2966 check_entry_exit_locking(tsdn);
2967
2968 size_t usize;
2969 if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
2970 usize = sz_s2u(size);
2971 } else {
2972 usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
2973 }
2974 check_entry_exit_locking(tsdn);
2975 return usize;
2976 }
2977
2978 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2979 je_sdallocx(void *ptr, size_t size, int flags) {
2980 assert(ptr != NULL);
2981 assert(malloc_initialized() || IS_INITIALIZER);
2982
2983 LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
2984 size, flags);
2985
2986 tsd_t *tsd = tsd_fetch();
2987 bool fast = tsd_fast(tsd);
2988 size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
2989 assert(usize == isalloc(tsd_tsdn(tsd), ptr));
2990 check_entry_exit_locking(tsd_tsdn(tsd));
2991
2992 tcache_t *tcache;
2993 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2994 /* Not allowed to be reentrant and specify a custom tcache. */
2995 assert(tsd_reentrancy_level_get(tsd) == 0);
2996 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2997 tcache = NULL;
2998 } else {
2999 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
3000 }
3001 } else {
3002 if (likely(fast)) {
3003 tcache = tsd_tcachep_get(tsd);
3004 assert(tcache == tcache_get(tsd));
3005 } else {
3006 if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
3007 tcache = tcache_get(tsd);
3008 } else {
3009 tcache = NULL;
3010 }
3011 }
3012 }
3013
3014 UTRACE(ptr, 0, 0);
3015 if (likely(fast)) {
3016 tsd_assert_fast(tsd);
3017 isfree(tsd, ptr, usize, tcache, false);
3018 } else {
3019 isfree(tsd, ptr, usize, tcache, true);
3020 }
3021 check_entry_exit_locking(tsd_tsdn(tsd));
3022
3023 LOG("core.sdallocx.exit", "");
3024 }
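/*
 * Illustrative usage sketch (not part of the jemalloc sources): the size
 * passed to sdallocx() must be either the originally requested size or the
 * usable size reported by sallocx()/nallocx(), since inallocx() above derives
 * the size class from it and asserts that it matches the allocation:
 *
 *	void *p = mallocx(100, 0);
 *	sdallocx(p, 100, 0);		// same size/flags as the allocation
 */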
3025
3026 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3027 JEMALLOC_ATTR(pure)
3028 je_nallocx(size_t size, int flags) {
3029 size_t usize;
3030 tsdn_t *tsdn;
3031
3032 assert(size != 0);
3033
3034 if (unlikely(malloc_init())) {
3035 LOG("core.nallocx.exit", "result: %zu", ZU(0));
3036 return 0;
3037 }
3038
3039 tsdn = tsdn_fetch();
3040 check_entry_exit_locking(tsdn);
3041
3042 usize = inallocx(tsdn, size, flags);
3043 if (unlikely(usize > LARGE_MAXCLASS)) {
3044 LOG("core.nallocx.exit", "result: %zu", ZU(0));
3045 return 0;
3046 }
3047
3048 check_entry_exit_locking(tsdn);
3049 LOG("core.nallocx.exit", "result: %zu", usize);
3050 return usize;
3051 }
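/*
 * Illustrative usage sketch (not part of the jemalloc sources): nallocx()
 * reports the usable size mallocx() would return for a given request without
 * performing an allocation, e.g. to account for size-class rounding up front:
 *
 *	size_t real = nallocx(100, MALLOCX_ALIGN(64));
 *	if (real != 0) {
 *		// mallocx(100, MALLOCX_ALIGN(64)) would yield real usable bytes.
 *	}
 */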
3052
3053 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3054 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
3055 size_t newlen) {
3056 int ret;
3057 tsd_t *tsd;
3058
3059 LOG("core.mallctl.entry", "name: %s", name);
3060
3061 if (unlikely(malloc_init())) {
3062 LOG("core.mallctl.exit", "result: %d", EAGAIN);
3063 return EAGAIN;
3064 }
3065
3066 tsd = tsd_fetch();
3067 check_entry_exit_locking(tsd_tsdn(tsd));
3068 ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
3069 check_entry_exit_locking(tsd_tsdn(tsd));
3070
3071 LOG("core.mallctl.exit", "result: %d", ret);
3072 return ret;
3073 }
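/*
 * Illustrative usage sketch (not part of the jemalloc sources): mallctl()
 * follows the sysctl(3)-style old/new convention. For example, refreshing the
 * statistics epoch and then reading the total number of allocated bytes:
 *
 *	uint64_t epoch = 1;
 *	size_t sz = sizeof(epoch);
 *	mallctl("epoch", &epoch, &sz, &epoch, sz);
 *
 *	size_t allocated;
 *	sz = sizeof(allocated);
 *	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
 *		// allocated now holds the current total.
 *	}
 */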
3074
3075 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3076 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
3077 int ret;
3078
3079 LOG("core.mallctlnametomib.entry", "name: %s", name);
3080
3081 if (unlikely(malloc_init())) {
3082 LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
3083 return EAGAIN;
3084 }
3085
3086 tsd_t *tsd = tsd_fetch();
3087 check_entry_exit_locking(tsd_tsdn(tsd));
3088 ret = ctl_nametomib(tsd, name, mibp, miblenp);
3089 check_entry_exit_locking(tsd_tsdn(tsd));
3090
3091 LOG("core.mallctlnametomib.exit", "result: %d", ret);
3092 return ret;
3093 }
3094
3095 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3096 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
3097 void *newp, size_t newlen) {
3098 int ret;
3099 tsd_t *tsd;
3100
3101 LOG("core.mallctlbymib.entry", "");
3102
3103 if (unlikely(malloc_init())) {
3104 LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
3105 return EAGAIN;
3106 }
3107
3108 tsd = tsd_fetch();
3109 check_entry_exit_locking(tsd_tsdn(tsd));
3110 ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
3111 check_entry_exit_locking(tsd_tsdn(tsd));
3112 LOG("core.mallctlbymib.exit", "result: %d", ret);
3113 return ret;
3114 }
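/*
 * Illustrative usage sketch (not part of the jemalloc sources), following the
 * pattern from the jemalloc manual: translate a name to a MIB once, then reuse
 * it with varying index components to avoid repeated name lookups:
 *
 *	unsigned nbins, i;
 *	size_t mib[4], len, miblen = 4;
 *
 *	len = sizeof(nbins);
 *	mallctl("arenas.nbins", &nbins, &len, NULL, 0);
 *	mallctlnametomib("arenas.bin.0.size", mib, &miblen);
 *	for (i = 0; i < nbins; i++) {
 *		size_t bin_size;
 *		mib[2] = i;
 *		len = sizeof(bin_size);
 *		mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
 *		// bin_size is the size class of bin i.
 *	}
 */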
3115
3116 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
3117 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
3118 const char *opts) {
3119 tsdn_t *tsdn;
3120
3121 LOG("core.malloc_stats_print.entry", "");
3122
3123 tsdn = tsdn_fetch();
3124 check_entry_exit_locking(tsdn);
3125 stats_print(write_cb, cbopaque, opts);
3126 check_entry_exit_locking(tsdn);
3127 LOG("core.malloc_stats_print.exit", "");
3128 }
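/*
 * Illustrative usage sketch (not part of the jemalloc sources): passing NULL
 * for write_cb routes output through malloc_message() (stderr by default), and
 * NULL opts selects the full report:
 *
 *	malloc_stats_print(NULL, NULL, NULL);
 */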
3129
3130 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3131 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
3132 size_t ret;
3133 tsdn_t *tsdn;
3134
3135 LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
3136
3137 assert(malloc_initialized() || IS_INITIALIZER);
3138
3139 tsdn = tsdn_fetch();
3140 check_entry_exit_locking(tsdn);
3141
3142 if (unlikely(ptr == NULL)) {
3143 ret = 0;
3144 } else {
3145 if (config_debug || force_ivsalloc) {
3146 ret = ivsalloc(tsdn, ptr);
3147 assert(force_ivsalloc || ret != 0);
3148 } else {
3149 ret = isalloc(tsdn, ptr);
3150 }
3151 }
3152
3153 check_entry_exit_locking(tsdn);
3154 LOG("core.malloc_usable_size.exit", "result: %zu", ret);
3155 return ret;
3156 }
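/*
 * Illustrative usage sketch (not part of the jemalloc sources): the returned
 * value may exceed the requested size because of size-class rounding, and the
 * full usable size may safely be written to:
 *
 *	void *p = malloc(100);
 *	size_t usable = malloc_usable_size(p);	// >= 100 for a non-NULL p
 */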
3157
3158 /*
3159 * End non-standard functions.
3160 */
3161 /******************************************************************************/
3162 /*
3163 * The following functions are used by threading libraries for protection of
3164 * malloc during fork().
3165 */
3166
3167 /*
3168 * If an application creates a thread before doing any allocation in the main
3169 * thread, then calls fork(2) in the main thread followed by memory allocation
3170 * in the child process, a race can occur that results in deadlock within the
3171 * child: the main thread may have forked while the created thread had
3172 * partially initialized the allocator. Ordinarily jemalloc prevents
3173 * fork/malloc races via the following functions it registers during
3174 * initialization using pthread_atfork(), but of course that does no good if
3175 * the allocator isn't fully initialized at fork time. The following library
3176 * constructor is a partial solution to this problem. It may still be possible
3177 * to trigger the deadlock described above, but doing so would involve forking
3178 * via a library constructor that runs before jemalloc's runs.
3179 */
3180 #ifndef JEMALLOC_JET
3181 JEMALLOC_ATTR(constructor)
3182 static void
3183 jemalloc_constructor(void) {
3184 malloc_init();
3185 }
3186 #endif
3187
3188 #ifndef JEMALLOC_MUTEX_INIT_CB
3189 void
3190 jemalloc_prefork(void)
3191 #else
3192 JEMALLOC_EXPORT void
3193 _malloc_prefork(void)
3194 #endif
3195 {
3196 tsd_t *tsd;
3197 unsigned i, j, narenas;
3198 arena_t *arena;
3199
3200 #ifdef JEMALLOC_MUTEX_INIT_CB
3201 if (!malloc_initialized()) {
3202 return;
3203 }
3204 #endif
3205 assert(malloc_initialized());
3206
3207 tsd = tsd_fetch();
3208
3209 narenas = narenas_total_get();
3210
3211 witness_prefork(tsd_witness_tsdp_get(tsd));
3212 /* Acquire all mutexes in a safe order. */
3213 ctl_prefork(tsd_tsdn(tsd));
3214 tcache_prefork(tsd_tsdn(tsd));
3215 malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
3216 if (have_background_thread) {
3217 background_thread_prefork0(tsd_tsdn(tsd));
3218 }
3219 prof_prefork0(tsd_tsdn(tsd));
3220 if (have_background_thread) {
3221 background_thread_prefork1(tsd_tsdn(tsd));
3222 }
3223 /* Break arena prefork into stages to preserve lock order. */
3224 for (i = 0; i < 8; i++) {
3225 for (j = 0; j < narenas; j++) {
3226 if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
3227 NULL) {
3228 switch (i) {
3229 case 0:
3230 arena_prefork0(tsd_tsdn(tsd), arena);
3231 break;
3232 case 1:
3233 arena_prefork1(tsd_tsdn(tsd), arena);
3234 break;
3235 case 2:
3236 arena_prefork2(tsd_tsdn(tsd), arena);
3237 break;
3238 case 3:
3239 arena_prefork3(tsd_tsdn(tsd), arena);
3240 break;
3241 case 4:
3242 arena_prefork4(tsd_tsdn(tsd), arena);
3243 break;
3244 case 5:
3245 arena_prefork5(tsd_tsdn(tsd), arena);
3246 break;
3247 case 6:
3248 arena_prefork6(tsd_tsdn(tsd), arena);
3249 break;
3250 case 7:
3251 arena_prefork7(tsd_tsdn(tsd), arena);
3252 break;
3253 default: not_reached();
3254 }
3255 }
3256 }
3257 }
3258 prof_prefork1(tsd_tsdn(tsd));
3259 }
3260
3261 #ifndef JEMALLOC_MUTEX_INIT_CB
3262 void
3263 jemalloc_postfork_parent(void)
3264 #else
3265 JEMALLOC_EXPORT void
3266 _malloc_postfork(void)
3267 #endif
3268 {
3269 tsd_t *tsd;
3270 unsigned i, narenas;
3271
3272 #ifdef JEMALLOC_MUTEX_INIT_CB
3273 if (!malloc_initialized()) {
3274 return;
3275 }
3276 #endif
3277 assert(malloc_initialized());
3278
3279 tsd = tsd_fetch();
3280
3281 witness_postfork_parent(tsd_witness_tsdp_get(tsd));
3282 /* Release all mutexes, now that fork() has completed. */
3283 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3284 arena_t *arena;
3285
3286 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3287 arena_postfork_parent(tsd_tsdn(tsd), arena);
3288 }
3289 }
3290 prof_postfork_parent(tsd_tsdn(tsd));
3291 if (have_background_thread) {
3292 background_thread_postfork_parent(tsd_tsdn(tsd));
3293 }
3294 malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
3295 tcache_postfork_parent(tsd_tsdn(tsd));
3296 ctl_postfork_parent(tsd_tsdn(tsd));
3297 }
3298
3299 void
3300 jemalloc_postfork_child(void) {
3301 tsd_t *tsd;
3302 unsigned i, narenas;
3303
3304 assert(malloc_initialized());
3305
3306 tsd = tsd_fetch();
3307
3308 witness_postfork_child(tsd_witness_tsdp_get(tsd));
3309 /* Release all mutexes, now that fork() has completed. */
3310 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3311 arena_t *arena;
3312
3313 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3314 arena_postfork_child(tsd_tsdn(tsd), arena);
3315 }
3316 }
3317 prof_postfork_child(tsd_tsdn(tsd));
3318 if (have_background_thread) {
3319 background_thread_postfork_child(tsd_tsdn(tsd));
3320 }
3321 malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
3322 tcache_postfork_child(tsd_tsdn(tsd));
3323 ctl_postfork_child(tsd_tsdn(tsd));
3324 }
3325
3326 /******************************************************************************/
3327
3328 /* Helps the application decide whether a pointer is worth reallocating in
3329  * order to reduce fragmentation. Returns 0 if the allocation is in the
3330  * currently active run or is not causing fragmentation (large or huge class);
3331  * otherwise reports the bin and run utilization via bin_util and run_util,
3332  * both in 16:16 fixed point. Reallocation should use MALLOCX_TCACHE_NONE. */
3333 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3334 get_defrag_hint(void* ptr, int *bin_util, int *run_util) {
3335 assert(ptr != NULL);
3336 return iget_defrag_hint(TSDN_NULL, ptr, bin_util, run_util);
3337 }
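/*
 * Illustrative usage sketch (not part of the jemalloc sources, and not the
 * exact policy of any particular application): a defragmentation pass might
 * consult the hint and move allocations that sit in sparsely used runs,
 * bypassing the tcache as the comment above requires. Here ptr is assumed to
 * refer to an allocation whose requested size is size:
 *
 *	int bin_util, run_util;
 *	if (get_defrag_hint(ptr, &bin_util, &run_util) != 0 &&
 *	    run_util < bin_util) {
 *		void *newptr = mallocx(size, MALLOCX_TCACHE_NONE);
 *		if (newptr != NULL) {
 *			memcpy(newptr, ptr, size);
 *			dallocx(ptr, MALLOCX_TCACHE_NONE);
 *			ptr = newptr;
 *		}
 *	}
 */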
3338