xref: /freebsd/contrib/jemalloc/src/jemalloc.c (revision 325151a3)
1 #define	JEMALLOC_C_
2 #include "jemalloc/internal/jemalloc_internal.h"
3 
4 /******************************************************************************/
5 /* Data. */
6 
7 /* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
8 const char	*__malloc_options_1_0 = NULL;
9 __sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
10 
11 /* Runtime configuration options. */
12 const char	*je_malloc_conf JEMALLOC_ATTR(weak);
13 bool	opt_abort =
14 #ifdef JEMALLOC_DEBUG
15     true
16 #else
17     false
18 #endif
19     ;
20 const char	*opt_junk =
21 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
22     "true"
23 #else
24     "false"
25 #endif
26     ;
27 bool	opt_junk_alloc =
28 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
29     true
30 #else
31     false
32 #endif
33     ;
34 bool	opt_junk_free =
35 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
36     true
37 #else
38     false
39 #endif
40     ;
41 
42 size_t	opt_quarantine = ZU(0);
43 bool	opt_redzone = false;
44 bool	opt_utrace = false;
45 bool	opt_xmalloc = false;
46 bool	opt_zero = false;
47 size_t	opt_narenas = 0;
48 
49 /* Initialized to true if the process is running inside Valgrind. */
50 bool	in_valgrind;
51 
52 unsigned	ncpus;
53 
54 /* Protects arenas initialization (arenas, narenas_total). */
55 static malloc_mutex_t	arenas_lock;
56 /*
57  * Arenas that are used to service external requests.  Not all elements of the
58  * arenas array are necessarily used; arenas are created lazily as needed.
59  *
60  * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
61  * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
62  * takes some action to create them and allocate from them.
63  */
64 static arena_t		**arenas;
65 static unsigned		narenas_total;
66 static arena_t		*a0; /* arenas[0]; read-only after initialization. */
67 static unsigned		narenas_auto; /* Read-only after initialization. */
68 
69 typedef enum {
70 	malloc_init_uninitialized	= 3,
71 	malloc_init_a0_initialized	= 2,
72 	malloc_init_recursible		= 1,
73 	malloc_init_initialized		= 0 /* Common case --> jnz (compare against zero). */
74 } malloc_init_t;
75 static malloc_init_t	malloc_init_state = malloc_init_uninitialized;
76 
77 JEMALLOC_ALIGNED(CACHELINE)
78 const size_t	index2size_tab[NSIZES] = {
79 #define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
80 	((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
81 	SIZE_CLASSES
82 #undef SC
83 };
84 
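/*
 * Each SC() entry above evaluates to the usable size of one size class,
 * 2^lg_grp + ndelta * 2^lg_delta.  For example, a hypothetical tuple
 * SC(index, 6, 4, 3, ...) contributes (1 << 6) + (3 << 4) = 112 bytes;
 * the real tuples are generated into SIZE_CLASSES (see size_classes.h).
 */
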
85 JEMALLOC_ALIGNED(CACHELINE)
86 const uint8_t	size2index_tab[] = {
87 #if LG_TINY_MIN == 0
88 #warning "Dangerous LG_TINY_MIN"
89 #define	S2B_0(i)	i,
90 #elif LG_TINY_MIN == 1
91 #warning "Dangerous LG_TINY_MIN"
92 #define	S2B_1(i)	i,
93 #elif LG_TINY_MIN == 2
94 #warning "Dangerous LG_TINY_MIN"
95 #define	S2B_2(i)	i,
96 #elif LG_TINY_MIN == 3
97 #define	S2B_3(i)	i,
98 #elif LG_TINY_MIN == 4
99 #define	S2B_4(i)	i,
100 #elif LG_TINY_MIN == 5
101 #define	S2B_5(i)	i,
102 #elif LG_TINY_MIN == 6
103 #define	S2B_6(i)	i,
104 #elif LG_TINY_MIN == 7
105 #define	S2B_7(i)	i,
106 #elif LG_TINY_MIN == 8
107 #define	S2B_8(i)	i,
108 #elif LG_TINY_MIN == 9
109 #define	S2B_9(i)	i,
110 #elif LG_TINY_MIN == 10
111 #define	S2B_10(i)	i,
112 #elif LG_TINY_MIN == 11
113 #define	S2B_11(i)	i,
114 #else
115 #error "Unsupported LG_TINY_MIN"
116 #endif
117 #if LG_TINY_MIN < 1
118 #define	S2B_1(i)	S2B_0(i) S2B_0(i)
119 #endif
120 #if LG_TINY_MIN < 2
121 #define	S2B_2(i)	S2B_1(i) S2B_1(i)
122 #endif
123 #if LG_TINY_MIN < 3
124 #define	S2B_3(i)	S2B_2(i) S2B_2(i)
125 #endif
126 #if LG_TINY_MIN < 4
127 #define	S2B_4(i)	S2B_3(i) S2B_3(i)
128 #endif
129 #if LG_TINY_MIN < 5
130 #define	S2B_5(i)	S2B_4(i) S2B_4(i)
131 #endif
132 #if LG_TINY_MIN < 6
133 #define	S2B_6(i)	S2B_5(i) S2B_5(i)
134 #endif
135 #if LG_TINY_MIN < 7
136 #define	S2B_7(i)	S2B_6(i) S2B_6(i)
137 #endif
138 #if LG_TINY_MIN < 8
139 #define	S2B_8(i)	S2B_7(i) S2B_7(i)
140 #endif
141 #if LG_TINY_MIN < 9
142 #define	S2B_9(i)	S2B_8(i) S2B_8(i)
143 #endif
144 #if LG_TINY_MIN < 10
145 #define	S2B_10(i)	S2B_9(i) S2B_9(i)
146 #endif
147 #if LG_TINY_MIN < 11
148 #define	S2B_11(i)	S2B_10(i) S2B_10(i)
149 #endif
150 #define	S2B_no(i)
151 #define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
152 	S2B_##lg_delta_lookup(index)
153 	SIZE_CLASSES
154 #undef S2B_3
155 #undef S2B_4
156 #undef S2B_5
157 #undef S2B_6
158 #undef S2B_7
159 #undef S2B_8
160 #undef S2B_9
161 #undef S2B_10
162 #undef S2B_11
163 #undef S2B_no
164 #undef SC
165 };
166 
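/*
 * size2index_tab is the inverse lookup: each S2B_* expansion emits one
 * table slot per 2^LG_TINY_MIN bytes, so small requests map to a size
 * class index with a single load.  Roughly (a sketch of the
 * size2index()/index2size() helpers in jemalloc_internal.h):
 *
 *	szind_t ind = size2index_tab[(size - 1) >> LG_TINY_MIN];
 *	size_t usize = index2size_tab[ind];
 *
 * Sizes above the lookup limit fall back to a computed path instead.
 */
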
167 #ifdef JEMALLOC_THREADED_INIT
168 /* Used to let the initializing thread recursively allocate. */
169 #  define NO_INITIALIZER	((unsigned long)0)
170 #  define INITIALIZER		pthread_self()
171 #  define IS_INITIALIZER	(malloc_initializer == pthread_self())
172 static pthread_t		malloc_initializer = NO_INITIALIZER;
173 #else
174 #  define NO_INITIALIZER	false
175 #  define INITIALIZER		true
176 #  define IS_INITIALIZER	malloc_initializer
177 static bool			malloc_initializer = NO_INITIALIZER;
178 #endif
179 
180 /* Used to avoid initialization races. */
181 #ifdef _WIN32
182 #if _WIN32_WINNT >= 0x0600
183 static malloc_mutex_t	init_lock = SRWLOCK_INIT;
184 #else
185 static malloc_mutex_t	init_lock;
186 static bool init_lock_initialized = false;
187 
188 JEMALLOC_ATTR(constructor)
189 static void WINAPI
190 _init_init_lock(void)
191 {
192 
193 	/* If another constructor in the same binary is using mallctl to
194 	 * e.g. set up chunk hooks, it may end up running before this one,
195 	 * and malloc_init_hard will crash trying to lock the uninitialized
196 	 * lock.  So we force an initialization of the lock in
197 	 * malloc_init_hard as well.  We don't bother making the accesses to
198 	 * the init_lock_initialized boolean atomic, since it really only
199 	 * matters early in process creation, before any separate thread
200 	 * normally starts doing anything. */
201 	if (!init_lock_initialized)
202 		malloc_mutex_init(&init_lock);
203 	init_lock_initialized = true;
204 }
205 
206 #ifdef _MSC_VER
207 #  pragma section(".CRT$XCU", read)
208 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
209 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
210 #endif
211 #endif
212 #else
213 static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
214 #endif
215 
216 typedef struct {
217 	void	*p;	/* Input pointer (as in realloc(p, s)). */
218 	size_t	s;	/* Request size. */
219 	void	*r;	/* Result pointer. */
220 } malloc_utrace_t;
221 
222 #ifdef JEMALLOC_UTRACE
223 #  define UTRACE(a, b, c) do {						\
224 	if (unlikely(opt_utrace)) {					\
225 		int utrace_serrno = errno;				\
226 		malloc_utrace_t ut;					\
227 		ut.p = (a);						\
228 		ut.s = (b);						\
229 		ut.r = (c);						\
230 		utrace(&ut, sizeof(ut));				\
231 		errno = utrace_serrno;					\
232 	}								\
233 } while (0)
234 #else
235 #  define UTRACE(a, b, c)
236 #endif
237 
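/*
 * The (a, b, c) arguments mirror realloc(): input pointer, request size,
 * result pointer.  The call sites below follow that convention:
 *
 *	UTRACE(0, size, ret);		malloc(size) returned ret
 *	UTRACE(ptr, 0, 0);		free(ptr)
 *	UTRACE(ptr, size, ret);		realloc(ptr, size) returned ret
 */
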
238 /******************************************************************************/
239 /*
240  * Function prototypes for static functions that are referenced prior to
241  * definition.
242  */
243 
244 static bool	malloc_init_hard_a0(void);
245 static bool	malloc_init_hard(void);
246 
247 /******************************************************************************/
248 /*
249  * Begin miscellaneous support functions.
250  */
251 
252 JEMALLOC_ALWAYS_INLINE_C bool
253 malloc_initialized(void)
254 {
255 
256 	return (malloc_init_state == malloc_init_initialized);
257 }
258 
259 JEMALLOC_ALWAYS_INLINE_C void
260 malloc_thread_init(void)
261 {
262 
263 	/*
264 	 * TSD initialization can't be safely done as a side effect of
265 	 * deallocation, because it is possible for a thread to do nothing but
266 	 * deallocate its TLS data via free(), in which case writing to TLS
267 	 * would cause write-after-free memory corruption.  The quarantine
268 	 * facility *only* gets used as a side effect of deallocation, so make
269 	 * a best effort attempt at initializing its TSD by hooking all
270 	 * allocation events.
271 	 */
272 	if (config_fill && unlikely(opt_quarantine))
273 		quarantine_alloc_hook();
274 }
275 
276 JEMALLOC_ALWAYS_INLINE_C bool
277 malloc_init_a0(void)
278 {
279 
280 	if (unlikely(malloc_init_state == malloc_init_uninitialized))
281 		return (malloc_init_hard_a0());
282 	return (false);
283 }
284 
285 JEMALLOC_ALWAYS_INLINE_C bool
286 malloc_init(void)
287 {
288 
289 	if (unlikely(!malloc_initialized()) && malloc_init_hard())
290 		return (true);
291 	malloc_thread_init();
292 
293 	return (false);
294 }
295 
296 /*
297  * The a0*() functions are used instead of i[mcd]alloc() in situations that
298  * cannot tolerate TLS variable access.
299  */
300 
301 arena_t *
302 a0get(void)
303 {
304 
305 	assert(a0 != NULL);
306 	return (a0);
307 }
308 
309 static void *
310 a0ialloc(size_t size, bool zero, bool is_metadata)
311 {
312 
313 	if (unlikely(malloc_init_a0()))
314 		return (NULL);
315 
316 	return (iallocztm(NULL, size, zero, false, is_metadata, a0get()));
317 }
318 
319 static void
320 a0idalloc(void *ptr, bool is_metadata)
321 {
322 
323 	idalloctm(NULL, ptr, false, is_metadata);
324 }
325 
326 void *
327 a0malloc(size_t size)
328 {
329 
330 	return (a0ialloc(size, false, true));
331 }
332 
333 void
334 a0dalloc(void *ptr)
335 {
336 
337 	a0idalloc(ptr, true);
338 }
339 
340 /*
341  * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
342  * situations that cannot tolerate TLS variable access (TLS allocation and very
343  * early internal data structure initialization).
344  */
345 
346 void *
347 bootstrap_malloc(size_t size)
348 {
349 
350 	if (unlikely(size == 0))
351 		size = 1;
352 
353 	return (a0ialloc(size, false, false));
354 }
355 
356 void *
357 bootstrap_calloc(size_t num, size_t size)
358 {
359 	size_t num_size;
360 
361 	num_size = num * size;
362 	if (unlikely(num_size == 0)) {
363 		assert(num == 0 || size == 0);
364 		num_size = 1;
365 	}
366 
367 	return (a0ialloc(num_size, true, false));
368 }
369 
370 void
371 bootstrap_free(void *ptr)
372 {
373 
374 	if (unlikely(ptr == NULL))
375 		return;
376 
377 	a0idalloc(ptr, false);
378 }
379 
380 /* Create a new arena and insert it into the arenas array at index ind. */
381 static arena_t *
382 arena_init_locked(unsigned ind)
383 {
384 	arena_t *arena;
385 
386 	/* Expand arenas if necessary. */
387 	assert(ind <= narenas_total);
388 	if (ind > MALLOCX_ARENA_MAX)
389 		return (NULL);
390 	if (ind == narenas_total) {
391 		unsigned narenas_new = narenas_total + 1;
392 		arena_t **arenas_new =
393 		    (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new *
394 		    sizeof(arena_t *)));
395 		if (arenas_new == NULL)
396 			return (NULL);
397 		memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *));
398 		arenas_new[ind] = NULL;
399 		/*
400 		 * Deallocate only if arenas came from a0malloc() (not
401 		 * base_alloc()).
402 		 */
403 		if (narenas_total != narenas_auto)
404 			a0dalloc(arenas);
405 		arenas = arenas_new;
406 		narenas_total = narenas_new;
407 	}
408 
409 	/*
410 	 * Another thread may have already initialized arenas[ind] if it's an
411 	 * auto arena.
412 	 */
413 	arena = arenas[ind];
414 	if (arena != NULL) {
415 		assert(ind < narenas_auto);
416 		return (arena);
417 	}
418 
419 	/* Actually initialize the arena. */
420 	arena = arenas[ind] = arena_new(ind);
421 	return (arena);
422 }
423 
424 arena_t *
425 arena_init(unsigned ind)
426 {
427 	arena_t *arena;
428 
429 	malloc_mutex_lock(&arenas_lock);
430 	arena = arena_init_locked(ind);
431 	malloc_mutex_unlock(&arenas_lock);
432 	return (arena);
433 }
434 
435 unsigned
436 narenas_total_get(void)
437 {
438 	unsigned narenas;
439 
440 	malloc_mutex_lock(&arenas_lock);
441 	narenas = narenas_total;
442 	malloc_mutex_unlock(&arenas_lock);
443 
444 	return (narenas);
445 }
446 
447 static void
448 arena_bind_locked(tsd_t *tsd, unsigned ind)
449 {
450 	arena_t *arena;
451 
452 	arena = arenas[ind];
453 	arena->nthreads++;
454 
455 	if (tsd_nominal(tsd))
456 		tsd_arena_set(tsd, arena);
457 }
458 
459 static void
460 arena_bind(tsd_t *tsd, unsigned ind)
461 {
462 
463 	malloc_mutex_lock(&arenas_lock);
464 	arena_bind_locked(tsd, ind);
465 	malloc_mutex_unlock(&arenas_lock);
466 }
467 
468 void
469 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
470 {
471 	arena_t *oldarena, *newarena;
472 
473 	malloc_mutex_lock(&arenas_lock);
474 	oldarena = arenas[oldind];
475 	newarena = arenas[newind];
476 	oldarena->nthreads--;
477 	newarena->nthreads++;
478 	malloc_mutex_unlock(&arenas_lock);
479 	tsd_arena_set(tsd, newarena);
480 }
481 
482 unsigned
483 arena_nbound(unsigned ind)
484 {
485 	unsigned nthreads;
486 
487 	malloc_mutex_lock(&arenas_lock);
488 	nthreads = arenas[ind]->nthreads;
489 	malloc_mutex_unlock(&arenas_lock);
490 	return (nthreads);
491 }
492 
493 static void
494 arena_unbind(tsd_t *tsd, unsigned ind)
495 {
496 	arena_t *arena;
497 
498 	malloc_mutex_lock(&arenas_lock);
499 	arena = arenas[ind];
500 	arena->nthreads--;
501 	malloc_mutex_unlock(&arenas_lock);
502 	tsd_arena_set(tsd, NULL);
503 }
504 
505 arena_t *
506 arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
507 {
508 	arena_t *arena;
509 	arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
510 	unsigned narenas_cache = tsd_narenas_cache_get(tsd);
511 	unsigned narenas_actual = narenas_total_get();
512 
513 	/* Deallocate old cache if it's too small. */
514 	if (arenas_cache != NULL && narenas_cache < narenas_actual) {
515 		a0dalloc(arenas_cache);
516 		arenas_cache = NULL;
517 		narenas_cache = 0;
518 		tsd_arenas_cache_set(tsd, arenas_cache);
519 		tsd_narenas_cache_set(tsd, narenas_cache);
520 	}
521 
522 	/* Allocate cache if it's missing. */
523 	if (arenas_cache == NULL) {
524 		bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd);
525 		assert(ind < narenas_actual || !init_if_missing);
526 		narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1;
527 
528 		if (tsd_nominal(tsd) && !*arenas_cache_bypassp) {
529 			*arenas_cache_bypassp = true;
530 			arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
531 			    narenas_cache);
532 			*arenas_cache_bypassp = false;
533 		}
534 		if (arenas_cache == NULL) {
535 			/*
536 			 * This function must always tell the truth, even if
537 			 * it's slow, so don't let OOM, thread cleanup (note
538 			 * tsd_nominal check), or recursive allocation
539 			 * avoidance (note arenas_cache_bypass check) get in the
540 			 * way.
541 			 */
542 			if (ind >= narenas_actual)
543 				return (NULL);
544 			malloc_mutex_lock(&arenas_lock);
545 			arena = arenas[ind];
546 			malloc_mutex_unlock(&arenas_lock);
547 			return (arena);
548 		}
549 		assert(tsd_nominal(tsd) && !*arenas_cache_bypassp);
550 		tsd_arenas_cache_set(tsd, arenas_cache);
551 		tsd_narenas_cache_set(tsd, narenas_cache);
552 	}
553 
554 	/*
555 	 * Copy to cache.  It's possible that the actual number of arenas has
556 	 * increased since narenas_total_get() was called above, but that causes
557 	 * no correctness issues unless two threads concurrently execute the
558 	 * arenas.extend mallctl, which we trust mallctl synchronization to
559 	 * prevent.
560 	 */
561 	malloc_mutex_lock(&arenas_lock);
562 	memcpy(arenas_cache, arenas, sizeof(arena_t *) * narenas_actual);
563 	malloc_mutex_unlock(&arenas_lock);
564 	if (narenas_cache > narenas_actual) {
565 		memset(&arenas_cache[narenas_actual], 0, sizeof(arena_t *) *
566 		    (narenas_cache - narenas_actual));
567 	}
568 
569 	/* Read the refreshed cache, and init the arena if necessary. */
570 	arena = arenas_cache[ind];
571 	if (init_if_missing && arena == NULL)
572 		arena = arenas_cache[ind] = arena_init(ind);
573 	return (arena);
574 }
575 
576 /* Slow path, called only by arena_choose(). */
577 arena_t *
578 arena_choose_hard(tsd_t *tsd)
579 {
580 	arena_t *ret;
581 
582 	if (narenas_auto > 1) {
583 		unsigned i, choose, first_null;
584 
585 		choose = 0;
586 		first_null = narenas_auto;
587 		malloc_mutex_lock(&arenas_lock);
588 		assert(a0get() != NULL);
589 		for (i = 1; i < narenas_auto; i++) {
590 			if (arenas[i] != NULL) {
591 				/*
592 				 * Choose the first arena that has the lowest
593 				 * number of threads assigned to it.
594 				 */
595 				if (arenas[i]->nthreads <
596 				    arenas[choose]->nthreads)
597 					choose = i;
598 			} else if (first_null == narenas_auto) {
599 				/*
600 				 * Record the index of the first uninitialized
601 				 * arena, in case all extant arenas are in use.
602 				 *
603 				 * NB: It is possible for there to be
604 				 * discontinuities in terms of initialized
605 				 * versus uninitialized arenas, due to the
606 				 * "thread.arena" mallctl.
607 				 */
608 				first_null = i;
609 			}
610 		}
611 
612 		if (arenas[choose]->nthreads == 0
613 		    || first_null == narenas_auto) {
614 			/*
615 			 * Use an unloaded arena, or the least loaded arena if
616 			 * all arenas are already initialized.
617 			 */
618 			ret = arenas[choose];
619 		} else {
620 			/* Initialize a new arena. */
621 			choose = first_null;
622 			ret = arena_init_locked(choose);
623 			if (ret == NULL) {
624 				malloc_mutex_unlock(&arenas_lock);
625 				return (NULL);
626 			}
627 		}
628 		arena_bind_locked(tsd, choose);
629 		malloc_mutex_unlock(&arenas_lock);
630 	} else {
631 		ret = a0get();
632 		arena_bind(tsd, 0);
633 	}
634 
635 	return (ret);
636 }
637 
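/*
 * Example of the policy above: with narenas_auto == 4 and per-arena
 * thread counts {2, 0, 1, <uninitialized>}, the scan settles on
 * arenas[1] (zero threads bound) and the uninitialized slot is left
 * alone; only if every initialized arena were busy would first_null be
 * used to create a new arena, and if no uninitialized slot existed the
 * least loaded arena would be shared.  (Illustrative numbers only.)
 */
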
638 void
639 thread_allocated_cleanup(tsd_t *tsd)
640 {
641 
642 	/* Do nothing. */
643 }
644 
645 void
646 thread_deallocated_cleanup(tsd_t *tsd)
647 {
648 
649 	/* Do nothing. */
650 }
651 
652 void
653 arena_cleanup(tsd_t *tsd)
654 {
655 	arena_t *arena;
656 
657 	arena = tsd_arena_get(tsd);
658 	if (arena != NULL)
659 		arena_unbind(tsd, arena->ind);
660 }
661 
662 void
663 arenas_cache_cleanup(tsd_t *tsd)
664 {
665 	arena_t **arenas_cache;
666 
667 	arenas_cache = tsd_arenas_cache_get(tsd);
668 	if (arenas_cache != NULL) {
669 		tsd_arenas_cache_set(tsd, NULL);
670 		a0dalloc(arenas_cache);
671 	}
672 }
673 
674 void
675 narenas_cache_cleanup(tsd_t *tsd)
676 {
677 
678 	/* Do nothing. */
679 }
680 
681 void
682 arenas_cache_bypass_cleanup(tsd_t *tsd)
683 {
684 
685 	/* Do nothing. */
686 }
687 
688 static void
689 stats_print_atexit(void)
690 {
691 
692 	if (config_tcache && config_stats) {
693 		unsigned narenas, i;
694 
695 		/*
696 		 * Merge stats from extant threads.  This is racy, since
697 		 * individual threads do not lock when recording tcache stats
698 		 * events.  As a consequence, the final stats may be slightly
699 		 * out of date by the time they are reported, if other threads
700 		 * continue to allocate.
701 		 */
702 		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
703 			arena_t *arena = arenas[i];
704 			if (arena != NULL) {
705 				tcache_t *tcache;
706 
707 				/*
708 				 * tcache_stats_merge() locks bins, so if any
709 				 * code is introduced that acquires both arena
710 				 * and bin locks in the opposite order,
711 				 * deadlocks may result.
712 				 */
713 				malloc_mutex_lock(&arena->lock);
714 				ql_foreach(tcache, &arena->tcache_ql, link) {
715 					tcache_stats_merge(tcache, arena);
716 				}
717 				malloc_mutex_unlock(&arena->lock);
718 			}
719 		}
720 	}
721 	je_malloc_stats_print(NULL, NULL, NULL);
722 }
723 
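/*
 * stats_print_atexit() is only registered when opt_stats_print is set
 * (see malloc_init_hard_a0_locked()), e.g. by running a program as
 *
 *	MALLOC_CONF="stats_print:true" ./a.out
 *
 * ("stats_print" is the conf key handled in malloc_conf_init() below;
 * example invocation only).
 */
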
724 /*
725  * End miscellaneous support functions.
726  */
727 /******************************************************************************/
728 /*
729  * Begin initialization functions.
730  */
731 
732 #ifndef JEMALLOC_HAVE_SECURE_GETENV
733 static char *
734 secure_getenv(const char *name)
735 {
736 
737 #  ifdef JEMALLOC_HAVE_ISSETUGID
738 	if (issetugid() != 0)
739 		return (NULL);
740 #  endif
741 	return (getenv(name));
742 }
743 #endif
744 
745 static unsigned
746 malloc_ncpus(void)
747 {
748 	long result;
749 
750 #ifdef _WIN32
751 	SYSTEM_INFO si;
752 	GetSystemInfo(&si);
753 	result = si.dwNumberOfProcessors;
754 #else
755 	result = sysconf(_SC_NPROCESSORS_ONLN);
756 #endif
757 	return ((result == -1) ? 1 : (unsigned)result);
758 }
759 
760 static bool
761 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
762     char const **v_p, size_t *vlen_p)
763 {
764 	bool accept;
765 	const char *opts = *opts_p;
766 
767 	*k_p = opts;
768 
769 	for (accept = false; !accept;) {
770 		switch (*opts) {
771 		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
772 		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
773 		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
774 		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
775 		case 'Y': case 'Z':
776 		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
777 		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
778 		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
779 		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
780 		case 'y': case 'z':
781 		case '0': case '1': case '2': case '3': case '4': case '5':
782 		case '6': case '7': case '8': case '9':
783 		case '_':
784 			opts++;
785 			break;
786 		case ':':
787 			opts++;
788 			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
789 			*v_p = opts;
790 			accept = true;
791 			break;
792 		case '\0':
793 			if (opts != *opts_p) {
794 				malloc_write("<jemalloc>: Conf string ends "
795 				    "with key\n");
796 			}
797 			return (true);
798 		default:
799 			malloc_write("<jemalloc>: Malformed conf string\n");
800 			return (true);
801 		}
802 	}
803 
804 	for (accept = false; !accept;) {
805 		switch (*opts) {
806 		case ',':
807 			opts++;
808 			/*
809 			 * Look ahead one character here, because the next time
810 			 * this function is called, it will assume that end of
811 			 * input has been cleanly reached if no input remains,
812 			 * but we have optimistically already consumed the
813 			 * comma if one exists.
814 			 */
815 			if (*opts == '\0') {
816 				malloc_write("<jemalloc>: Conf string ends "
817 				    "with comma\n");
818 			}
819 			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
820 			accept = true;
821 			break;
822 		case '\0':
823 			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
824 			accept = true;
825 			break;
826 		default:
827 			opts++;
828 			break;
829 		}
830 	}
831 
832 	*opts_p = opts;
833 	return (false);
834 }
835 
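/*
 * The grammar accepted above is a comma-separated list of key:value
 * pairs, with keys limited to [A-Za-z0-9_].  For example, parsing
 *
 *	"abort:true,narenas:4,lg_chunk:22"
 *
 * yields the pairs ("abort", "true"), ("narenas", "4") and
 * ("lg_chunk", "22"), each consumed by a CONF_HANDLE_* macro in
 * malloc_conf_init() below.  (Example string only.)
 */
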
836 static void
837 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
838     size_t vlen)
839 {
840 
841 	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
842 	    (int)vlen, v);
843 }
844 
845 static void
846 malloc_conf_init(void)
847 {
848 	unsigned i;
849 	char buf[PATH_MAX + 1];
850 	const char *opts, *k, *v;
851 	size_t klen, vlen;
852 
853 	/*
854 	 * Automatically configure valgrind before processing options.  The
855 	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
856 	 */
857 	if (config_valgrind) {
858 		in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
859 		if (config_fill && unlikely(in_valgrind)) {
860 			opt_junk = "false";
861 			opt_junk_alloc = false;
862 			opt_junk_free = false;
863 			assert(!opt_zero);
864 			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
865 			opt_redzone = true;
866 		}
867 		if (config_tcache && unlikely(in_valgrind))
868 			opt_tcache = false;
869 	}
870 
871 	for (i = 0; i < 3; i++) {
872 		/* Get runtime configuration. */
873 		switch (i) {
874 		case 0:
875 			if (je_malloc_conf != NULL) {
876 				/*
877 				 * Use options that were compiled into the
878 				 * program.
879 				 */
880 				opts = je_malloc_conf;
881 			} else {
882 				/* No configuration specified. */
883 				buf[0] = '\0';
884 				opts = buf;
885 			}
886 			break;
887 		case 1: {
888 			int linklen = 0;
889 #ifndef _WIN32
890 			int saved_errno = errno;
891 			const char *linkname =
892 #  ifdef JEMALLOC_PREFIX
893 			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
894 #  else
895 			    "/etc/malloc.conf"
896 #  endif
897 			    ;
898 
899 			/*
900 			 * Try to use the contents of the "/etc/malloc.conf"
901 			 * symbolic link's name.
902 			 */
903 			linklen = readlink(linkname, buf, sizeof(buf) - 1);
904 			if (linklen == -1) {
905 				/* No configuration specified. */
906 				linklen = 0;
907 				/* Restore errno. */
908 				set_errno(saved_errno);
909 			}
910 #endif
911 			buf[linklen] = '\0';
912 			opts = buf;
913 			break;
914 		} case 2: {
915 			const char *envname =
916 #ifdef JEMALLOC_PREFIX
917 			    JEMALLOC_CPREFIX"MALLOC_CONF"
918 #else
919 			    "MALLOC_CONF"
920 #endif
921 			    ;
922 
923 			if ((opts = secure_getenv(envname)) != NULL) {
924 				/*
925 				 * Do nothing; opts is already initialized to
926 				 * the value of the MALLOC_CONF environment
927 				 * variable.
928 				 */
929 			} else {
930 				/* No configuration specified. */
931 				buf[0] = '\0';
932 				opts = buf;
933 			}
934 			break;
935 		} default:
936 			not_reached();
937 			buf[0] = '\0';
938 			opts = buf;
939 		}
940 
941 		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
942 		    &vlen)) {
943 #define	CONF_MATCH(n)							\
944 	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
945 #define	CONF_MATCH_VALUE(n)						\
946 	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
947 #define	CONF_HANDLE_BOOL(o, n, cont)					\
948 			if (CONF_MATCH(n)) {				\
949 				if (CONF_MATCH_VALUE("true"))		\
950 					o = true;			\
951 				else if (CONF_MATCH_VALUE("false"))	\
952 					o = false;			\
953 				else {					\
954 					malloc_conf_error(		\
955 					    "Invalid conf value",	\
956 					    k, klen, v, vlen);		\
957 				}					\
958 				if (cont)				\
959 					continue;			\
960 			}
961 #define	CONF_HANDLE_SIZE_T(o, n, min, max, clip)			\
962 			if (CONF_MATCH(n)) {				\
963 				uintmax_t um;				\
964 				char *end;				\
965 									\
966 				set_errno(0);				\
967 				um = malloc_strtoumax(v, &end, 0);	\
968 				if (get_errno() != 0 || (uintptr_t)end -\
969 				    (uintptr_t)v != vlen) {		\
970 					malloc_conf_error(		\
971 					    "Invalid conf value",	\
972 					    k, klen, v, vlen);		\
973 				} else if (clip) {			\
974 					if ((min) != 0 && um < (min))	\
975 						o = (min);		\
976 					else if (um > (max))		\
977 						o = (max);		\
978 					else				\
979 						o = um;			\
980 				} else {				\
981 					if (((min) != 0 && um < (min))	\
982 					    || um > (max)) {		\
983 						malloc_conf_error(	\
984 						    "Out-of-range "	\
985 						    "conf value",	\
986 						    k, klen, v, vlen);	\
987 					} else				\
988 						o = um;			\
989 				}					\
990 				continue;				\
991 			}
992 #define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
993 			if (CONF_MATCH(n)) {				\
994 				long l;					\
995 				char *end;				\
996 									\
997 				set_errno(0);				\
998 				l = strtol(v, &end, 0);			\
999 				if (get_errno() != 0 || (uintptr_t)end -\
1000 				    (uintptr_t)v != vlen) {		\
1001 					malloc_conf_error(		\
1002 					    "Invalid conf value",	\
1003 					    k, klen, v, vlen);		\
1004 				} else if (l < (ssize_t)(min) || l >	\
1005 				    (ssize_t)(max)) {			\
1006 					malloc_conf_error(		\
1007 					    "Out-of-range conf value",	\
1008 					    k, klen, v, vlen);		\
1009 				} else					\
1010 					o = l;				\
1011 				continue;				\
1012 			}
1013 #define	CONF_HANDLE_CHAR_P(o, n, d)					\
1014 			if (CONF_MATCH(n)) {				\
1015 				size_t cpylen = (vlen <=		\
1016 				    sizeof(o)-1) ? vlen :		\
1017 				    sizeof(o)-1;			\
1018 				strncpy(o, v, cpylen);			\
1019 				o[cpylen] = '\0';			\
1020 				continue;				\
1021 			}
1022 
1023 			CONF_HANDLE_BOOL(opt_abort, "abort", true)
1024 			/*
1025 			 * Chunks always require at least one header page,
1026 			 * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
1027 			 * possibly an additional page in the presence of
1028 			 * redzones.  In order to simplify options processing,
1029 			 * use a conservative bound that accommodates all these
1030 			 * constraints.
1031 			 */
1032 			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
1033 			    LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
1034 			    (sizeof(size_t) << 3) - 1, true)
1035 			if (strncmp("dss", k, klen) == 0) {
1036 				int i;
1037 				bool match = false;
1038 				for (i = 0; i < dss_prec_limit; i++) {
1039 					if (strncmp(dss_prec_names[i], v, vlen)
1040 					    == 0) {
1041 						if (chunk_dss_prec_set(i)) {
1042 							malloc_conf_error(
1043 							    "Error setting dss",
1044 							    k, klen, v, vlen);
1045 						} else {
1046 							opt_dss =
1047 							    dss_prec_names[i];
1048 							match = true;
1049 							break;
1050 						}
1051 					}
1052 				}
1053 				if (!match) {
1054 					malloc_conf_error("Invalid conf value",
1055 					    k, klen, v, vlen);
1056 				}
1057 				continue;
1058 			}
1059 			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
1060 			    SIZE_T_MAX, false)
1061 			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
1062 			    -1, (sizeof(size_t) << 3) - 1)
1063 			CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
1064 			if (config_fill) {
1065 				if (CONF_MATCH("junk")) {
1066 					if (CONF_MATCH_VALUE("true")) {
1067 						opt_junk = "true";
1068 						opt_junk_alloc = opt_junk_free =
1069 						    true;
1070 					} else if (CONF_MATCH_VALUE("false")) {
1071 						opt_junk = "false";
1072 						opt_junk_alloc = opt_junk_free =
1073 						    false;
1074 					} else if (CONF_MATCH_VALUE("alloc")) {
1075 						opt_junk = "alloc";
1076 						opt_junk_alloc = true;
1077 						opt_junk_free = false;
1078 					} else if (CONF_MATCH_VALUE("free")) {
1079 						opt_junk = "free";
1080 						opt_junk_alloc = false;
1081 						opt_junk_free = true;
1082 					} else {
1083 						malloc_conf_error(
1084 						    "Invalid conf value", k,
1085 						    klen, v, vlen);
1086 					}
1087 					continue;
1088 				}
1089 				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
1090 				    0, SIZE_T_MAX, false)
1091 				CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
1092 				CONF_HANDLE_BOOL(opt_zero, "zero", true)
1093 			}
1094 			if (config_utrace) {
1095 				CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
1096 			}
1097 			if (config_xmalloc) {
1098 				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
1099 			}
1100 			if (config_tcache) {
1101 				CONF_HANDLE_BOOL(opt_tcache, "tcache",
1102 				    !config_valgrind || !in_valgrind)
1103 				if (CONF_MATCH("tcache")) {
1104 					assert(config_valgrind && in_valgrind);
1105 					if (opt_tcache) {
1106 						opt_tcache = false;
1107 						malloc_conf_error(
1108 						"tcache cannot be enabled "
1109 						"while running inside Valgrind",
1110 						k, klen, v, vlen);
1111 					}
1112 					continue;
1113 				}
1114 				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
1115 				    "lg_tcache_max", -1,
1116 				    (sizeof(size_t) << 3) - 1)
1117 			}
1118 			if (config_prof) {
1119 				CONF_HANDLE_BOOL(opt_prof, "prof", true)
1120 				CONF_HANDLE_CHAR_P(opt_prof_prefix,
1121 				    "prof_prefix", "jeprof")
1122 				CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
1123 				    true)
1124 				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1125 				    "prof_thread_active_init", true)
1126 				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1127 				    "lg_prof_sample", 0,
1128 				    (sizeof(uint64_t) << 3) - 1, true)
1129 				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
1130 				    true)
1131 				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1132 				    "lg_prof_interval", -1,
1133 				    (sizeof(uint64_t) << 3) - 1)
1134 				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
1135 				    true)
1136 				CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
1137 				    true)
1138 				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
1139 				    true)
1140 			}
1141 			malloc_conf_error("Invalid conf pair", k, klen, v,
1142 			    vlen);
1143 #undef CONF_MATCH
1144 #undef CONF_HANDLE_BOOL
1145 #undef CONF_HANDLE_SIZE_T
1146 #undef CONF_HANDLE_SSIZE_T
1147 #undef CONF_HANDLE_CHAR_P
1148 		}
1149 	}
1150 }
1151 
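/*
 * The three passes above give the configuration sources increasing
 * priority: the compiled-in je_malloc_conf string is processed first,
 * then the name that the /etc/malloc.conf symbolic link points at, and
 * finally the MALLOC_CONF (or <prefix>MALLOC_CONF) environment
 * variable, so a later source overrides keys set by an earlier one.
 * For example, assuming a prefix-less build:
 *
 *	ln -s 'lg_chunk:22' /etc/malloc.conf
 *	MALLOC_CONF="narenas:2" ./a.out
 *
 * runs with both lg_chunk:22 and narenas:2 in effect.
 */
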
1152 /* init_lock must be held. */
1153 static bool
1154 malloc_init_hard_needed(void)
1155 {
1156 
1157 	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1158 	    malloc_init_recursible)) {
1159 		/*
1160 		 * Another thread initialized the allocator before this one
1161 		 * acquired init_lock, or this thread is the initializing
1162 		 * thread, and it is recursively allocating.
1163 		 */
1164 		return (false);
1165 	}
1166 #ifdef JEMALLOC_THREADED_INIT
1167 	if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1168 		/* Busy-wait until the initializing thread completes. */
1169 		do {
1170 			malloc_mutex_unlock(&init_lock);
1171 			CPU_SPINWAIT;
1172 			malloc_mutex_lock(&init_lock);
1173 		} while (!malloc_initialized());
1174 		return (false);
1175 	}
1176 #endif
1177 	return (true);
1178 }
1179 
1180 /* init_lock must be held. */
1181 static bool
1182 malloc_init_hard_a0_locked(void)
1183 {
1184 
1185 	malloc_initializer = INITIALIZER;
1186 
1187 	if (config_prof)
1188 		prof_boot0();
1189 	malloc_conf_init();
1190 	if (opt_stats_print) {
1191 		/* Print statistics at exit. */
1192 		if (atexit(stats_print_atexit) != 0) {
1193 			malloc_write("<jemalloc>: Error in atexit()\n");
1194 			if (opt_abort)
1195 				abort();
1196 		}
1197 	}
1198 	if (base_boot())
1199 		return (true);
1200 	if (chunk_boot())
1201 		return (true);
1202 	if (ctl_boot())
1203 		return (true);
1204 	if (config_prof)
1205 		prof_boot1();
1206 	if (arena_boot())
1207 		return (true);
1208 	if (config_tcache && tcache_boot())
1209 		return (true);
1210 	if (malloc_mutex_init(&arenas_lock))
1211 		return (true);
1212 	/*
1213 	 * Create enough scaffolding to allow recursive allocation in
1214 	 * malloc_ncpus().
1215 	 */
1216 	narenas_total = narenas_auto = 1;
1217 	arenas = &a0;
1218 	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1219 	/*
1220 	 * Initialize one arena here.  The rest are lazily created in
1221 	 * arena_choose_hard().
1222 	 */
1223 	if (arena_init(0) == NULL)
1224 		return (true);
1225 	malloc_init_state = malloc_init_a0_initialized;
1226 	return (false);
1227 }
1228 
1229 static bool
1230 malloc_init_hard_a0(void)
1231 {
1232 	bool ret;
1233 
1234 	malloc_mutex_lock(&init_lock);
1235 	ret = malloc_init_hard_a0_locked();
1236 	malloc_mutex_unlock(&init_lock);
1237 	return (ret);
1238 }
1239 
1240 /*
1241  * Initialize data structures which may trigger recursive allocation.
1242  *
1243  * init_lock must be held.
1244  */
1245 static void
1246 malloc_init_hard_recursible(void)
1247 {
1248 
1249 	malloc_init_state = malloc_init_recursible;
1250 	malloc_mutex_unlock(&init_lock);
1251 
1252 	ncpus = malloc_ncpus();
1253 
1254 #if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
1255     && !defined(_WIN32) && !defined(__native_client__))
1256 	/* LinuxThreads's pthread_atfork() allocates. */
1257 	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1258 	    jemalloc_postfork_child) != 0) {
1259 		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1260 		if (opt_abort)
1261 			abort();
1262 	}
1263 #endif
1264 	malloc_mutex_lock(&init_lock);
1265 }
1266 
1267 /* init_lock must be held. */
1268 static bool
1269 malloc_init_hard_finish(void)
1270 {
1271 
1272 	if (mutex_boot())
1273 		return (true);
1274 
1275 	if (opt_narenas == 0) {
1276 		/*
1277 		 * For SMP systems, create more than one arena per CPU by
1278 		 * default.
1279 		 */
1280 		if (ncpus > 1)
1281 			opt_narenas = ncpus << 2;
1282 		else
1283 			opt_narenas = 1;
1284 	}
1285 	narenas_auto = opt_narenas;
1286 	/*
1287 	 * Make sure that the arenas array can be allocated.  In practice, this
1288 	 * limit is enough to allow the allocator to function, but the ctl
1289 	 * machinery will fail to allocate memory at far lower limits.
1290 	 */
1291 	if (narenas_auto > chunksize / sizeof(arena_t *)) {
1292 		narenas_auto = chunksize / sizeof(arena_t *);
1293 		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
1294 		    narenas_auto);
1295 	}
1296 	narenas_total = narenas_auto;
1297 
1298 	/* Allocate and initialize arenas. */
1299 	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
1300 	if (arenas == NULL)
1301 		return (true);
1302 	/*
1303 	 * Zero the array.  In practice, this should always be pre-zeroed,
1304 	 * since it was just mmap()ed, but let's be sure.
1305 	 */
1306 	memset(arenas, 0, sizeof(arena_t *) * narenas_total);
1307 	/* Copy the pointer to the one arena that was already initialized. */
1308 	arenas[0] = a0;
1309 
1310 	malloc_init_state = malloc_init_initialized;
1311 	return (false);
1312 }
1313 
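/*
 * With opt_narenas left at its default of 0, the code above sizes the
 * automatic arena pool from the CPU count: e.g. ncpus == 8 yields
 * narenas_auto == 32 (ncpus << 2), while a uniprocessor system gets a
 * single arena.
 */
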
1314 static bool
1315 malloc_init_hard(void)
1316 {
1317 
1318 #if defined(_WIN32) && _WIN32_WINNT < 0x0600
1319 	_init_init_lock();
1320 #endif
1321 	malloc_mutex_lock(&init_lock);
1322 	if (!malloc_init_hard_needed()) {
1323 		malloc_mutex_unlock(&init_lock);
1324 		return (false);
1325 	}
1326 
1327 	if (malloc_init_state != malloc_init_a0_initialized &&
1328 	    malloc_init_hard_a0_locked()) {
1329 		malloc_mutex_unlock(&init_lock);
1330 		return (true);
1331 	}
1332 	if (malloc_tsd_boot0()) {
1333 		malloc_mutex_unlock(&init_lock);
1334 		return (true);
1335 	}
1336 	if (config_prof && prof_boot2()) {
1337 		malloc_mutex_unlock(&init_lock);
1338 		return (true);
1339 	}
1340 
1341 	malloc_init_hard_recursible();
1342 
1343 	if (malloc_init_hard_finish()) {
1344 		malloc_mutex_unlock(&init_lock);
1345 		return (true);
1346 	}
1347 
1348 	malloc_mutex_unlock(&init_lock);
1349 	malloc_tsd_boot1();
1350 	return (false);
1351 }
1352 
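/*
 * Taken together, the helpers above advance malloc_init_state in order:
 *
 *	malloc_init_uninitialized
 *	  --malloc_init_hard_a0_locked()-->  malloc_init_a0_initialized
 *	  --malloc_init_hard_recursible()--> malloc_init_recursible
 *	  --malloc_init_hard_finish()-->     malloc_init_initialized
 *
 * init_lock is dropped only around the recursible phase, so that the
 * calls made there (e.g. pthread_atfork() on some platforms) may
 * allocate recursively.
 */
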
1353 /*
1354  * End initialization functions.
1355  */
1356 /******************************************************************************/
1357 /*
1358  * Begin malloc(3)-compatible functions.
1359  */
1360 
1361 static void *
1362 imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
1363 {
1364 	void *p;
1365 
1366 	if (tctx == NULL)
1367 		return (NULL);
1368 	if (usize <= SMALL_MAXCLASS) {
1369 		p = imalloc(tsd, LARGE_MINCLASS);
1370 		if (p == NULL)
1371 			return (NULL);
1372 		arena_prof_promoted(p, usize);
1373 	} else
1374 		p = imalloc(tsd, usize);
1375 
1376 	return (p);
1377 }
1378 
1379 JEMALLOC_ALWAYS_INLINE_C void *
1380 imalloc_prof(tsd_t *tsd, size_t usize)
1381 {
1382 	void *p;
1383 	prof_tctx_t *tctx;
1384 
1385 	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1386 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1387 		p = imalloc_prof_sample(tsd, usize, tctx);
1388 	else
1389 		p = imalloc(tsd, usize);
1390 	if (unlikely(p == NULL)) {
1391 		prof_alloc_rollback(tsd, tctx, true);
1392 		return (NULL);
1393 	}
1394 	prof_malloc(p, usize, tctx);
1395 
1396 	return (p);
1397 }
1398 
1399 JEMALLOC_ALWAYS_INLINE_C void *
1400 imalloc_body(size_t size, tsd_t **tsd, size_t *usize)
1401 {
1402 
1403 	if (unlikely(malloc_init()))
1404 		return (NULL);
1405 	*tsd = tsd_fetch();
1406 
1407 	if (config_prof && opt_prof) {
1408 		*usize = s2u(size);
1409 		if (unlikely(*usize == 0))
1410 			return (NULL);
1411 		return (imalloc_prof(*tsd, *usize));
1412 	}
1413 
1414 	if (config_stats || (config_valgrind && unlikely(in_valgrind)))
1415 		*usize = s2u(size);
1416 	return (imalloc(*tsd, size));
1417 }
1418 
1419 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1420 void JEMALLOC_NOTHROW *
1421 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
1422 je_malloc(size_t size)
1423 {
1424 	void *ret;
1425 	tsd_t *tsd;
1426 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1427 
1428 	if (size == 0)
1429 		size = 1;
1430 
1431 	ret = imalloc_body(size, &tsd, &usize);
1432 	if (unlikely(ret == NULL)) {
1433 		if (config_xmalloc && unlikely(opt_xmalloc)) {
1434 			malloc_write("<jemalloc>: Error in malloc(): "
1435 			    "out of memory\n");
1436 			abort();
1437 		}
1438 		set_errno(ENOMEM);
1439 	}
1440 	if (config_stats && likely(ret != NULL)) {
1441 		assert(usize == isalloc(ret, config_prof));
1442 		*tsd_thread_allocatedp_get(tsd) += usize;
1443 	}
1444 	UTRACE(0, size, ret);
1445 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
1446 	return (ret);
1447 }
1448 
1449 static void *
1450 imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
1451     prof_tctx_t *tctx)
1452 {
1453 	void *p;
1454 
1455 	if (tctx == NULL)
1456 		return (NULL);
1457 	if (usize <= SMALL_MAXCLASS) {
1458 		assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
1459 		p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
1460 		if (p == NULL)
1461 			return (NULL);
1462 		arena_prof_promoted(p, usize);
1463 	} else
1464 		p = ipalloc(tsd, usize, alignment, false);
1465 
1466 	return (p);
1467 }
1468 
1469 JEMALLOC_ALWAYS_INLINE_C void *
1470 imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
1471 {
1472 	void *p;
1473 	prof_tctx_t *tctx;
1474 
1475 	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1476 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1477 		p = imemalign_prof_sample(tsd, alignment, usize, tctx);
1478 	else
1479 		p = ipalloc(tsd, usize, alignment, false);
1480 	if (unlikely(p == NULL)) {
1481 		prof_alloc_rollback(tsd, tctx, true);
1482 		return (NULL);
1483 	}
1484 	prof_malloc(p, usize, tctx);
1485 
1486 	return (p);
1487 }
1488 
1489 JEMALLOC_ATTR(nonnull(1))
1490 static int
1491 imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
1492 {
1493 	int ret;
1494 	tsd_t *tsd;
1495 	size_t usize;
1496 	void *result;
1497 
1498 	assert(min_alignment != 0);
1499 
1500 	if (unlikely(malloc_init())) {
1501 		result = NULL;
1502 		goto label_oom;
1503 	}
1504 	tsd = tsd_fetch();
1505 	if (size == 0)
1506 		size = 1;
1507 
1508 	/* Make sure that alignment is a large enough power of 2. */
1509 	if (unlikely(((alignment - 1) & alignment) != 0
1510 	    || (alignment < min_alignment))) {
1511 		if (config_xmalloc && unlikely(opt_xmalloc)) {
1512 			malloc_write("<jemalloc>: Error allocating "
1513 			    "aligned memory: invalid alignment\n");
1514 			abort();
1515 		}
1516 		result = NULL;
1517 		ret = EINVAL;
1518 		goto label_return;
1519 	}
1520 
1521 	usize = sa2u(size, alignment);
1522 	if (unlikely(usize == 0)) {
1523 		result = NULL;
1524 		goto label_oom;
1525 	}
1526 
1527 	if (config_prof && opt_prof)
1528 		result = imemalign_prof(tsd, alignment, usize);
1529 	else
1530 		result = ipalloc(tsd, usize, alignment, false);
1531 	if (unlikely(result == NULL))
1532 		goto label_oom;
1533 	assert(((uintptr_t)result & (alignment - 1)) == ZU(0));
1534 
1535 	*memptr = result;
1536 	ret = 0;
1537 label_return:
1538 	if (config_stats && likely(result != NULL)) {
1539 		assert(usize == isalloc(result, config_prof));
1540 		*tsd_thread_allocatedp_get(tsd) += usize;
1541 	}
1542 	UTRACE(0, size, result);
1543 	return (ret);
1544 label_oom:
1545 	assert(result == NULL);
1546 	if (config_xmalloc && unlikely(opt_xmalloc)) {
1547 		malloc_write("<jemalloc>: Error allocating aligned memory: "
1548 		    "out of memory\n");
1549 		abort();
1550 	}
1551 	ret = ENOMEM;
1552 	goto label_return;
1553 }
1554 
1555 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
1556 JEMALLOC_ATTR(nonnull(1))
1557 je_posix_memalign(void **memptr, size_t alignment, size_t size)
1558 {
1559 	int ret = imemalign(memptr, alignment, size, sizeof(void *));
1560 	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
1561 	    config_prof), false);
1562 	return (ret);
1563 }
1564 
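/*
 * posix_memalign() requires the alignment to be a power of two no
 * smaller than sizeof(void *), hence min_alignment == sizeof(void *)
 * here versus min_alignment == 1 for aligned_alloc()/memalign()/
 * valloc() below.  Typical use:
 *
 *	void *p;
 *	if (posix_memalign(&p, 64, 1024) == 0) {
 *		... p is 64-byte aligned ...
 *		free(p);
 *	}
 */
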
1565 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1566 void JEMALLOC_NOTHROW *
1567 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
1568 je_aligned_alloc(size_t alignment, size_t size)
1569 {
1570 	void *ret;
1571 	int err;
1572 
1573 	if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
1574 		ret = NULL;
1575 		set_errno(err);
1576 	}
1577 	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
1578 	    false);
1579 	return (ret);
1580 }
1581 
1582 static void *
1583 icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
1584 {
1585 	void *p;
1586 
1587 	if (tctx == NULL)
1588 		return (NULL);
1589 	if (usize <= SMALL_MAXCLASS) {
1590 		p = icalloc(tsd, LARGE_MINCLASS);
1591 		if (p == NULL)
1592 			return (NULL);
1593 		arena_prof_promoted(p, usize);
1594 	} else
1595 		p = icalloc(tsd, usize);
1596 
1597 	return (p);
1598 }
1599 
1600 JEMALLOC_ALWAYS_INLINE_C void *
1601 icalloc_prof(tsd_t *tsd, size_t usize)
1602 {
1603 	void *p;
1604 	prof_tctx_t *tctx;
1605 
1606 	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1607 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1608 		p = icalloc_prof_sample(tsd, usize, tctx);
1609 	else
1610 		p = icalloc(tsd, usize);
1611 	if (unlikely(p == NULL)) {
1612 		prof_alloc_rollback(tsd, tctx, true);
1613 		return (NULL);
1614 	}
1615 	prof_malloc(p, usize, tctx);
1616 
1617 	return (p);
1618 }
1619 
1620 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1621 void JEMALLOC_NOTHROW *
1622 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
1623 je_calloc(size_t num, size_t size)
1624 {
1625 	void *ret;
1626 	tsd_t *tsd;
1627 	size_t num_size;
1628 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1629 
1630 	if (unlikely(malloc_init())) {
1631 		num_size = 0;
1632 		ret = NULL;
1633 		goto label_return;
1634 	}
1635 	tsd = tsd_fetch();
1636 
1637 	num_size = num * size;
1638 	if (unlikely(num_size == 0)) {
1639 		if (num == 0 || size == 0)
1640 			num_size = 1;
1641 		else {
1642 			ret = NULL;
1643 			goto label_return;
1644 		}
1645 	/*
1646 	 * Try to avoid division here.  We know that it isn't possible to
1647 	 * overflow during multiplication if neither operand uses any of the
1648 	 * most significant half of the bits in a size_t.
1649 	 */
1650 	} else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
1651 	    2))) && (num_size / size != num))) {
1652 		/* size_t overflow. */
1653 		ret = NULL;
1654 		goto label_return;
1655 	}
1656 
1657 	if (config_prof && opt_prof) {
1658 		usize = s2u(num_size);
1659 		if (unlikely(usize == 0)) {
1660 			ret = NULL;
1661 			goto label_return;
1662 		}
1663 		ret = icalloc_prof(tsd, usize);
1664 	} else {
1665 		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
1666 			usize = s2u(num_size);
1667 		ret = icalloc(tsd, num_size);
1668 	}
1669 
1670 label_return:
1671 	if (unlikely(ret == NULL)) {
1672 		if (config_xmalloc && unlikely(opt_xmalloc)) {
1673 			malloc_write("<jemalloc>: Error in calloc(): out of "
1674 			    "memory\n");
1675 			abort();
1676 		}
1677 		set_errno(ENOMEM);
1678 	}
1679 	if (config_stats && likely(ret != NULL)) {
1680 		assert(usize == isalloc(ret, config_prof));
1681 		*tsd_thread_allocatedp_get(tsd) += usize;
1682 	}
1683 	UTRACE(0, num_size, ret);
1684 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
1685 	return (ret);
1686 }
1687 
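/*
 * The overflow test above avoids a division in the common case: when
 * both num and size fit in the low half of a size_t (e.g. both below
 * 2^32 on a 64-bit system), their product cannot wrap, so only the
 * cheap mask check runs.  Requests where either operand has high bits
 * set fall through to the num_size / size != num verification, or are
 * rejected earlier if the product wraps all the way to zero.
 */
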
1688 static void *
1689 irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
1690     prof_tctx_t *tctx)
1691 {
1692 	void *p;
1693 
1694 	if (tctx == NULL)
1695 		return (NULL);
1696 	if (usize <= SMALL_MAXCLASS) {
1697 		p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
1698 		if (p == NULL)
1699 			return (NULL);
1700 		arena_prof_promoted(p, usize);
1701 	} else
1702 		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
1703 
1704 	return (p);
1705 }
1706 
1707 JEMALLOC_ALWAYS_INLINE_C void *
1708 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
1709 {
1710 	void *p;
1711 	bool prof_active;
1712 	prof_tctx_t *old_tctx, *tctx;
1713 
1714 	prof_active = prof_active_get_unlocked();
1715 	old_tctx = prof_tctx_get(old_ptr);
1716 	tctx = prof_alloc_prep(tsd, usize, prof_active, true);
1717 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1718 		p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
1719 	else
1720 		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
1721 	if (unlikely(p == NULL)) {
1722 		prof_alloc_rollback(tsd, tctx, true);
1723 		return (NULL);
1724 	}
1725 	prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
1726 	    old_tctx);
1727 
1728 	return (p);
1729 }
1730 
1731 JEMALLOC_INLINE_C void
1732 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache)
1733 {
1734 	size_t usize;
1735 	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1736 
1737 	assert(ptr != NULL);
1738 	assert(malloc_initialized() || IS_INITIALIZER);
1739 
1740 	if (config_prof && opt_prof) {
1741 		usize = isalloc(ptr, config_prof);
1742 		prof_free(tsd, ptr, usize);
1743 	} else if (config_stats || config_valgrind)
1744 		usize = isalloc(ptr, config_prof);
1745 	if (config_stats)
1746 		*tsd_thread_deallocatedp_get(tsd) += usize;
1747 	if (config_valgrind && unlikely(in_valgrind))
1748 		rzsize = p2rz(ptr);
1749 	iqalloc(tsd, ptr, tcache);
1750 	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1751 }
1752 
1753 JEMALLOC_INLINE_C void
1754 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
1755 {
1756 	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1757 
1758 	assert(ptr != NULL);
1759 	assert(malloc_initialized() || IS_INITIALIZER);
1760 
1761 	if (config_prof && opt_prof)
1762 		prof_free(tsd, ptr, usize);
1763 	if (config_stats)
1764 		*tsd_thread_deallocatedp_get(tsd) += usize;
1765 	if (config_valgrind && unlikely(in_valgrind))
1766 		rzsize = p2rz(ptr);
1767 	isqalloc(tsd, ptr, usize, tcache);
1768 	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1769 }
1770 
1771 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1772 void JEMALLOC_NOTHROW *
1773 JEMALLOC_ALLOC_SIZE(2)
1774 je_realloc(void *ptr, size_t size)
1775 {
1776 	void *ret;
1777 	tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
1778 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1779 	size_t old_usize = 0;
1780 	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1781 
1782 	if (unlikely(size == 0)) {
1783 		if (ptr != NULL) {
1784 			/* realloc(ptr, 0) is equivalent to free(ptr). */
1785 			UTRACE(ptr, 0, 0);
1786 			tsd = tsd_fetch();
1787 			ifree(tsd, ptr, tcache_get(tsd, false));
1788 			return (NULL);
1789 		}
1790 		size = 1;
1791 	}
1792 
1793 	if (likely(ptr != NULL)) {
1794 		assert(malloc_initialized() || IS_INITIALIZER);
1795 		malloc_thread_init();
1796 		tsd = tsd_fetch();
1797 
1798 		old_usize = isalloc(ptr, config_prof);
1799 		if (config_valgrind && unlikely(in_valgrind))
1800 			old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
1801 
1802 		if (config_prof && opt_prof) {
1803 			usize = s2u(size);
1804 			ret = unlikely(usize == 0) ? NULL : irealloc_prof(tsd,
1805 			    ptr, old_usize, usize);
1806 		} else {
1807 			if (config_stats || (config_valgrind &&
1808 			    unlikely(in_valgrind)))
1809 				usize = s2u(size);
1810 			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
1811 		}
1812 	} else {
1813 		/* realloc(NULL, size) is equivalent to malloc(size). */
1814 		ret = imalloc_body(size, &tsd, &usize);
1815 	}
1816 
1817 	if (unlikely(ret == NULL)) {
1818 		if (config_xmalloc && unlikely(opt_xmalloc)) {
1819 			malloc_write("<jemalloc>: Error in realloc(): "
1820 			    "out of memory\n");
1821 			abort();
1822 		}
1823 		set_errno(ENOMEM);
1824 	}
1825 	if (config_stats && likely(ret != NULL)) {
1826 		assert(usize == isalloc(ret, config_prof));
1827 		*tsd_thread_allocatedp_get(tsd) += usize;
1828 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
1829 	}
1830 	UTRACE(ptr, size, ret);
1831 	JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
1832 	    old_rzsize, true, false);
1833 	return (ret);
1834 }
1835 
1836 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
1837 je_free(void *ptr)
1838 {
1839 
1840 	UTRACE(ptr, 0, 0);
1841 	if (likely(ptr != NULL)) {
1842 		tsd_t *tsd = tsd_fetch();
1843 		ifree(tsd, ptr, tcache_get(tsd, false));
1844 	}
1845 }
1846 
1847 /*
1848  * End malloc(3)-compatible functions.
1849  */
1850 /******************************************************************************/
1851 /*
1852  * Begin non-standard override functions.
1853  */
1854 
1855 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
1856 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1857 void JEMALLOC_NOTHROW *
1858 JEMALLOC_ATTR(malloc)
1859 je_memalign(size_t alignment, size_t size)
1860 {
1861 	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1862 	if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
1863 		ret = NULL;
1864 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1865 	return (ret);
1866 }
1867 #endif
1868 
1869 #ifdef JEMALLOC_OVERRIDE_VALLOC
1870 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1871 void JEMALLOC_NOTHROW *
1872 JEMALLOC_ATTR(malloc)
1873 je_valloc(size_t size)
1874 {
1875 	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1876 	if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
1877 		ret = NULL;
1878 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1879 	return (ret);
1880 }
1881 #endif
1882 
1883 /*
1884  * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1885  * #define je_malloc malloc
1886  */
1887 #define	malloc_is_malloc 1
1888 #define	is_malloc_(a) malloc_is_ ## a
1889 #define	is_malloc(a) is_malloc_(a)
1890 
1891 #if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
1892 /*
1893  * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1894  * to inconsistently reference libc's malloc(3)-compatible functions
1895  * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1896  *
1897  * These definitions interpose hooks in glibc.  The functions are actually
1898  * passed an extra argument for the caller return address, which will be
1899  * ignored.
1900  */
1901 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
1902 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
1903 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
1904 # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
1905 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
1906     je_memalign;
1907 # endif
1908 #endif
1909 
1910 /*
1911  * End non-standard override functions.
1912  */
1913 /******************************************************************************/
1914 /*
1915  * Begin non-standard functions.
1916  */
1917 
1918 JEMALLOC_ALWAYS_INLINE_C bool
1919 imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
1920     size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
1921 {
1922 
1923 	if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
1924 		*alignment = 0;
1925 		*usize = s2u(size);
1926 	} else {
1927 		*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
1928 		*usize = sa2u(size, *alignment);
1929 	}
1930 	assert(*usize != 0);
1931 	*zero = MALLOCX_ZERO_GET(flags);
1932 	if ((flags & MALLOCX_TCACHE_MASK) != 0) {
1933 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
1934 			*tcache = NULL;
1935 		else
1936 			*tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
1937 	} else
1938 		*tcache = tcache_get(tsd, true);
1939 	if ((flags & MALLOCX_ARENA_MASK) != 0) {
1940 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
1941 		*arena = arena_get(tsd, arena_ind, true, true);
1942 		if (unlikely(*arena == NULL))
1943 			return (true);
1944 	} else
1945 		*arena = NULL;
1946 	return (false);
1947 }
1948 
1949 JEMALLOC_ALWAYS_INLINE_C bool
1950 imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
1951     size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
1952 {
1953 
1954 	if (likely(flags == 0)) {
1955 		*usize = s2u(size);
1956 		assert(*usize != 0);
1957 		*alignment = 0;
1958 		*zero = false;
1959 		*tcache = tcache_get(tsd, true);
1960 		*arena = NULL;
1961 		return (false);
1962 	} else {
1963 		return (imallocx_flags_decode_hard(tsd, size, flags, usize,
1964 		    alignment, zero, tcache, arena));
1965 	}
1966 }
1967 
1968 JEMALLOC_ALWAYS_INLINE_C void *
1969 imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
1970     tcache_t *tcache, arena_t *arena)
1971 {
1972 
1973 	if (unlikely(alignment != 0))
1974 		return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
1975 	if (unlikely(zero))
1976 		return (icalloct(tsd, usize, tcache, arena));
1977 	return (imalloct(tsd, usize, tcache, arena));
1978 }
1979 
1980 static void *
1981 imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
1982     tcache_t *tcache, arena_t *arena)
1983 {
1984 	void *p;
1985 
1986 	if (usize <= SMALL_MAXCLASS) {
1987 		assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
1988 		    sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
1989 		p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache,
1990 		    arena);
1991 		if (p == NULL)
1992 			return (NULL);
1993 		arena_prof_promoted(p, usize);
1994 	} else
1995 		p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena);
1996 
1997 	return (p);
1998 }
1999 
2000 JEMALLOC_ALWAYS_INLINE_C void *
2001 imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
2002 {
2003 	void *p;
2004 	size_t alignment;
2005 	bool zero;
2006 	tcache_t *tcache;
2007 	arena_t *arena;
2008 	prof_tctx_t *tctx;
2009 
2010 	if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
2011 	    &zero, &tcache, &arena)))
2012 		return (NULL);
2013 	tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
2014 	if (likely((uintptr_t)tctx == (uintptr_t)1U))
2015 		p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
2016 	else if ((uintptr_t)tctx > (uintptr_t)1U) {
2017 		p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache,
2018 		    arena);
2019 	} else
2020 		p = NULL;
2021 	if (unlikely(p == NULL)) {
2022 		prof_alloc_rollback(tsd, tctx, true);
2023 		return (NULL);
2024 	}
2025 	prof_malloc(p, *usize, tctx);
2026 
2027 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2028 	return (p);
2029 }
2030 
2031 JEMALLOC_ALWAYS_INLINE_C void *
2032 imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
2033 {
2034 	void *p;
2035 	size_t alignment;
2036 	bool zero;
2037 	tcache_t *tcache;
2038 	arena_t *arena;
2039 
2040 	if (likely(flags == 0)) {
2041 		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
2042 			*usize = s2u(size);
2043 		return (imalloc(tsd, size));
2044 	}
2045 
2046 	if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
2047 	    &alignment, &zero, &tcache, &arena)))
2048 		return (NULL);
2049 	p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
2050 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2051 	return (p);
2052 }
2053 
2054 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2055 void JEMALLOC_NOTHROW *
2056 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2057 je_mallocx(size_t size, int flags)
2058 {
2059 	tsd_t *tsd;
2060 	void *p;
2061 	size_t usize;
2062 
2063 	assert(size != 0);
2064 
2065 	if (unlikely(malloc_init()))
2066 		goto label_oom;
2067 	tsd = tsd_fetch();
2068 
2069 	if (config_prof && opt_prof)
2070 		p = imallocx_prof(tsd, size, flags, &usize);
2071 	else
2072 		p = imallocx_no_prof(tsd, size, flags, &usize);
2073 	if (unlikely(p == NULL))
2074 		goto label_oom;
2075 
2076 	if (config_stats) {
2077 		assert(usize == isalloc(p, config_prof));
2078 		*tsd_thread_allocatedp_get(tsd) += usize;
2079 	}
2080 	UTRACE(0, size, p);
2081 	JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
2082 	return (p);
2083 label_oom:
2084 	if (config_xmalloc && unlikely(opt_xmalloc)) {
2085 		malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
2086 		abort();
2087 	}
2088 	UTRACE(0, size, 0);
2089 	return (NULL);
2090 }
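
/*
 * Caller-side sketch (hypothetical application code; the public mallocx()
 * name maps onto je_mallocx() above): request a 64-byte-aligned, zero-filled
 * allocation that bypasses the thread cache.
 *
 *	void *p = mallocx(4096,
 *	    MALLOCX_LG_ALIGN(6) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE);
 *
 * A NULL return indicates out of memory, unless opt.xmalloc is enabled, in
 * which case the OOM path above aborts instead of returning.
 */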
2091 
2092 static void *
2093 irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
2094     size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2095     prof_tctx_t *tctx)
2096 {
2097 	void *p;
2098 
2099 	if (tctx == NULL)
2100 		return (NULL);
2101 	if (usize <= SMALL_MAXCLASS) {
2102 		p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
2103 		    zero, tcache, arena);
2104 		if (p == NULL)
2105 			return (NULL);
2106 		arena_prof_promoted(p, usize);
2107 	} else {
2108 		p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
2109 		    tcache, arena);
2110 	}
2111 
2112 	return (p);
2113 }
2114 
2115 JEMALLOC_ALWAYS_INLINE_C void *
2116 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2117     size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2118     arena_t *arena)
2119 {
2120 	void *p;
2121 	bool prof_active;
2122 	prof_tctx_t *old_tctx, *tctx;
2123 
2124 	prof_active = prof_active_get_unlocked();
2125 	old_tctx = prof_tctx_get(old_ptr);
2126 	tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
2127 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2128 		p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
2129 		    alignment, zero, tcache, arena, tctx);
2130 	} else {
2131 		p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
2132 		    tcache, arena);
2133 	}
2134 	if (unlikely(p == NULL)) {
2135 		prof_alloc_rollback(tsd, tctx, true);
2136 		return (NULL);
2137 	}
2138 
2139 	if (p == old_ptr && alignment != 0) {
2140 		/*
2141 		 * The allocation did not move, so it is possible that the size
2142 		 * class is smaller than would guarantee the requested
2143 		 * alignment, and that the alignment constraint was
2144 		 * serendipitously satisfied.  Additionally, old_usize may not
2145 		 * be the same as the current usize because of in-place large
2146 		 * reallocation.  Therefore, query the actual value of usize.
2147 		 */
2148 		*usize = isalloc(p, config_prof);
2149 	}
2150 	prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
2151 	    old_usize, old_tctx);
2152 
2153 	return (p);
2154 }
2155 
2156 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2157 void JEMALLOC_NOTHROW *
2158 JEMALLOC_ALLOC_SIZE(2)
2159 je_rallocx(void *ptr, size_t size, int flags)
2160 {
2161 	void *p;
2162 	tsd_t *tsd;
2163 	size_t usize;
2164 	size_t old_usize;
2165 	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2166 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2167 	bool zero = flags & MALLOCX_ZERO;
2168 	arena_t *arena;
2169 	tcache_t *tcache;
2170 
2171 	assert(ptr != NULL);
2172 	assert(size != 0);
2173 	assert(malloc_initialized() || IS_INITIALIZER);
2174 	malloc_thread_init();
2175 	tsd = tsd_fetch();
2176 
2177 	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2178 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2179 		arena = arena_get(tsd, arena_ind, true, true);
2180 		if (unlikely(arena == NULL))
2181 			goto label_oom;
2182 	} else
2183 		arena = NULL;
2184 
2185 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2186 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2187 			tcache = NULL;
2188 		else
2189 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2190 	} else
2191 		tcache = tcache_get(tsd, true);
2192 
2193 	old_usize = isalloc(ptr, config_prof);
2194 	if (config_valgrind && unlikely(in_valgrind))
2195 		old_rzsize = u2rz(old_usize);
2196 
2197 	if (config_prof && opt_prof) {
2198 		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
2199 		assert(usize != 0);
2200 		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2201 		    zero, tcache, arena);
2202 		if (unlikely(p == NULL))
2203 			goto label_oom;
2204 	} else {
2205 		p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
2206 		    tcache, arena);
2207 		if (unlikely(p == NULL))
2208 			goto label_oom;
2209 		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
2210 			usize = isalloc(p, config_prof);
2211 	}
2212 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2213 
2214 	if (config_stats) {
2215 		*tsd_thread_allocatedp_get(tsd) += usize;
2216 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2217 	}
2218 	UTRACE(ptr, size, p);
2219 	JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
2220 	    old_rzsize, false, zero);
2221 	return (p);
2222 label_oom:
2223 	if (config_xmalloc && unlikely(opt_xmalloc)) {
2224 		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2225 		abort();
2226 	}
2227 	UTRACE(ptr, size, 0);
2228 	return (NULL);
2229 }
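
/*
 * Caller-side sketch (hypothetical application code): rallocx() requires a
 * non-zero size and leaves the original allocation valid and unmodified on
 * failure.
 *
 *	void *q = rallocx(p, new_size, MALLOCX_ALIGN(64));
 *	if (q != NULL)
 *		p = q;
 *	else
 *		... p is still usable at its old size ...
 */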
2230 
2231 JEMALLOC_ALWAYS_INLINE_C size_t
2232 ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
2233     size_t alignment, bool zero)
2234 {
2235 	size_t usize;
2236 
2237 	if (ixalloc(ptr, old_usize, size, extra, alignment, zero))
2238 		return (old_usize);
2239 	usize = isalloc(ptr, config_prof);
2240 
2241 	return (usize);
2242 }
2243 
2244 static size_t
2245 ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
2246     size_t alignment, bool zero, prof_tctx_t *tctx)
2247 {
2248 	size_t usize;
2249 
2250 	if (tctx == NULL)
2251 		return (old_usize);
2252 	usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, zero);
2253 
2254 	return (usize);
2255 }
2256 
2257 JEMALLOC_ALWAYS_INLINE_C size_t
2258 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2259     size_t extra, size_t alignment, bool zero)
2260 {
2261 	size_t usize_max, usize;
2262 	bool prof_active;
2263 	prof_tctx_t *old_tctx, *tctx;
2264 
2265 	prof_active = prof_active_get_unlocked();
2266 	old_tctx = prof_tctx_get(ptr);
2267 	/*
2268 	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2269 	 * Therefore, compute its maximum possible value and use that in
2270 	 * prof_alloc_prep() to decide whether to capture a backtrace.
2271 	 * prof_realloc() will use the actual usize to decide whether to sample.
2272 	 */
2273 	usize_max = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra,
2274 	    alignment);
2275 	assert(usize_max != 0);
2276 	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
2277 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2278 		usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
2279 		    alignment, zero, tctx);
2280 	} else {
2281 		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
2282 		    zero);
2283 	}
2284 	if (usize == old_usize) {
2285 		prof_alloc_rollback(tsd, tctx, false);
2286 		return (usize);
2287 	}
2288 	prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
2289 	    old_tctx);
2290 
2291 	return (usize);
2292 }
2293 
2294 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2295 je_xallocx(void *ptr, size_t size, size_t extra, int flags)
2296 {
2297 	tsd_t *tsd;
2298 	size_t usize, old_usize;
2299 	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2300 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2301 	bool zero = flags & MALLOCX_ZERO;
2302 
2303 	assert(ptr != NULL);
2304 	assert(size != 0);
2305 	assert(SIZE_T_MAX - size >= extra);
2306 	assert(malloc_initialized() || IS_INITIALIZER);
2307 	malloc_thread_init();
2308 	tsd = tsd_fetch();
2309 
2310 	old_usize = isalloc(ptr, config_prof);
2311 
2312 	/* Clamp extra if necessary to avoid (size + extra) overflow. */
2313 	if (unlikely(size + extra > HUGE_MAXCLASS)) {
2314 		/* Check for size overflow. */
2315 		if (unlikely(size > HUGE_MAXCLASS)) {
2316 			usize = old_usize;
2317 			goto label_not_resized;
2318 		}
2319 		extra = HUGE_MAXCLASS - size;
2320 	}
2321 
2322 	if (config_valgrind && unlikely(in_valgrind))
2323 		old_rzsize = u2rz(old_usize);
2324 
2325 	if (config_prof && opt_prof) {
2326 		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2327 		    alignment, zero);
2328 	} else {
2329 		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
2330 		    zero);
2331 	}
2332 	if (unlikely(usize == old_usize))
2333 		goto label_not_resized;
2334 
2335 	if (config_stats) {
2336 		*tsd_thread_allocatedp_get(tsd) += usize;
2337 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2338 	}
2339 	JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
2340 	    old_rzsize, false, zero);
2341 label_not_resized:
2342 	UTRACE(ptr, size, ptr);
2343 	return (usize);
2344 }
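
/*
 * Caller-side sketch (hypothetical application code): xallocx() only ever
 * resizes in place and returns the resulting usable size, so a result smaller
 * than the requested size means the caller must fall back to rallocx() (or
 * keep the old size).
 *
 *	size_t n = xallocx(p, want, 0, 0);
 *	if (n < want)
 *		... in-place resize failed; consider rallocx() ...
 */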
2345 
2346 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2347 JEMALLOC_ATTR(pure)
2348 je_sallocx(const void *ptr, int flags)
2349 {
2350 	size_t usize;
2351 
2352 	assert(malloc_initialized() || IS_INITIALIZER);
2353 	malloc_thread_init();
2354 
2355 	if (config_ivsalloc)
2356 		usize = ivsalloc(ptr, config_prof);
2357 	else
2358 		usize = isalloc(ptr, config_prof);
2359 
2360 	return (usize);
2361 }
2362 
2363 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2364 je_dallocx(void *ptr, int flags)
2365 {
2366 	tsd_t *tsd;
2367 	tcache_t *tcache;
2368 
2369 	assert(ptr != NULL);
2370 	assert(malloc_initialized() || IS_INITIALIZER);
2371 
2372 	tsd = tsd_fetch();
2373 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2374 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2375 			tcache = NULL;
2376 		else
2377 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2378 	} else
2379 		tcache = tcache_get(tsd, false);
2380 
2381 	UTRACE(ptr, 0, 0);
2382 	ifree(tsd, ptr, tcache);
2383 }
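
/*
 * Caller-side sketch (hypothetical application code): in this path only the
 * MALLOCX_TCACHE_* bits of flags are consulted, e.g. to bypass the thread
 * cache on free just as the matching allocation did.
 *
 *	void *p = mallocx(len, MALLOCX_TCACHE_NONE);
 *	...
 *	dallocx(p, MALLOCX_TCACHE_NONE);
 */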
2384 
2385 JEMALLOC_ALWAYS_INLINE_C size_t
2386 inallocx(size_t size, int flags)
2387 {
2388 	size_t usize;
2389 
2390 	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
2391 		usize = s2u(size);
2392 	else
2393 		usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
2394 	assert(usize != 0);
2395 	return (usize);
2396 }
2397 
2398 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2399 je_sdallocx(void *ptr, size_t size, int flags)
2400 {
2401 	tsd_t *tsd;
2402 	tcache_t *tcache;
2403 	size_t usize;
2404 
2405 	assert(ptr != NULL);
2406 	assert(malloc_initialized() || IS_INITIALIZER);
2407 	usize = inallocx(size, flags);
2408 	assert(usize == isalloc(ptr, config_prof));
2409 
2410 	tsd = tsd_fetch();
2411 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2412 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2413 			tcache = NULL;
2414 		else
2415 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2416 	} else
2417 		tcache = tcache_get(tsd, false);
2418 
2419 	UTRACE(ptr, 0, 0);
2420 	isfree(tsd, ptr, usize, tcache);
2421 }
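
/*
 * Caller-side sketch (hypothetical application code): sdallocx() is dallocx()
 * with a size hint; the size/flags pair must describe the original request so
 * that inallocx() reproduces the allocation's usable size (asserted above).
 *
 *	void *p = mallocx(len, flags);
 *	...
 *	sdallocx(p, len, flags);
 */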
2422 
2423 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2424 JEMALLOC_ATTR(pure)
2425 je_nallocx(size_t size, int flags)
2426 {
2427 
2428 	assert(size != 0);
2429 
2430 	if (unlikely(malloc_init()))
2431 		return (0);
2432 
2433 	return (inallocx(size, flags));
2434 }
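
/*
 * Caller-side sketch (hypothetical application code): nallocx() reports the
 * usable size that mallocx() would return for the same (size, flags) pair,
 * without performing an allocation.
 *
 *	size_t usize = nallocx(4096, MALLOCX_LG_ALIGN(6));
 *
 * usize here equals what sallocx() would report for the result of the
 * corresponding mallocx(4096, MALLOCX_LG_ALIGN(6)) call.
 */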
2435 
2436 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2437 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
2438     size_t newlen)
2439 {
2440 
2441 	if (unlikely(malloc_init()))
2442 		return (EAGAIN);
2443 
2444 	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
2445 }
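
/*
 * Caller-side sketch (hypothetical application code) using two mallctl names
 * from this version's namespace: refresh the statistics epoch, then read the
 * number of bytes currently allocated by the application.
 *
 *	uint64_t epoch = 1;
 *	size_t allocated, sz;
 *
 *	sz = sizeof(epoch);
 *	mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));
 *	sz = sizeof(allocated);
 *	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
 */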
2446 
2447 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2448 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
2449 {
2450 
2451 	if (unlikely(malloc_init()))
2452 		return (EAGAIN);
2453 
2454 	return (ctl_nametomib(name, mibp, miblenp));
2455 }
2456 
2457 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2458 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
2459     void *newp, size_t newlen)
2460 {
2461 
2462 	if (unlikely(malloc_init()))
2463 		return (EAGAIN);
2464 
2465 	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
2466 }
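
/*
 * Caller-side sketch (hypothetical application code, following the iteration
 * pattern from the jemalloc manual): translate a name to a MIB once, then
 * vary one component and issue repeated mallctlbymib() queries, which avoids
 * repeated name lookups.
 *
 *	unsigned nbins, i;
 *	size_t mib[4], miblen = 4, sz;
 *
 *	sz = sizeof(nbins);
 *	mallctl("arenas.nbins", &nbins, &sz, NULL, 0);
 *	mallctlnametomib("arenas.bin.0.size", mib, &miblen);
 *	for (i = 0; i < nbins; i++) {
 *		size_t bin_size;
 *
 *		mib[2] = i;
 *		sz = sizeof(bin_size);
 *		mallctlbymib(mib, miblen, &bin_size, &sz, NULL, 0);
 *	}
 */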
2467 
2468 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2469 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
2470     const char *opts)
2471 {
2472 
2473 	stats_print(write_cb, cbopaque, opts);
2474 }
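
/*
 * Caller-side sketch (hypothetical application code): a NULL write callback
 * routes the report through malloc_message() (stderr by default), and a
 * non-NULL opts string can suppress report sections as described in the
 * manual.
 *
 *	malloc_stats_print(NULL, NULL, NULL);
 */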
2475 
2476 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2477 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
2478 {
2479 	size_t ret;
2480 
2481 	assert(malloc_initialized() || IS_INITIALIZER);
2482 	malloc_thread_init();
2483 
2484 	if (config_ivsalloc)
2485 		ret = ivsalloc(ptr, config_prof);
2486 	else
2487 		ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);
2488 
2489 	return (ret);
2490 }
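
/*
 * Caller-side sketch (hypothetical application code): the reported size is
 * that of the size class backing the allocation, so it may exceed the
 * requested size.
 *
 *	void *p = malloc(27);
 *	size_t n = malloc_usable_size(p);
 *
 * Here n >= 27; with the default size classes this is typically 32.
 */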
2491 
2492 /*
2493  * End non-standard functions.
2494  */
2495 /******************************************************************************/
2496 /*
2497  * Begin compatibility functions.
2498  */
2499 
2500 #define	ALLOCM_LG_ALIGN(la)	(la)
2501 #define	ALLOCM_ALIGN(a)		(ffsl(a)-1)
2502 #define	ALLOCM_ZERO		((int)0x40)
2503 #define	ALLOCM_NO_MOVE		((int)0x80)
2504 
2505 #define	ALLOCM_SUCCESS		0
2506 #define	ALLOCM_ERR_OOM		1
2507 #define	ALLOCM_ERR_NOT_MOVED	2
2508 
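/*
 * Illustrative mapping (hypothetical caller code) from the legacy *allocm
 * experimental API to the *allocx API it wraps below:
 *
 *	allocm(&p, &rsize, 4096, ALLOCM_LG_ALIGN(6) | ALLOCM_ZERO);
 *
 * behaves like
 *
 *	p = mallocx(4096, MALLOCX_LG_ALIGN(6) | MALLOCX_ZERO);
 *	rsize = sallocx(p, 0);
 */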
2509 int
2510 je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
2511 {
2512 	void *p;
2513 
2514 	assert(ptr != NULL);
2515 
2516 	p = je_mallocx(size, flags);
2517 	if (p == NULL)
2518 		return (ALLOCM_ERR_OOM);
2519 	if (rsize != NULL)
2520 		*rsize = isalloc(p, config_prof);
2521 	*ptr = p;
2522 	return (ALLOCM_SUCCESS);
2523 }
2524 
2525 int
2526 je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
2527 {
2528 	int ret;
2529 	bool no_move = flags & ALLOCM_NO_MOVE;
2530 
2531 	assert(ptr != NULL);
2532 	assert(*ptr != NULL);
2533 	assert(size != 0);
2534 	assert(SIZE_T_MAX - size >= extra);
2535 
2536 	if (no_move) {
2537 		size_t usize = je_xallocx(*ptr, size, extra, flags);
2538 		ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
2539 		if (rsize != NULL)
2540 			*rsize = usize;
2541 	} else {
2542 		void *p = je_rallocx(*ptr, size+extra, flags);
2543 		if (p != NULL) {
2544 			*ptr = p;
2545 			ret = ALLOCM_SUCCESS;
2546 		} else
2547 			ret = ALLOCM_ERR_OOM;
2548 		if (rsize != NULL)
2549 			*rsize = isalloc(*ptr, config_prof);
2550 	}
2551 	return (ret);
2552 }
2553 
2554 int
2555 je_sallocm(const void *ptr, size_t *rsize, int flags)
2556 {
2557 
2558 	assert(rsize != NULL);
2559 	*rsize = je_sallocx(ptr, flags);
2560 	return (ALLOCM_SUCCESS);
2561 }
2562 
2563 int
2564 je_dallocm(void *ptr, int flags)
2565 {
2566 
2567 	je_dallocx(ptr, flags);
2568 	return (ALLOCM_SUCCESS);
2569 }
2570 
2571 int
2572 je_nallocm(size_t *rsize, size_t size, int flags)
2573 {
2574 	size_t usize;
2575 
2576 	usize = je_nallocx(size, flags);
2577 	if (usize == 0)
2578 		return (ALLOCM_ERR_OOM);
2579 	if (rsize != NULL)
2580 		*rsize = usize;
2581 	return (ALLOCM_SUCCESS);
2582 }
2583 
2584 #undef ALLOCM_LG_ALIGN
2585 #undef ALLOCM_ALIGN
2586 #undef ALLOCM_ZERO
2587 #undef ALLOCM_NO_MOVE
2588 
2589 #undef ALLOCM_SUCCESS
2590 #undef ALLOCM_ERR_OOM
2591 #undef ALLOCM_ERR_NOT_MOVED
2592 
2593 /*
2594  * End compatibility functions.
2595  */
2596 /******************************************************************************/
2597 /*
2598  * The following functions are used by threading libraries for protection of
2599  * malloc during fork().
2600  */
2601 
2602 /*
2603  * If an application creates a thread before doing any allocation in the main
2604  * thread, then calls fork(2) in the main thread followed by memory allocation
2605  * in the child process, a race can occur that results in deadlock within the
2606  * child: the main thread may have forked while the created thread had
2607  * partially initialized the allocator.  Ordinarily jemalloc prevents
2608  * fork/malloc races via the following functions it registers during
2609  * initialization using pthread_atfork(), but of course that does no good if
2610  * the allocator isn't fully initialized at fork time.  The following library
2611  * constructor is a partial solution to this problem.  It may still be possible
2612  * to trigger the deadlock described above, but doing so would involve forking
2613  * via a library constructor that runs before jemalloc's runs.
2614  */
2615 JEMALLOC_ATTR(constructor)
2616 static void
2617 jemalloc_constructor(void)
2618 {
2619 
2620 	malloc_init();
2621 }
2622 
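/*
 * Sketch of how the handlers below are wired up (assumption: this mirrors how
 * upstream jemalloc registers fork handlers during initialization).  Where
 * pthread_atfork(3) is used, i.e. when JEMALLOC_MUTEX_INIT_CB is not defined,
 * registration is roughly
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 *
 * whereas on FreeBSD the threading library calls the exported
 * _malloc_prefork()/_malloc_postfork() wrappers directly.
 */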
2623 #ifndef JEMALLOC_MUTEX_INIT_CB
2624 void
2625 jemalloc_prefork(void)
2626 #else
2627 JEMALLOC_EXPORT void
2628 _malloc_prefork(void)
2629 #endif
2630 {
2631 	unsigned i;
2632 
2633 #ifdef JEMALLOC_MUTEX_INIT_CB
2634 	if (!malloc_initialized())
2635 		return;
2636 #endif
2637 	assert(malloc_initialized());
2638 
2639 	/* Acquire all mutexes in a safe order. */
2640 	ctl_prefork();
2641 	prof_prefork();
2642 	malloc_mutex_prefork(&arenas_lock);
2643 	for (i = 0; i < narenas_total; i++) {
2644 		if (arenas[i] != NULL)
2645 			arena_prefork(arenas[i]);
2646 	}
2647 	chunk_prefork();
2648 	base_prefork();
2649 }
2650 
2651 #ifndef JEMALLOC_MUTEX_INIT_CB
2652 void
2653 jemalloc_postfork_parent(void)
2654 #else
2655 JEMALLOC_EXPORT void
2656 _malloc_postfork(void)
2657 #endif
2658 {
2659 	unsigned i;
2660 
2661 #ifdef JEMALLOC_MUTEX_INIT_CB
2662 	if (!malloc_initialized())
2663 		return;
2664 #endif
2665 	assert(malloc_initialized());
2666 
2667 	/* Release all mutexes, now that fork() has completed. */
2668 	base_postfork_parent();
2669 	chunk_postfork_parent();
2670 	for (i = 0; i < narenas_total; i++) {
2671 		if (arenas[i] != NULL)
2672 			arena_postfork_parent(arenas[i]);
2673 	}
2674 	malloc_mutex_postfork_parent(&arenas_lock);
2675 	prof_postfork_parent();
2676 	ctl_postfork_parent();
2677 }
2678 
2679 void
2680 jemalloc_postfork_child(void)
2681 {
2682 	unsigned i;
2683 
2684 	assert(malloc_initialized());
2685 
2686 	/* Release all mutexes, now that fork() has completed. */
2687 	base_postfork_child();
2688 	chunk_postfork_child();
2689 	for (i = 0; i < narenas_total; i++) {
2690 		if (arenas[i] != NULL)
2691 			arena_postfork_child(arenas[i]);
2692 	}
2693 	malloc_mutex_postfork_child(&arenas_lock);
2694 	prof_postfork_child();
2695 	ctl_postfork_child();
2696 }
2697 
2698 void
2699 _malloc_first_thread(void)
2700 {
2701 
2702 	(void)malloc_mutex_first_thread();
2703 }
2704 
2705 /******************************************************************************/
2706