1*d0774691Schristos #include "jemalloc/internal/jemalloc_preamble.h"
2*d0774691Schristos #include "jemalloc/internal/jemalloc_internal_includes.h"
3*d0774691Schristos
4*d0774691Schristos #include "jemalloc/internal/assert.h"
5*d0774691Schristos
6*d0774691Schristos #ifndef JEMALLOC_ZONE
7*d0774691Schristos # error "This source file is for zones on Darwin (OS X)."
8*d0774691Schristos #endif
9*d0774691Schristos
10*d0774691Schristos /* Definitions of the following structs in malloc/malloc.h might be too old
11*d0774691Schristos * for the built binary to run on newer versions of OSX. So use the newest
12*d0774691Schristos * possible version of those structs.
13*d0774691Schristos */
/*
 * Local mirror of Darwin's malloc_zone_t (zone version 9 layout, through the
 * pressure_relief slot).  Field order and types are ABI — they must match
 * what libmalloc expects; do not reorder or repack.
 */
typedef struct _malloc_zone_t {
	void *reserved1;
	void *reserved2;
	size_t (*size)(struct _malloc_zone_t *, const void *);
	void *(*malloc)(struct _malloc_zone_t *, size_t);
	void *(*calloc)(struct _malloc_zone_t *, size_t, size_t);
	void *(*valloc)(struct _malloc_zone_t *, size_t);
	void (*free)(struct _malloc_zone_t *, void *);
	void *(*realloc)(struct _malloc_zone_t *, void *, size_t);
	void (*destroy)(struct _malloc_zone_t *);
	const char *zone_name;
	unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned);
	void (*batch_free)(struct _malloc_zone_t *, void **, unsigned);
	struct malloc_introspection_t *introspect;
	unsigned version;	/* Set to 9 in zone_init(). */
	void *(*memalign)(struct _malloc_zone_t *, size_t, size_t);
	void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t);
	size_t (*pressure_relief)(struct _malloc_zone_t *, size_t);
} malloc_zone_t;
33*d0774691Schristos
/* Mirror of Darwin's vm_range_t: a contiguous [address, address+size) span. */
typedef struct {
	vm_address_t address;
	vm_size_t size;
} vm_range_t;
38*d0774691Schristos
/*
 * Mirror of Darwin's malloc_statistics_t, filled by the introspection
 * statistics hook (zone_statistics() reports all zeros).
 */
typedef struct malloc_statistics_t {
	unsigned blocks_in_use;
	size_t size_in_use;
	size_t max_size_in_use;
	size_t size_allocated;
} malloc_statistics_t;
45*d0774691Schristos
/* Callback for reading remote task memory during zone enumeration. */
typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **);

/* Callback invoked with batches of vm_range_t during zone enumeration. */
typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned);
49*d0774691Schristos
/*
 * Local mirror of Darwin's malloc_introspection_t.  Field order is ABI.
 * The __BLOCKS__ conditional keeps the struct the same size whether or not
 * the compiler supports Apple blocks.
 */
typedef struct malloc_introspection_t {
	kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t);
	size_t (*good_size)(malloc_zone_t *, size_t);
	boolean_t (*check)(malloc_zone_t *);
	void (*print)(malloc_zone_t *, boolean_t);
	void (*log)(malloc_zone_t *, void *);
	void (*force_lock)(malloc_zone_t *);
	void (*force_unlock)(malloc_zone_t *);
	void (*statistics)(malloc_zone_t *, malloc_statistics_t *);
	boolean_t (*zone_locked)(malloc_zone_t *);
	boolean_t (*enable_discharge_checking)(malloc_zone_t *);
	boolean_t (*disable_discharge_checking)(malloc_zone_t *);
	void (*discharge)(malloc_zone_t *, void *);
#ifdef __BLOCKS__
	void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *));
#else
	void *enumerate_unavailable_without_blocks;	/* Placeholder keeps the slot. */
#endif
	void (*reinit_lock)(malloc_zone_t *);
} malloc_introspection_t;
70*d0774691Schristos
/* Darwin libmalloc entry points, declared here to match the structs above. */
extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *);

extern malloc_zone_t *malloc_default_zone(void);

extern void malloc_zone_register(malloc_zone_t *zone);

extern void malloc_zone_unregister(malloc_zone_t *zone);

/*
 * The malloc_default_purgeable_zone() function is only available on >= 10.6.
 * We need to check whether it is present at runtime, thus the weak_import.
 */
extern malloc_zone_t *malloc_default_purgeable_zone(void)
JEMALLOC_ATTR(weak_import);
85*d0774691Schristos
86*d0774691Schristos /******************************************************************************/
87*d0774691Schristos /* Data. */
88*d0774691Schristos
/* System default zone and lazily created purgeable zone (NULL on < 10.6). */
static malloc_zone_t *default_zone, *purgeable_zone;
/* The zone jemalloc registers, plus its introspection table. */
static malloc_zone_t jemalloc_zone;
static struct malloc_introspection_t jemalloc_zone_introspect;
/* pid recorded by zone_force_lock; lets zone_force_unlock detect fork(). */
static pid_t zone_force_lock_pid = -1;
93*d0774691Schristos
94*d0774691Schristos /******************************************************************************/
95*d0774691Schristos /* Function prototypes for non-inline static functions. */
96*d0774691Schristos
/* Each prototype below fills one slot of jemalloc_zone or its introspect. */
static size_t	zone_size(malloc_zone_t *zone, const void *ptr);
static void	*zone_malloc(malloc_zone_t *zone, size_t size);
static void	*zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void	*zone_valloc(malloc_zone_t *zone, size_t size);
static void	zone_free(malloc_zone_t *zone, void *ptr);
static void	*zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
static void	*zone_memalign(malloc_zone_t *zone, size_t alignment,
    size_t size);
static void	zone_free_definite_size(malloc_zone_t *zone, void *ptr,
    size_t size);
static void	zone_destroy(malloc_zone_t *zone);
static unsigned	zone_batch_malloc(struct _malloc_zone_t *zone, size_t size,
    void **results, unsigned num_requested);
static void	zone_batch_free(struct _malloc_zone_t *zone,
    void **to_be_freed, unsigned num_to_be_freed);
static size_t	zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal);
static size_t	zone_good_size(malloc_zone_t *zone, size_t size);
static kern_return_t	zone_enumerator(task_t task, void *data, unsigned type_mask,
    vm_address_t zone_address, memory_reader_t reader,
    vm_range_recorder_t recorder);
static boolean_t	zone_check(malloc_zone_t *zone);
static void	zone_print(malloc_zone_t *zone, boolean_t verbose);
static void	zone_log(malloc_zone_t *zone, void *address);
static void	zone_force_lock(malloc_zone_t *zone);
static void	zone_force_unlock(malloc_zone_t *zone);
static void	zone_statistics(malloc_zone_t *zone,
    malloc_statistics_t *stats);
static boolean_t	zone_locked(malloc_zone_t *zone);
static void	zone_reinit_lock(malloc_zone_t *zone);
126*d0774691Schristos
127*d0774691Schristos /******************************************************************************/
128*d0774691Schristos /*
129*d0774691Schristos * Functions.
130*d0774691Schristos */
131*d0774691Schristos
static size_t
zone_size(malloc_zone_t *zone, const void *ptr) {
	/*
	 * Return the allocation size of ptr if jemalloc owns it, 0 otherwise;
	 * the zone API treats a 0 result as "not my pointer" (see zone_free).
	 *
	 * There appear to be places within Darwin (such as setenv(3)) that
	 * cause calls to this function with pointers that *no* zone owns. If
	 * we knew that all pointers were owned by *some* zone, we could split
	 * our zone into two parts, and use one as the default allocator and
	 * the other as the default deallocator/reallocator. Since that will
	 * not work in practice, we must check all pointers to assure that they
	 * reside within a mapped extent before determining size.
	 */
	return ivsalloc(tsdn_fetch(), ptr);
}
145*d0774691Schristos
146*d0774691Schristos static void *
zone_malloc(malloc_zone_t * zone,size_t size)147*d0774691Schristos zone_malloc(malloc_zone_t *zone, size_t size) {
148*d0774691Schristos return je_malloc(size);
149*d0774691Schristos }
150*d0774691Schristos
151*d0774691Schristos static void *
zone_calloc(malloc_zone_t * zone,size_t num,size_t size)152*d0774691Schristos zone_calloc(malloc_zone_t *zone, size_t num, size_t size) {
153*d0774691Schristos return je_calloc(num, size);
154*d0774691Schristos }
155*d0774691Schristos
156*d0774691Schristos static void *
zone_valloc(malloc_zone_t * zone,size_t size)157*d0774691Schristos zone_valloc(malloc_zone_t *zone, size_t size) {
158*d0774691Schristos void *ret = NULL; /* Assignment avoids useless compiler warning. */
159*d0774691Schristos
160*d0774691Schristos je_posix_memalign(&ret, PAGE, size);
161*d0774691Schristos
162*d0774691Schristos return ret;
163*d0774691Schristos }
164*d0774691Schristos
165*d0774691Schristos static void
zone_free(malloc_zone_t * zone,void * ptr)166*d0774691Schristos zone_free(malloc_zone_t *zone, void *ptr) {
167*d0774691Schristos if (ivsalloc(tsdn_fetch(), ptr) != 0) {
168*d0774691Schristos je_free(ptr);
169*d0774691Schristos return;
170*d0774691Schristos }
171*d0774691Schristos
172*d0774691Schristos free(ptr);
173*d0774691Schristos }
174*d0774691Schristos
175*d0774691Schristos static void *
zone_realloc(malloc_zone_t * zone,void * ptr,size_t size)176*d0774691Schristos zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
177*d0774691Schristos if (ivsalloc(tsdn_fetch(), ptr) != 0) {
178*d0774691Schristos return je_realloc(ptr, size);
179*d0774691Schristos }
180*d0774691Schristos
181*d0774691Schristos return realloc(ptr, size);
182*d0774691Schristos }
183*d0774691Schristos
184*d0774691Schristos static void *
zone_memalign(malloc_zone_t * zone,size_t alignment,size_t size)185*d0774691Schristos zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) {
186*d0774691Schristos void *ret = NULL; /* Assignment avoids useless compiler warning. */
187*d0774691Schristos
188*d0774691Schristos je_posix_memalign(&ret, alignment, size);
189*d0774691Schristos
190*d0774691Schristos return ret;
191*d0774691Schristos }
192*d0774691Schristos
193*d0774691Schristos static void
zone_free_definite_size(malloc_zone_t * zone,void * ptr,size_t size)194*d0774691Schristos zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) {
195*d0774691Schristos size_t alloc_size;
196*d0774691Schristos
197*d0774691Schristos alloc_size = ivsalloc(tsdn_fetch(), ptr);
198*d0774691Schristos if (alloc_size != 0) {
199*d0774691Schristos assert(alloc_size == size);
200*d0774691Schristos je_free(ptr);
201*d0774691Schristos return;
202*d0774691Schristos }
203*d0774691Schristos
204*d0774691Schristos free(ptr);
205*d0774691Schristos }
206*d0774691Schristos
207*d0774691Schristos static void
zone_destroy(malloc_zone_t * zone)208*d0774691Schristos zone_destroy(malloc_zone_t *zone) {
209*d0774691Schristos /* This function should never be called. */
210*d0774691Schristos not_reached();
211*d0774691Schristos }
212*d0774691Schristos
213*d0774691Schristos static unsigned
zone_batch_malloc(struct _malloc_zone_t * zone,size_t size,void ** results,unsigned num_requested)214*d0774691Schristos zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results,
215*d0774691Schristos unsigned num_requested) {
216*d0774691Schristos unsigned i;
217*d0774691Schristos
218*d0774691Schristos for (i = 0; i < num_requested; i++) {
219*d0774691Schristos results[i] = je_malloc(size);
220*d0774691Schristos if (!results[i])
221*d0774691Schristos break;
222*d0774691Schristos }
223*d0774691Schristos
224*d0774691Schristos return i;
225*d0774691Schristos }
226*d0774691Schristos
227*d0774691Schristos static void
zone_batch_free(struct _malloc_zone_t * zone,void ** to_be_freed,unsigned num_to_be_freed)228*d0774691Schristos zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed,
229*d0774691Schristos unsigned num_to_be_freed) {
230*d0774691Schristos unsigned i;
231*d0774691Schristos
232*d0774691Schristos for (i = 0; i < num_to_be_freed; i++) {
233*d0774691Schristos zone_free(zone, to_be_freed[i]);
234*d0774691Schristos to_be_freed[i] = NULL;
235*d0774691Schristos }
236*d0774691Schristos }
237*d0774691Schristos
static size_t
zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) {
	/* jemalloc releases no memory on pressure notifications: report 0. */
	return 0;
}
242*d0774691Schristos
243*d0774691Schristos static size_t
zone_good_size(malloc_zone_t * zone,size_t size)244*d0774691Schristos zone_good_size(malloc_zone_t *zone, size_t size) {
245*d0774691Schristos if (size == 0) {
246*d0774691Schristos size = 1;
247*d0774691Schristos }
248*d0774691Schristos return sz_s2u(size);
249*d0774691Schristos }
250*d0774691Schristos
251*d0774691Schristos static kern_return_t
zone_enumerator(task_t task,void * data,unsigned type_mask,vm_address_t zone_address,memory_reader_t reader,vm_range_recorder_t recorder)252*d0774691Schristos zone_enumerator(task_t task, void *data, unsigned type_mask,
253*d0774691Schristos vm_address_t zone_address, memory_reader_t reader,
254*d0774691Schristos vm_range_recorder_t recorder) {
255*d0774691Schristos return KERN_SUCCESS;
256*d0774691Schristos }
257*d0774691Schristos
258*d0774691Schristos static boolean_t
zone_check(malloc_zone_t * zone)259*d0774691Schristos zone_check(malloc_zone_t *zone) {
260*d0774691Schristos return true;
261*d0774691Schristos }
262*d0774691Schristos
263*d0774691Schristos static void
zone_print(malloc_zone_t * zone,boolean_t verbose)264*d0774691Schristos zone_print(malloc_zone_t *zone, boolean_t verbose) {
265*d0774691Schristos }
266*d0774691Schristos
267*d0774691Schristos static void
zone_log(malloc_zone_t * zone,void * address)268*d0774691Schristos zone_log(malloc_zone_t *zone, void *address) {
269*d0774691Schristos }
270*d0774691Schristos
271*d0774691Schristos static void
zone_force_lock(malloc_zone_t * zone)272*d0774691Schristos zone_force_lock(malloc_zone_t *zone) {
273*d0774691Schristos if (isthreaded) {
274*d0774691Schristos /*
275*d0774691Schristos * See the note in zone_force_unlock, below, to see why we need
276*d0774691Schristos * this.
277*d0774691Schristos */
278*d0774691Schristos assert(zone_force_lock_pid == -1);
279*d0774691Schristos zone_force_lock_pid = getpid();
280*d0774691Schristos jemalloc_prefork();
281*d0774691Schristos }
282*d0774691Schristos }
283*d0774691Schristos
284*d0774691Schristos static void
zone_force_unlock(malloc_zone_t * zone)285*d0774691Schristos zone_force_unlock(malloc_zone_t *zone) {
286*d0774691Schristos /*
287*d0774691Schristos * zone_force_lock and zone_force_unlock are the entry points to the
288*d0774691Schristos * forking machinery on OS X. The tricky thing is, the child is not
289*d0774691Schristos * allowed to unlock mutexes locked in the parent, even if owned by the
290*d0774691Schristos * forking thread (and the mutex type we use in OS X will fail an assert
291*d0774691Schristos * if we try). In the child, we can get away with reinitializing all
292*d0774691Schristos * the mutexes, which has the effect of unlocking them. In the parent,
293*d0774691Schristos * doing this would mean we wouldn't wake any waiters blocked on the
294*d0774691Schristos * mutexes we unlock. So, we record the pid of the current thread in
295*d0774691Schristos * zone_force_lock, and use that to detect if we're in the parent or
296*d0774691Schristos * child here, to decide which unlock logic we need.
297*d0774691Schristos */
298*d0774691Schristos if (isthreaded) {
299*d0774691Schristos assert(zone_force_lock_pid != -1);
300*d0774691Schristos if (getpid() == zone_force_lock_pid) {
301*d0774691Schristos jemalloc_postfork_parent();
302*d0774691Schristos } else {
303*d0774691Schristos jemalloc_postfork_child();
304*d0774691Schristos }
305*d0774691Schristos zone_force_lock_pid = -1;
306*d0774691Schristos }
307*d0774691Schristos }
308*d0774691Schristos
309*d0774691Schristos static void
zone_statistics(malloc_zone_t * zone,malloc_statistics_t * stats)310*d0774691Schristos zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
311*d0774691Schristos /* We make no effort to actually fill the values */
312*d0774691Schristos stats->blocks_in_use = 0;
313*d0774691Schristos stats->size_in_use = 0;
314*d0774691Schristos stats->max_size_in_use = 0;
315*d0774691Schristos stats->size_allocated = 0;
316*d0774691Schristos }
317*d0774691Schristos
318*d0774691Schristos static boolean_t
zone_locked(malloc_zone_t * zone)319*d0774691Schristos zone_locked(malloc_zone_t *zone) {
320*d0774691Schristos /* Pretend no lock is being held */
321*d0774691Schristos return false;
322*d0774691Schristos }
323*d0774691Schristos
324*d0774691Schristos static void
zone_reinit_lock(malloc_zone_t * zone)325*d0774691Schristos zone_reinit_lock(malloc_zone_t *zone) {
326*d0774691Schristos /* As of OSX 10.12, this function is only used when force_unlock would
327*d0774691Schristos * be used if the zone version were < 9. So just use force_unlock. */
328*d0774691Schristos zone_force_unlock(zone);
329*d0774691Schristos }
330*d0774691Schristos
/*
 * Populate jemalloc_zone and its introspection table.  Version 9 matches the
 * struct layouts declared at the top of this file (through pressure_relief
 * and reinit_lock); the discharge-checking hooks are left NULL because
 * jemalloc does not implement them.
 */
static void
zone_init(void) {
	jemalloc_zone.size = zone_size;
	jemalloc_zone.malloc = zone_malloc;
	jemalloc_zone.calloc = zone_calloc;
	jemalloc_zone.valloc = zone_valloc;
	jemalloc_zone.free = zone_free;
	jemalloc_zone.realloc = zone_realloc;
	jemalloc_zone.destroy = zone_destroy;
	jemalloc_zone.zone_name = "jemalloc_zone";
	jemalloc_zone.batch_malloc = zone_batch_malloc;
	jemalloc_zone.batch_free = zone_batch_free;
	jemalloc_zone.introspect = &jemalloc_zone_introspect;
	jemalloc_zone.version = 9;
	jemalloc_zone.memalign = zone_memalign;
	jemalloc_zone.free_definite_size = zone_free_definite_size;
	jemalloc_zone.pressure_relief = zone_pressure_relief;

	jemalloc_zone_introspect.enumerator = zone_enumerator;
	jemalloc_zone_introspect.good_size = zone_good_size;
	jemalloc_zone_introspect.check = zone_check;
	jemalloc_zone_introspect.print = zone_print;
	jemalloc_zone_introspect.log = zone_log;
	jemalloc_zone_introspect.force_lock = zone_force_lock;
	jemalloc_zone_introspect.force_unlock = zone_force_unlock;
	jemalloc_zone_introspect.statistics = zone_statistics;
	jemalloc_zone_introspect.zone_locked = zone_locked;
	jemalloc_zone_introspect.enable_discharge_checking = NULL;
	jemalloc_zone_introspect.disable_discharge_checking = NULL;
	jemalloc_zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
	jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
#else
	jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
	jemalloc_zone_introspect.reinit_lock = zone_reinit_lock;
}
368*d0774691Schristos
/*
 * Return the zone currently acting as the process default: the first
 * registered zone when any are registered, else whatever
 * malloc_default_zone() reports.  See the inline comment for why
 * malloc_default_zone() alone cannot be trusted on 10.12+.
 */
static malloc_zone_t *
zone_default_get(void) {
	malloc_zone_t **zones = NULL;
	unsigned int num_zones = 0;

	/*
	 * On OSX 10.12, malloc_default_zone returns a special zone that is not
	 * present in the list of registered zones. That zone uses a "lite zone"
	 * if one is present (apparently enabled when malloc stack logging is
	 * enabled), or the first registered zone otherwise. In practice this
	 * means unless malloc stack logging is enabled, the first registered
	 * zone is the default. So get the list of zones to get the first one,
	 * instead of relying on malloc_default_zone.
	 */
	if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
	    (vm_address_t**)&zones, &num_zones)) {
		/*
		 * Reset the value in case the failure happened after it was
		 * set.
		 */
		num_zones = 0;
	}

	if (num_zones) {
		return zones[0];
	}

	/* No registered zones visible; fall back to the libc answer. */
	return malloc_default_zone();
}
398*d0774691Schristos
/*
 * As written, this function can only promote jemalloc_zone: it loops until
 * zone_default_get() reports jemalloc's zone as the default.  Called from
 * zone_register() after jemalloc_zone has been registered.
 */
static void
zone_promote(void) {
	malloc_zone_t *zone;

	do {
		/*
		 * Unregister and reregister the default zone. On OSX >= 10.6,
		 * unregistering takes the last registered zone and places it
		 * at the location of the specified zone. Unregistering the
		 * default zone thus makes the last registered one the default.
		 * On OSX < 10.6, unregistering shifts all registered zones.
		 * The first registered zone then becomes the default.
		 */
		malloc_zone_unregister(default_zone);
		malloc_zone_register(default_zone);

		/*
		 * On OSX 10.6, having the default purgeable zone appear before
		 * the default zone makes some things crash because it thinks it
		 * owns the default zone allocated pointers. We thus
		 * unregister/re-register it in order to ensure it's always
		 * after the default zone. On OSX < 10.6, there is no purgeable
		 * zone, so this does nothing. On OSX >= 10.6, unregistering
		 * replaces the purgeable zone with the last registered zone
		 * above, i.e. the default zone. Registering it again then puts
		 * it at the end, obviously after the default zone.
		 */
		if (purgeable_zone != NULL) {
			malloc_zone_unregister(purgeable_zone);
			malloc_zone_register(purgeable_zone);
		}

		/* Repeat until jemalloc's zone has risen to the front. */
		zone = zone_default_get();
	} while (zone != &jemalloc_zone);
}
435*d0774691Schristos
JEMALLOC_ATTR(constructor)436*d0774691Schristos JEMALLOC_ATTR(constructor)
437*d0774691Schristos void
438*d0774691Schristos zone_register(void) {
439*d0774691Schristos /*
440*d0774691Schristos * If something else replaced the system default zone allocator, don't
441*d0774691Schristos * register jemalloc's.
442*d0774691Schristos */
443*d0774691Schristos default_zone = zone_default_get();
444*d0774691Schristos if (!default_zone->zone_name || strcmp(default_zone->zone_name,
445*d0774691Schristos "DefaultMallocZone") != 0) {
446*d0774691Schristos return;
447*d0774691Schristos }
448*d0774691Schristos
449*d0774691Schristos /*
450*d0774691Schristos * The default purgeable zone is created lazily by OSX's libc. It uses
451*d0774691Schristos * the default zone when it is created for "small" allocations
452*d0774691Schristos * (< 15 KiB), but assumes the default zone is a scalable_zone. This
453*d0774691Schristos * obviously fails when the default zone is the jemalloc zone, so
454*d0774691Schristos * malloc_default_purgeable_zone() is called beforehand so that the
455*d0774691Schristos * default purgeable zone is created when the default zone is still
456*d0774691Schristos * a scalable_zone. As purgeable zones only exist on >= 10.6, we need
457*d0774691Schristos * to check for the existence of malloc_default_purgeable_zone() at
458*d0774691Schristos * run time.
459*d0774691Schristos */
460*d0774691Schristos purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
461*d0774691Schristos malloc_default_purgeable_zone();
462*d0774691Schristos
463*d0774691Schristos /* Register the custom zone. At this point it won't be the default. */
464*d0774691Schristos zone_init();
465*d0774691Schristos malloc_zone_register(&jemalloc_zone);
466*d0774691Schristos
467*d0774691Schristos /* Promote the custom zone to be default. */
468*d0774691Schristos zone_promote();
469*d0774691Schristos }
470