#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"

#ifndef JEMALLOC_ZONE
#  error "This source file is for zones on Darwin (OS X)."
#endif
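
/*
 * This file integrates jemalloc with Darwin's malloc zone framework: it
 * defines a malloc_zone_t backed by jemalloc, registers it at load time, and
 * promotes it to be the process's default zone so that malloc(3) and friends
 * are served by jemalloc.
 */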

/*
 * Definitions of the following structs in malloc/malloc.h might be too old
 * for the built binary to run on newer versions of OSX. So use the newest
 * possible version of those structs.
 */
typedef struct _malloc_zone_t {
	void *reserved1;
	void *reserved2;
	size_t (*size)(struct _malloc_zone_t *, const void *);
	void *(*malloc)(struct _malloc_zone_t *, size_t);
	void *(*calloc)(struct _malloc_zone_t *, size_t, size_t);
	void *(*valloc)(struct _malloc_zone_t *, size_t);
	void (*free)(struct _malloc_zone_t *, void *);
	void *(*realloc)(struct _malloc_zone_t *, void *, size_t);
	void (*destroy)(struct _malloc_zone_t *);
	const char *zone_name;
	unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **,
	    unsigned);
	void (*batch_free)(struct _malloc_zone_t *, void **, unsigned);
	struct malloc_introspection_t *introspect;
	unsigned version;
	void *(*memalign)(struct _malloc_zone_t *, size_t, size_t);
	void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t);
	size_t (*pressure_relief)(struct _malloc_zone_t *, size_t);
} malloc_zone_t;

typedef struct {
	vm_address_t address;
	vm_size_t size;
} vm_range_t;

typedef struct malloc_statistics_t {
	unsigned blocks_in_use;
	size_t size_in_use;
	size_t max_size_in_use;
	size_t size_allocated;
} malloc_statistics_t;

typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t,
    void **);

typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *,
    unsigned);

typedef struct malloc_introspection_t {
	kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t,
	    memory_reader_t, vm_range_recorder_t);
	size_t (*good_size)(malloc_zone_t *, size_t);
	boolean_t (*check)(malloc_zone_t *);
	void (*print)(malloc_zone_t *, boolean_t);
	void (*log)(malloc_zone_t *, void *);
	void (*force_lock)(malloc_zone_t *);
	void (*force_unlock)(malloc_zone_t *);
	void (*statistics)(malloc_zone_t *, malloc_statistics_t *);
	boolean_t (*zone_locked)(malloc_zone_t *);
	boolean_t (*enable_discharge_checking)(malloc_zone_t *);
	boolean_t (*disable_discharge_checking)(malloc_zone_t *);
	void (*discharge)(malloc_zone_t *, void *);
#ifdef __BLOCKS__
	void (*enumerate_discharged_pointers)(malloc_zone_t *,
	    void (^)(void *, void *));
#else
	void *enumerate_unavailable_without_blocks;
#endif
	void (*reinit_lock)(malloc_zone_t *);
} malloc_introspection_t;

extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t,
    vm_address_t **, unsigned *);

extern malloc_zone_t *malloc_default_zone(void);

extern void malloc_zone_register(malloc_zone_t *zone);

extern void malloc_zone_unregister(malloc_zone_t *zone);

/*
 * The malloc_default_purgeable_zone() function is only available on >= 10.6.
 * We need to check whether it is present at runtime, thus the weak_import.
 */
extern malloc_zone_t *malloc_default_purgeable_zone(void)
JEMALLOC_ATTR(weak_import);
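
/*
 * Illustrative note: a weak_import symbol resolves to NULL when the running
 * OS does not provide it, so availability is tested by comparing the function
 * pointer itself against NULL before calling it, e.g.:
 *
 *	if (malloc_default_purgeable_zone != NULL) {
 *		purgeable_zone = malloc_default_purgeable_zone();
 *	}
 */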

/******************************************************************************/
/* Data. */

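/*
 * default_zone and purgeable_zone cache the system zones we displace;
 * jemalloc_zone and jemalloc_zone_introspect are the zone we register.
 * zone_force_lock_pid records which process called zone_force_lock(), so
 * zone_force_unlock() can distinguish the forking parent from the child.
 */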
static malloc_zone_t *default_zone, *purgeable_zone;
static malloc_zone_t jemalloc_zone;
static struct malloc_introspection_t jemalloc_zone_introspect;
static pid_t zone_force_lock_pid = -1;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static size_t zone_size(malloc_zone_t *zone, const void *ptr);
static void *zone_malloc(malloc_zone_t *zone, size_t size);
static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void *zone_valloc(malloc_zone_t *zone, size_t size);
static void zone_free(malloc_zone_t *zone, void *ptr);
static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
    size_t size);
static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
    size_t size);
static void zone_destroy(malloc_zone_t *zone);
static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size,
    void **results, unsigned num_requested);
static void zone_batch_free(struct _malloc_zone_t *zone,
    void **to_be_freed, unsigned num_to_be_freed);
static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal);
static size_t zone_good_size(malloc_zone_t *zone, size_t size);
static kern_return_t zone_enumerator(task_t task, void *data,
    unsigned type_mask, vm_address_t zone_address, memory_reader_t reader,
    vm_range_recorder_t recorder);
static boolean_t zone_check(malloc_zone_t *zone);
static void zone_print(malloc_zone_t *zone, boolean_t verbose);
static void zone_log(malloc_zone_t *zone, void *address);
static void zone_force_lock(malloc_zone_t *zone);
static void zone_force_unlock(malloc_zone_t *zone);
static void zone_statistics(malloc_zone_t *zone,
    malloc_statistics_t *stats);
static boolean_t zone_locked(malloc_zone_t *zone);
static void zone_reinit_lock(malloc_zone_t *zone);

/******************************************************************************/
/*
 * Functions.
 */

static size_t
zone_size(malloc_zone_t *zone, const void *ptr) {
	/*
	 * There appear to be places within Darwin (such as setenv(3)) that
	 * cause calls to this function with pointers that *no* zone owns. If
	 * we knew that all pointers were owned by *some* zone, we could split
	 * our zone into two parts, and use one as the default allocator and
	 * the other as the default deallocator/reallocator. Since that will
	 * not work in practice, we must check all pointers to assure that they
	 * reside within a mapped extent before determining size.
	 */
	return ivsalloc(tsdn_fetch(), ptr);
}

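/*
 * Allocation entry points: thin wrappers that route zone calls to the
 * corresponding je_* functions, so any allocation made through this zone is
 * served by jemalloc.
 */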
static void *
zone_malloc(malloc_zone_t *zone, size_t size) {
	return je_malloc(size);
}

static void *
zone_calloc(malloc_zone_t *zone, size_t num, size_t size) {
	return je_calloc(num, size);
}

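/*
 * valloc(3) semantics: return page-aligned memory. PAGE is jemalloc's
 * internal page-size constant, so posix_memalign provides the required
 * alignment.
 */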
static void *
zone_valloc(malloc_zone_t *zone, size_t size) {
	void *ret = NULL; /* Assignment avoids useless compiler warning. */

	je_posix_memalign(&ret, PAGE, size);

	return ret;
}

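/*
 * Deallocation entry points must cope with pointers that jemalloc does not
 * own (e.g. allocations made by the original default zone before ours was
 * promoted). ivsalloc() returns 0 for pointers outside jemalloc's mapped
 * extents; those are handed back to the system allocator.
 */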
static void
zone_free(malloc_zone_t *zone, void *ptr) {
	if (ivsalloc(tsdn_fetch(), ptr) != 0) {
		je_free(ptr);
		return;
	}

	free(ptr);
}

static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
	if (ivsalloc(tsdn_fetch(), ptr) != 0) {
		return je_realloc(ptr, size);
	}

	return realloc(ptr, size);
}

static void *
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) {
	void *ret = NULL; /* Assignment avoids useless compiler warning. */

	je_posix_memalign(&ret, alignment, size);

	return ret;
}

static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) {
	size_t alloc_size;

	alloc_size = ivsalloc(tsdn_fetch(), ptr);
	if (alloc_size != 0) {
		assert(alloc_size == size);
		je_free(ptr);
		return;
	}

	free(ptr);
}

static void
zone_destroy(malloc_zone_t *zone) {
	/* This function should never be called. */
	not_reached();
}

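/*
 * Batch interfaces: allocate or free up to num_requested objects of a single
 * size. zone_batch_malloc returns the number of objects actually allocated,
 * which may fall short of num_requested on allocation failure.
 */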
static unsigned
zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results,
    unsigned num_requested) {
	unsigned i;

	for (i = 0; i < num_requested; i++) {
		results[i] = je_malloc(size);
		if (!results[i]) {
			break;
		}
	}

	return i;
}

static void
zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed,
    unsigned num_to_be_freed) {
	unsigned i;

	for (i = 0; i < num_to_be_freed; i++) {
		zone_free(zone, to_be_freed[i]);
		to_be_freed[i] = NULL;
	}
}

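/*
 * Memory pressure callback: we make no attempt to return memory to the OS
 * here, so report that 0 bytes were relieved.
 */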
static size_t
zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) {
	return 0;
}

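/*
 * good_size reports the usable size a request would be rounded up to;
 * sz_s2u() maps a request to the smallest jemalloc size class that fits it.
 */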
static size_t
zone_good_size(malloc_zone_t *zone, size_t size) {
	if (size == 0) {
		size = 1;
	}
	return sz_s2u(size);
}

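/*
 * Introspection stubs: jemalloc does not support zone enumeration, checking,
 * printing, or logging through this interface, so these either succeed
 * trivially or do nothing.
 */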
static kern_return_t
zone_enumerator(task_t task, void *data, unsigned type_mask,
    vm_address_t zone_address, memory_reader_t reader,
    vm_range_recorder_t recorder) {
	return KERN_SUCCESS;
}

static boolean_t
zone_check(malloc_zone_t *zone) {
	return true;
}

static void
zone_print(malloc_zone_t *zone, boolean_t verbose) {
}

static void
zone_log(malloc_zone_t *zone, void *address) {
}

static void
zone_force_lock(malloc_zone_t *zone) {
	if (isthreaded) {
		/*
		 * See the note in zone_force_unlock, below, to see why we need
		 * this.
		 */
		assert(zone_force_lock_pid == -1);
		zone_force_lock_pid = getpid();
		jemalloc_prefork();
	}
}

static void
zone_force_unlock(malloc_zone_t *zone) {
	/*
	 * zone_force_lock and zone_force_unlock are the entry points to the
	 * forking machinery on OS X. The tricky thing is, the child is not
	 * allowed to unlock mutexes locked in the parent, even if owned by the
	 * forking thread (and the mutex type we use in OS X will fail an
	 * assert if we try). In the child, we can get away with reinitializing
	 * all the mutexes, which has the effect of unlocking them. In the
	 * parent, doing this would mean we wouldn't wake any waiters blocked
	 * on the mutexes we unlock. So, we record the pid of the forking
	 * process in zone_force_lock, and use that to detect if we're in the
	 * parent or child here, to decide which unlock logic we need.
	 */
	if (isthreaded) {
		assert(zone_force_lock_pid != -1);
		if (getpid() == zone_force_lock_pid) {
			jemalloc_postfork_parent();
		} else {
			jemalloc_postfork_child();
		}
		zone_force_lock_pid = -1;
	}
}

static void
zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
	/* We make no effort to actually fill the values. */
	stats->blocks_in_use = 0;
	stats->size_in_use = 0;
	stats->max_size_in_use = 0;
	stats->size_allocated = 0;
}

static boolean_t
zone_locked(malloc_zone_t *zone) {
	/* Pretend no lock is being held. */
	return false;
}

static void
zone_reinit_lock(malloc_zone_t *zone) {
	/*
	 * As of OSX 10.12, this function is only used when force_unlock would
	 * be used if the zone version were < 9. So just use force_unlock.
	 */
	zone_force_unlock(zone);
}

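/*
 * Populate jemalloc_zone and its introspection struct. Declaring zone
 * version 9 tells libmalloc that the memalign, free_definite_size, and
 * pressure_relief entry points, as well as the reinit_lock introspection
 * hook used after fork, are all present.
 */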
static void
zone_init(void) {
	jemalloc_zone.size = zone_size;
	jemalloc_zone.malloc = zone_malloc;
	jemalloc_zone.calloc = zone_calloc;
	jemalloc_zone.valloc = zone_valloc;
	jemalloc_zone.free = zone_free;
	jemalloc_zone.realloc = zone_realloc;
	jemalloc_zone.destroy = zone_destroy;
	jemalloc_zone.zone_name = "jemalloc_zone";
	jemalloc_zone.batch_malloc = zone_batch_malloc;
	jemalloc_zone.batch_free = zone_batch_free;
	jemalloc_zone.introspect = &jemalloc_zone_introspect;
	jemalloc_zone.version = 9;
	jemalloc_zone.memalign = zone_memalign;
	jemalloc_zone.free_definite_size = zone_free_definite_size;
	jemalloc_zone.pressure_relief = zone_pressure_relief;

	jemalloc_zone_introspect.enumerator = zone_enumerator;
	jemalloc_zone_introspect.good_size = zone_good_size;
	jemalloc_zone_introspect.check = zone_check;
	jemalloc_zone_introspect.print = zone_print;
	jemalloc_zone_introspect.log = zone_log;
	jemalloc_zone_introspect.force_lock = zone_force_lock;
	jemalloc_zone_introspect.force_unlock = zone_force_unlock;
	jemalloc_zone_introspect.statistics = zone_statistics;
	jemalloc_zone_introspect.zone_locked = zone_locked;
	jemalloc_zone_introspect.enable_discharge_checking = NULL;
	jemalloc_zone_introspect.disable_discharge_checking = NULL;
	jemalloc_zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
	jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
#else
	jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
	jemalloc_zone_introspect.reinit_lock = zone_reinit_lock;
}

static malloc_zone_t *
zone_default_get(void) {
	malloc_zone_t **zones = NULL;
	unsigned int num_zones = 0;

	/*
	 * On OSX 10.12, malloc_default_zone returns a special zone that is not
	 * present in the list of registered zones. That zone uses a "lite
	 * zone" if one is present (apparently enabled when malloc stack
	 * logging is enabled), or the first registered zone otherwise. In
	 * practice this means unless malloc stack logging is enabled, the
	 * first registered zone is the default. So get the list of zones to
	 * get the first one, instead of relying on malloc_default_zone.
	 */
	if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
	    (vm_address_t**)&zones, &num_zones)) {
		/*
		 * Reset the value in case the failure happened after it was
		 * set.
		 */
		num_zones = 0;
	}

	if (num_zones) {
		return zones[0];
	}

	return malloc_default_zone();
}

/* As written, this function can only promote jemalloc_zone. */
static void
zone_promote(void) {
	malloc_zone_t *zone;

	do {
		/*
		 * Unregister and reregister the default zone. On OSX >= 10.6,
		 * unregistering takes the last registered zone and places it
		 * at the location of the specified zone. Unregistering the
		 * default zone thus makes the last registered one the default.
		 * On OSX < 10.6, unregistering shifts all registered zones.
		 * The first registered zone then becomes the default.
		 */
		malloc_zone_unregister(default_zone);
		malloc_zone_register(default_zone);

		/*
		 * On OSX 10.6, having the default purgeable zone appear before
		 * the default zone makes some things crash because it thinks
		 * it owns the default zone allocated pointers. We thus
		 * unregister/re-register it in order to ensure it's always
		 * after the default zone. On OSX < 10.6, there is no purgeable
		 * zone, so this does nothing. On OSX >= 10.6, unregistering
		 * replaces the purgeable zone with the last registered zone
		 * above, i.e. the default zone. Registering it again then puts
		 * it at the end, obviously after the default zone.
		 */
		if (purgeable_zone != NULL) {
			malloc_zone_unregister(purgeable_zone);
			malloc_zone_register(purgeable_zone);
		}

		zone = zone_default_get();
	} while (zone != &jemalloc_zone);
}

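/*
 * JEMALLOC_ATTR(constructor) expands to __attribute__((constructor)), so
 * zone_register runs automatically when the jemalloc image is loaded, before
 * main().
 */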
JEMALLOC_ATTR(constructor)
void
zone_register(void) {
	/*
	 * If something else replaced the system default zone allocator, don't
	 * register jemalloc's.
	 */
	default_zone = zone_default_get();
	if (!default_zone->zone_name || strcmp(default_zone->zone_name,
	    "DefaultMallocZone") != 0) {
		return;
	}

	/*
	 * The default purgeable zone is created lazily by OSX's libc. It uses
	 * the default zone when it is created for "small" allocations
	 * (< 15 KiB), but assumes the default zone is a scalable_zone. This
	 * obviously fails when the default zone is the jemalloc zone, so
	 * malloc_default_purgeable_zone() is called beforehand so that the
	 * default purgeable zone is created when the default zone is still
	 * a scalable_zone. As purgeable zones only exist on >= 10.6, we need
	 * to check for the existence of malloc_default_purgeable_zone() at
	 * run time.
	 */
	purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
	    malloc_default_purgeable_zone();

	/* Register the custom zone. At this point it won't be the default. */
	zone_init();
	malloc_zone_register(&jemalloc_zone);

	/* Promote the custom zone to be default. */
	zone_promote();
}