/*
 * $Id: ptmalloc3.c,v 1.8 2006/03/31 15:57:28 wg Exp $
 *

ptmalloc3 -- wrapper for Doug Lea's malloc-2.8.3 with concurrent
             allocations

Copyright (c) 2005, 2006 Wolfram Gloger  <ptmalloc@malloc.de>

Permission to use, copy, modify, distribute, and sell this software
and its documentation for any purpose is hereby granted without fee,
provided that (i) the above copyright notices and this permission
notice appear in all copies of the software and related documentation,
and (ii) the name of Wolfram Gloger may not be used in any advertising
or publicity relating to the software.

THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.

IN NO EVENT SHALL WOLFRAM GLOGER BE LIABLE FOR ANY SPECIAL,
INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY
DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY
OF LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.

 */

/*
 * TODO: optimization / better integration with malloc.c (partly done)
 *       malloc_{get,set}_state (probably hard to keep compatibility)
 *       debugging hooks
 *       better mstats
 */

#include <sys/types.h>   /* For size_t */
#include <sys/mman.h>    /* for mmap */
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>      /* for memset */

#include <malloc-machine.h>

#include "malloc-2.8.3.h"

/* ----------------------------------------------------------------------- */

/* The following section is replicated from malloc.c */

#include "malloc-private.h"

/* end of definitions replicated from malloc.c */

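/* Release a chunk that was allocated directly via mmap: the leading
   padding recorded in prev_foot and the trailing MMAP_FOOT_PAD belong
   to the same mapping, so the whole region is unmapped in one call and
   the arena's footprint is reduced accordingly. */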
#define munmap_chunk(mst, p) do {                         \
  size_t prevsize = (p)->prev_foot & ~IS_MMAPPED_BIT;     \
  size_t psize = chunksize(p) + prevsize + MMAP_FOOT_PAD; \
  if (CALL_MUNMAP((char*)(p) - prevsize, psize) == 0)     \
    ((struct malloc_state*)(mst))->footprint -= psize;    \
} while (0)

/* ---------------------------------------------------------------------- */

/* Minimum size for a newly created arena.  */
#ifndef ARENA_SIZE_MIN
# define ARENA_SIZE_MIN    (128*1024)
#endif
#define HAVE_MEMCPY        1

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed.  */
#ifndef THREAD_STATS
# define THREAD_STATS 0
#endif

#ifndef MALLOC_DEBUG
# define MALLOC_DEBUG 0
#endif

#define my_powerof2(x) ((((x)-1)&(x))==0)

/* Already initialized? */
int __malloc_initialized = -1;

#ifndef RETURN_ADDRESS
# define RETURN_ADDRESS(X_) (NULL)
#endif

#if THREAD_STATS
# define THREAD_STAT(x) x
#else
# define THREAD_STAT(x) do ; while(0)
#endif

#ifdef _LIBC

/* Special defines for the GNU C library.  */
#define public_cALLOc    __libc_calloc
#define public_fREe      __libc_free
#define public_cFREe     __libc_cfree
#define public_mALLOc    __libc_malloc
#define public_mEMALIGn  __libc_memalign
#define public_rEALLOc   __libc_realloc
#define public_vALLOc    __libc_valloc
#define public_pVALLOc   __libc_pvalloc
#define public_pMEMALIGn __posix_memalign
#define public_mALLINFo  __libc_mallinfo
#define public_mALLOPt   __libc_mallopt
#define public_mTRIm     __malloc_trim
#define public_mSTATs    __malloc_stats
#define public_mUSABLe   __malloc_usable_size
#define public_iCALLOc   __libc_independent_calloc
#define public_iCOMALLOc __libc_independent_comalloc
#define public_gET_STATe __malloc_get_state
#define public_sET_STATe __malloc_set_state
#define malloc_getpagesize __getpagesize()
#define open             __open
#define mmap             __mmap
#define munmap           __munmap
#define mremap           __mremap
#define mprotect         __mprotect
#define MORECORE         (*__morecore)
#define MORECORE_FAILURE 0

void * __default_morecore (ptrdiff_t);
void *(*__morecore)(ptrdiff_t) = __default_morecore;

#else /* !_LIBC */

#define public_cALLOc    calloc
#define public_fREe      free
#define public_cFREe     cfree
#define public_mALLOc    malloc
#define public_mEMALIGn  memalign
#define public_rEALLOc   realloc
#define public_vALLOc    valloc
#define public_pVALLOc   pvalloc
#define public_pMEMALIGn posix_memalign
#define public_mALLINFo  mallinfo
#define public_mALLOPt   mallopt
#define public_mTRIm     malloc_trim
#define public_mSTATs    malloc_stats
#define public_mUSABLe   malloc_usable_size
#define public_iCALLOc   independent_calloc
#define public_iCOMALLOc independent_comalloc
#define public_gET_STATe malloc_get_state
#define public_sET_STATe malloc_set_state

#endif /* _LIBC */

#if !defined _LIBC && (!defined __GNUC__ || __GNUC__<3)
#define __builtin_expect(expr, val) (expr)
#endif

#if MALLOC_DEBUG
#include <assert.h>
#else
#undef assert
#define assert(x) ((void)0)
#endif

/* USE_STARTER determines if and when the special "starter" hook
   functions are used: not at all (0), during ptmalloc_init (first bit
   set), or from the beginning until an explicit call to ptmalloc_init
   (second bit set).  This is necessary if thread-related
   initialization functions (e.g.  pthread_key_create) require
   malloc() calls (set USE_STARTER=1), or if those functions initially
   cannot be used at all (set USE_STARTER=2 and perform an explicit
   ptmalloc_init() when the thread library is ready, typically at the
   start of main()). */

#ifndef USE_STARTER
# ifndef _LIBC
#  define USE_STARTER 1
# else
#  if USE___THREAD || (defined USE_TLS && !defined SHARED)
    /* These routines are never needed in this configuration.  */
#   define USE_STARTER 0
#  else
#   define USE_STARTER (USE_TLS ? 4 : 1)
#  endif
# endif
#endif
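
/* Illustrative sketch (not compiled) for the USE_STARTER=2 case
   described in the comment above: the application calls ptmalloc_init()
   itself once the thread library is usable.  The declaration and call
   site below are only an example. */
#if 0
extern void ptmalloc_init(void);

int
main(void)
{
  ptmalloc_init();   /* starter hooks are replaced from here on */
  /* ... the rest of the program may use malloc()/free() normally ... */
  return 0;
}
#endif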

/*----------------------------------------------------------------------*/

/* Arenas */
static tsd_key_t arena_key;
static mutex_t list_lock;

/* Arena structure */
struct malloc_arena {
  /* Serialize access.  */
  mutex_t mutex;

  /* Statistics for locking.  Only used if THREAD_STATS is defined.  */
  long stat_lock_direct, stat_lock_loop, stat_lock_wait;
  long stat_starter;

  /* Linked list */
  struct malloc_arena *next;

  /* Space for mstate.  The size is just the minimum such that
     create_mspace_with_base can be successfully called.  */
  char buf_[pad_request(sizeof(struct malloc_state)) + TOP_FOOT_SIZE +
            CHUNK_ALIGN_MASK + 1];
};
#define MSPACE_OFFSET (((offsetof(struct malloc_arena, buf_) \
                         + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK))
#define arena_to_mspace(a) ((void *)chunk2mem((char*)(a) + MSPACE_OFFSET))

/* check for chunk from non-main arena */
#define chunk_non_main_arena(p) ((p)->head & NON_MAIN_ARENA)

static struct malloc_arena* _int_new_arena(size_t size);

/* Buffer for the main arena. */
static struct malloc_arena main_arena;

/* For now, store the arena pointer in the chunk footer.  This means
   typically 4 bytes more overhead for each non-main-arena chunk, but
   is fast and easy to compute.  Note that the pointer stored in the
   extra footer must be properly aligned, though. */
#define FOOTER_OVERHEAD \
 (2*sizeof(struct malloc_arena*) - SIZE_T_SIZE)

#define arena_for_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? *(struct malloc_arena**)              \
  ((char*)(ptr) + chunksize(ptr) - (FOOTER_OVERHEAD - SIZE_T_SIZE)) \
  : &main_arena)

/* special because of extra overhead */
#define arena_for_mmap_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? *(struct malloc_arena**)             \
  ((char*)(ptr) + chunksize(ptr) - sizeof(struct malloc_arena*))   \
  : &main_arena)

#define set_non_main_arena(mem, ar_ptr) do {                   		      \
  mchunkptr P = mem2chunk(mem);                                               \
  size_t SZ = chunksize(P) - (is_mmapped(P) ? sizeof(struct malloc_arena*)    \
                              : (FOOTER_OVERHEAD - SIZE_T_SIZE));             \
  assert((unsigned long)((char*)(P) + SZ)%sizeof(struct malloc_arena*) == 0); \
  *(struct malloc_arena**)((char*)(P) + SZ) = (ar_ptr);                       \
  P->head |= NON_MAIN_ARENA;                                                  \
} while (0)
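
/* Note on the scheme above: the public wrappers below enlarge every
   non-main-arena request by FOOTER_OVERHEAD, and set_non_main_arena()
   then places the owning arena pointer in the reserved space at the end
   of the chunk; arena_for_chunk() and arena_for_mmap_chunk() recompute
   the same offsets to read it back.  Mmapped chunks use a different
   offset because there is no following in-heap chunk header after
   them. */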

/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
  void *vptr = NULL; \
  ptr = (struct malloc_arena*)tsd_getspecific(arena_key, vptr); \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size)); \
} while(0)

static struct malloc_arena*
arena_get2(struct malloc_arena* a_tsd, size_t size)
{
  struct malloc_arena* a;
  int err;

  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (void *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(mutex_trylock(&list_lock)) {
    a = a_tsd;
    goto repeat;
  }
  (void)mutex_unlock(&list_lock);

  /* Nothing immediately available, so generate a new arena.  */
  a = _int_new_arena(size);
  if(!a)
    return 0;

  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  err = mutex_lock(&a->mutex); /* remember result */

  /* Add the new arena to the global list.  */
  (void)mutex_lock(&list_lock);
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;
  (void)mutex_unlock(&list_lock);

  if(err) /* locking failed; keep arena for further attempts later */
    return 0;

  THREAD_STAT(++(a->stat_lock_loop));
  return a;
}

/* Create a new arena with room for a chunk of size "size".  */

static struct malloc_arena*
_int_new_arena(size_t size)
{
  struct malloc_arena* a;
  size_t mmap_sz = sizeof(*a) + pad_request(size);
  void *m;

  if (mmap_sz < ARENA_SIZE_MIN)
    mmap_sz = ARENA_SIZE_MIN;
  /* conservative estimate for page size */
  mmap_sz = (mmap_sz + 8191) & ~(size_t)8191;
  a = CALL_MMAP(mmap_sz);
  if ((char*)a == (char*)-1)
    return 0;

  m = create_mspace_with_base((char*)a + MSPACE_OFFSET,
                              mmap_sz - MSPACE_OFFSET,
                              0);

  if (!m) {
    CALL_MUNMAP(a, mmap_sz);
    a = 0;
  } else {
    /*a->next = NULL;*/
    /*a->system_mem = a->max_system_mem = h->size;*/
  }

  return a;
}

/*------------------------------------------------------------------------*/

/* Hook mechanism for proper initialization and atfork support. */

/* Define and initialize the hook variables.  These weak definitions must
   appear before any use of the variables in a function.  */
#ifndef weak_variable
#ifndef _LIBC
#define weak_variable /**/
#else
/* In GNU libc we want the hook variables to be weak definitions to
   avoid a problem with Emacs.  */
#define weak_variable weak_function
#endif
#endif

#if !(USE_STARTER & 2)
# define free_hook_ini     NULL
/* Forward declarations.  */
static void* malloc_hook_ini (size_t sz, const void *caller);
static void* realloc_hook_ini (void* ptr, size_t sz, const void* caller);
static void* memalign_hook_ini (size_t alignment, size_t sz,
                                const void* caller);
#else
# define free_hook_ini     free_starter
# define malloc_hook_ini   malloc_starter
# define realloc_hook_ini  NULL
# define memalign_hook_ini memalign_starter
#endif

void weak_variable (*__malloc_initialize_hook) (void) = NULL;
void weak_variable (*__free_hook) (void * __ptr, const void *)
     = free_hook_ini;
void * weak_variable (*__malloc_hook) (size_t __size, const void *)
     = malloc_hook_ini;
void * weak_variable (*__realloc_hook)
     (void * __ptr, size_t __size, const void *) = realloc_hook_ini;
void * weak_variable (*__memalign_hook)
  (size_t __alignment, size_t __size, const void *) = memalign_hook_ini;
/*void weak_variable (*__after_morecore_hook) (void) = NULL;*/

/* The initial hooks just call the initialization routine, then do the
   normal work. */

#if !(USE_STARTER & 2)
static
#endif
void ptmalloc_init(void);

#if !(USE_STARTER & 2)

static void*
malloc_hook_ini(size_t sz, const void * caller)
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return public_mALLOc(sz);
}

static void *
realloc_hook_ini(void *ptr, size_t sz, const void * caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return public_rEALLOc(ptr, sz);
}

static void*
memalign_hook_ini(size_t alignment, size_t sz, const void * caller)
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return public_mEMALIGn(alignment, sz);
}

#endif /* !(USE_STARTER & 2) */

/*----------------------------------------------------------------------*/

#if !defined NO_THREADS && USE_STARTER

/* The following hooks are used when the global initialization in
   ptmalloc_init() hasn't completed yet. */

static void*
malloc_starter(size_t sz, const void *caller)
{
  void* victim;

  /*ptmalloc_init_minimal();*/
  victim = mspace_malloc(arena_to_mspace(&main_arena), sz);
  THREAD_STAT(++main_arena.stat_starter);

  return victim;
}

static void*
memalign_starter(size_t align, size_t sz, const void *caller)
{
  void* victim;

  /*ptmalloc_init_minimal();*/
  victim = mspace_memalign(arena_to_mspace(&main_arena), align, sz);
  THREAD_STAT(++main_arena.stat_starter);

  return victim;
}

static void
free_starter(void* mem, const void *caller)
{
  if (mem) {
    mchunkptr p = mem2chunk(mem);
    void *msp = arena_to_mspace(&main_arena);
    if (is_mmapped(p))
      munmap_chunk(msp, p);
    else
      mspace_free(msp, mem);
  }
  THREAD_STAT(++main_arena.stat_starter);
}

#endif /* !defined NO_THREADS && USE_STARTER */

/*----------------------------------------------------------------------*/

#ifndef NO_THREADS

/* atfork support.  */

static void * (*save_malloc_hook) (size_t __size, const void *);
# if !defined _LIBC || !defined USE_TLS || (defined SHARED && !USE___THREAD)
static void * (*save_memalign_hook) (size_t __align, size_t __size,
                                     const void *);
# endif
static void   (*save_free_hook) (void * __ptr, const void *);
static void*  save_arena;

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((void*)-1)

/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static void*
malloc_atfork(size_t sz, const void *caller)
{
  void *vptr = NULL;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    return mspace_malloc(arena_to_mspace(&main_arena), sz);
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return public_mALLOc(sz);
  }
}

static void
free_atfork(void* mem, const void *caller)
{
  void *vptr = NULL;
  struct malloc_arena *ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);

  if (is_mmapped(p)) {                      /* release mmapped memory. */
    ar_ptr = arena_for_mmap_chunk(p);
    munmap_chunk(arena_to_mspace(ar_ptr), p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_lock(&ar_ptr->mutex);
  mspace_free(arena_to_mspace(ar_ptr), mem);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_unlock(&ar_ptr->mutex);
}

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */

static void
ptmalloc_lock_all (void)
{
  struct malloc_arena* ar_ptr;

  if(__malloc_initialized < 1)
    return;
  (void)mutex_lock(&list_lock);
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena)
      break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
}

static void
ptmalloc_unlock_all (void)
{
  struct malloc_arena *ar_ptr;

  if(__malloc_initialized < 1)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}

#ifdef __linux__

/* In LinuxThreads, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2(void)
{
  struct malloc_arena *ar_ptr;

  if(__malloc_initialized < 1)
    return;
#if defined _LIBC || 1 /*defined MALLOC_HOOKS*/
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
  for(ar_ptr = &main_arena;;) {
    (void)mutex_init(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_init(&list_lock);
}

#else

#define ptmalloc_unlock_all2 ptmalloc_unlock_all

#endif

#endif /* !defined NO_THREADS */

/*---------------------------------------------------------------------*/

#if !(USE_STARTER & 2)
static
#endif
void
ptmalloc_init(void)
{
  const char* s = NULL;
  int secure = 0;
  void *mspace;

  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

  /*if (mp_.pagesize == 0)
    ptmalloc_init_minimal();*/

#ifndef NO_THREADS
# if USE_STARTER & 1
  /* With some threads implementations, creating thread-specific data
     or initializing a mutex may call malloc() itself.  Provide a
     simple starter version (realloc() won't work). */
  save_malloc_hook = __malloc_hook;
  save_memalign_hook = __memalign_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_starter;
  __memalign_hook = memalign_starter;
  __free_hook = free_starter;
#  ifdef _LIBC
  /* Initialize the pthreads interface. */
  if (__pthread_initialize != NULL)
    __pthread_initialize();
#  endif /* _LIBC */
# endif /* USE_STARTER & 1 */
#endif /* !defined NO_THREADS */
  mutex_init(&main_arena.mutex);
  main_arena.next = &main_arena;
  mspace = create_mspace_with_base((char*)&main_arena + MSPACE_OFFSET,
                                   sizeof(main_arena) - MSPACE_OFFSET,
                                   0);
  assert(mspace == arena_to_mspace(&main_arena));

  mutex_init(&list_lock);
  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (void *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
#ifndef NO_THREADS
# if USE_STARTER & 1
  __malloc_hook = save_malloc_hook;
  __memalign_hook = save_memalign_hook;
  __free_hook = save_free_hook;
# endif
# if USE_STARTER & 2
  __malloc_hook = 0;
  __memalign_hook = 0;
  __free_hook = 0;
# endif
#endif
#ifdef _LIBC
  secure = __libc_enable_secure;
#else
  if (! secure) {
    if ((s = getenv("MALLOC_TRIM_THRESHOLD_")))
      public_mALLOPt(M_TRIM_THRESHOLD, atoi(s));
    if ((s = getenv("MALLOC_TOP_PAD_")) ||
        (s = getenv("MALLOC_GRANULARITY_")))
      public_mALLOPt(M_GRANULARITY, atoi(s));
    if ((s = getenv("MALLOC_MMAP_THRESHOLD_")))
      public_mALLOPt(M_MMAP_THRESHOLD, atoi(s));
    /*if ((s = getenv("MALLOC_MMAP_MAX_"))) this is no longer available
      public_mALLOPt(M_MMAP_MAX, atoi(s));*/
  }
  s = getenv("MALLOC_CHECK_");
#endif
  if (s) {
    /*if(s[0]) mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
      __malloc_check_init();*/
  }
  if (__malloc_initialize_hook != NULL)
    (*__malloc_initialize_hook)();
  __malloc_initialized = 1;
}
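
/* Usage sketch (not compiled, illustrative only): the environment
   tunables parsed above map directly onto mallopt() parameters, so the
   same settings can also be made programmatically before the first
   allocation.  The numeric values below are arbitrary examples. */
#if 0
  mallopt(M_TRIM_THRESHOLD, 256*1024);   /* as MALLOC_TRIM_THRESHOLD_ */
  mallopt(M_GRANULARITY,     64*1024);   /* as MALLOC_GRANULARITY_ */
  mallopt(M_MMAP_THRESHOLD, 512*1024);   /* as MALLOC_MMAP_THRESHOLD_ */
#endif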

/*------------------------ Public wrappers. --------------------------------*/

void*
public_mALLOc(size_t bytes)
{
  struct malloc_arena* ar_ptr;
  void *victim;

  void * (*hook) (size_t, const void *) = __malloc_hook;
  if (hook != NULL)
    return (*hook)(bytes, RETURN_ADDRESS (0));

  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD);
  if (!ar_ptr)
    return 0;
  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  victim = mspace_malloc(arena_to_mspace(ar_ptr), bytes);
  if (victim && ar_ptr != &main_arena)
    set_non_main_arena(victim, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);
  assert(!victim || is_mmapped(mem2chunk(victim)) ||
         ar_ptr == arena_for_chunk(mem2chunk(victim)));
  return victim;
}
#ifdef libc_hidden_def
libc_hidden_def(public_mALLOc)
#endif

void
public_fREe(void* mem)
{
  struct malloc_arena* ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  void (*hook) (void *, const void *) = __free_hook;
  if (hook != NULL) {
    (*hook)(mem, RETURN_ADDRESS (0));
    return;
  }

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);

  if (is_mmapped(p)) {                      /* release mmapped memory. */
    ar_ptr = arena_for_mmap_chunk(p);
    munmap_chunk(arena_to_mspace(ar_ptr), p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
#if THREAD_STATS
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif
  mspace_free(arena_to_mspace(ar_ptr), mem);
  (void)mutex_unlock(&ar_ptr->mutex);
}
#ifdef libc_hidden_def
libc_hidden_def (public_fREe)
#endif

void*
public_rEALLOc(void* oldmem, size_t bytes)
{
  struct malloc_arena* ar_ptr;

  mchunkptr oldp;             /* chunk corresponding to oldmem */

  void* newp;             /* chunk to return */

  void * (*hook) (void *, size_t, const void *) = __realloc_hook;
  if (hook != NULL)
    return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));

#if REALLOC_ZERO_BYTES_FREES
  if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
#endif

  /* realloc of null is supposed to be same as malloc */
  if (oldmem == 0)
    return public_mALLOc(bytes);

  oldp    = mem2chunk(oldmem);
  if (is_mmapped(oldp))
    ar_ptr = arena_for_mmap_chunk(oldp); /* FIXME: use mmap_resize */
  else
    ar_ptr = arena_for_chunk(oldp);
#if THREAD_STATS
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif

#ifndef NO_THREADS
  /* As in malloc(), remember this arena for the next allocation. */
  tsd_setspecific(arena_key, (void *)ar_ptr);
#endif

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  newp = mspace_realloc(arena_to_mspace(ar_ptr), oldmem, bytes);

  if (newp && ar_ptr != &main_arena)
    set_non_main_arena(newp, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!newp || is_mmapped(mem2chunk(newp)) ||
         ar_ptr == arena_for_chunk(mem2chunk(newp)));
  return newp;
}
#ifdef libc_hidden_def
libc_hidden_def (public_rEALLOc)
#endif

void*
public_mEMALIGn(size_t alignment, size_t bytes)
{
  struct malloc_arena* ar_ptr;
  void *p;

  void * (*hook) (size_t, size_t, const void *) = __memalign_hook;
  if (hook != NULL)
    return (*hook)(alignment, bytes, RETURN_ADDRESS (0));

  /* If need less alignment than we give anyway, just relay to malloc */
  if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size */
  if (alignment <  MIN_CHUNK_SIZE)
    alignment = MIN_CHUNK_SIZE;

  arena_get(ar_ptr,
            bytes + FOOTER_OVERHEAD + alignment + MIN_CHUNK_SIZE);
  if(!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  p = mspace_memalign(arena_to_mspace(ar_ptr), alignment, bytes);

  if (p && ar_ptr != &main_arena)
    set_non_main_arena(p, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!p || is_mmapped(mem2chunk(p)) ||
         ar_ptr == arena_for_chunk(mem2chunk(p)));
  return p;
}
#ifdef libc_hidden_def
libc_hidden_def (public_mEMALIGn)
#endif

void*
public_vALLOc(size_t bytes)
{
  struct malloc_arena* ar_ptr;
  void *p;

  if(__malloc_initialized < 0)
    ptmalloc_init ();
  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD + MIN_CHUNK_SIZE);
  if(!ar_ptr)
    return 0;
  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  p = mspace_memalign(arena_to_mspace(ar_ptr), 4096, bytes);

  if (p && ar_ptr != &main_arena)
    set_non_main_arena(p, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);
  return p;
}

int
public_pMEMALIGn (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  /* Test whether the ALIGNMENT argument is valid.  It must be a power
     of two multiple of sizeof (void *).  */
  if (alignment % sizeof (void *) != 0
      || !my_powerof2 (alignment / sizeof (void *))
      || alignment == 0)
    return EINVAL;

  mem = public_mEMALIGn (alignment, size);

  if (mem != NULL) {
    *memptr = mem;
    return 0;
  }

  return ENOMEM;
}
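
/* Usage sketch (not compiled): unlike malloc(), posix_memalign()
   reports failure through its return value rather than through errno,
   so callers check the result directly.  The alignment and size below
   are arbitrary example values. */
#if 0
  void *buf;
  if (posix_memalign(&buf, 64, 4096) == 0) {
    /* ... buf is 64-byte aligned here ... */
    free(buf);
  }
#endif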

void*
public_cALLOc(size_t n_elements, size_t elem_size)
{
  struct malloc_arena* ar_ptr;
  size_t bytes, sz;
  void* mem;
  void * (*hook) (size_t, const void *) = __malloc_hook;

  /* size_t is unsigned so the behavior on overflow is defined.  */
  bytes = n_elements * elem_size;
#define HALF_INTERNAL_SIZE_T \
  (((size_t) 1) << (8 * sizeof (size_t) / 2))
  if (__builtin_expect ((n_elements | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
    if (elem_size != 0 && bytes / elem_size != n_elements) {
      /*MALLOC_FAILURE_ACTION;*/
      return 0;
    }
  }
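
  /* Rationale for the check above: if both operands fit in the lower
     half of size_t, their product cannot overflow, so the division-based
     verification is only performed in the unlikely case that either
     operand has a bit set in its upper half. */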

  if (hook != NULL) {
    sz = bytes;
    mem = (*hook)(sz, RETURN_ADDRESS (0));
    if(mem == 0)
      return 0;
#ifdef HAVE_MEMCPY
    return memset(mem, 0, sz);
#else
    while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
    return mem;
#endif
  }

  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD);
  if(!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  mem = mspace_calloc(arena_to_mspace(ar_ptr), bytes, 1);

  if (mem && ar_ptr != &main_arena)
    set_non_main_arena(mem, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!mem || is_mmapped(mem2chunk(mem)) ||
         ar_ptr == arena_for_chunk(mem2chunk(mem)));

  return mem;
}

void**
public_iCALLOc(size_t n, size_t elem_size, void* chunks[])
{
  struct malloc_arena* ar_ptr;
  void** m;

  arena_get(ar_ptr, n*(elem_size + FOOTER_OVERHEAD));
  if (!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    elem_size += FOOTER_OVERHEAD;
  m = mspace_independent_calloc(arena_to_mspace(ar_ptr), n, elem_size, chunks);

  if (m && ar_ptr != &main_arena) {
    while (n > 0)
      set_non_main_arena(m[--n], ar_ptr);
  }
  (void)mutex_unlock(&ar_ptr->mutex);
  return m;
}

void**
public_iCOMALLOc(size_t n, size_t sizes[], void* chunks[])
{
  struct malloc_arena* ar_ptr;
  size_t* m_sizes;
  size_t i;
  void** m;

  arena_get(ar_ptr, n*sizeof(size_t));
  if (!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena) {
    /* Temporary m_sizes[] array is ugly but it would be surprising to
       change the original sizes[]... */
    m_sizes = mspace_malloc(arena_to_mspace(ar_ptr), n*sizeof(size_t));
    if (!m_sizes) {
      (void)mutex_unlock(&ar_ptr->mutex);
      return 0;
    }
    for (i=0; i<n; ++i)
      m_sizes[i] = sizes[i] + FOOTER_OVERHEAD;
    if (!chunks) {
      chunks = mspace_malloc(arena_to_mspace(ar_ptr),
                             n*sizeof(void*)+FOOTER_OVERHEAD);
      if (!chunks) {
        mspace_free(arena_to_mspace(ar_ptr), m_sizes);
        (void)mutex_unlock(&ar_ptr->mutex);
        return 0;
      }
      set_non_main_arena(chunks, ar_ptr);
    }
  } else
    m_sizes = sizes;

  m = mspace_independent_comalloc(arena_to_mspace(ar_ptr), n, m_sizes, chunks);

  if (ar_ptr != &main_arena) {
    mspace_free(arena_to_mspace(ar_ptr), m_sizes);
    if (m)
      for (i=0; i<n; ++i)
        set_non_main_arena(m[i], ar_ptr);
  }
  (void)mutex_unlock(&ar_ptr->mutex);
  return m;
}
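
/* Usage sketch (not compiled): independent_comalloc() carves several
   differently sized objects out of one contiguous request; each
   returned element can later be passed to free() individually.  The
   struct names and element count below are hypothetical examples. */
#if 0
  size_t sizes[3] = { sizeof(struct header), sizeof(struct body),
                      sizeof(struct trailer) };
  void *parts[3];
  if (independent_comalloc(3, sizes, parts)) {
    /* parts[0..2] are now usable, individually freeable allocations */
    free(parts[0]); free(parts[1]); free(parts[2]);
  }
#endif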

#if 0 && !defined _LIBC

void
public_cFREe(void* m)
{
  public_fREe(m);
}

#endif /* _LIBC */

int
public_mTRIm(size_t s)
{
  int result;

  (void)mutex_lock(&main_arena.mutex);
  result = mspace_trim(arena_to_mspace(&main_arena), s);
  (void)mutex_unlock(&main_arena.mutex);
  return result;
}

size_t
public_mUSABLe(void* mem)
{
  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
    if (cinuse(p))
      return chunksize(p) - overhead_for(p);
  }
  return 0;
}

int
public_mALLOPt(int p, int v)
{
  int result;
  result = mspace_mallopt(p, v);
  return result;
}

void
public_mSTATs(void)
{
  int i;
  struct malloc_arena* ar_ptr;
  /*unsigned long in_use_b, system_b, avail_b;*/
#if THREAD_STATS
  long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
#endif

  if(__malloc_initialized < 0)
    ptmalloc_init ();
  for (i=0, ar_ptr = &main_arena;; ++i) {
    struct malloc_state* msp = arena_to_mspace(ar_ptr);

    fprintf(stderr, "Arena %d:\n", i);
    mspace_malloc_stats(msp);
#if THREAD_STATS
    stat_lock_direct += ar_ptr->stat_lock_direct;
    stat_lock_loop += ar_ptr->stat_lock_loop;
    stat_lock_wait += ar_ptr->stat_lock_wait;
#endif
    if (MALLOC_DEBUG > 1) {
      struct malloc_segment* mseg = &msp->seg;
      while (mseg) {
        fprintf(stderr, " seg %08lx-%08lx\n", (unsigned long)mseg->base,
                (unsigned long)(mseg->base + mseg->size));
        mseg = mseg->next;
      }
    }
    ar_ptr = ar_ptr->next;
    if (ar_ptr == &main_arena)
      break;
  }
#if THREAD_STATS
  fprintf(stderr, "locked directly  = %10ld\n", stat_lock_direct);
  fprintf(stderr, "locked in loop   = %10ld\n", stat_lock_loop);
  fprintf(stderr, "locked waiting   = %10ld\n", stat_lock_wait);
  fprintf(stderr, "locked total     = %10ld\n",
          stat_lock_direct + stat_lock_loop + stat_lock_wait);
  if (main_arena.stat_starter > 0)
    fprintf(stderr, "starter hooks    = %10ld\n", main_arena.stat_starter);
#endif
}

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */