/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* $Id: hooks.c,v 1.12 2004/11/05 14:42:32 wg Exp $ */

#ifndef DEFAULT_CHECK_ACTION
#define DEFAULT_CHECK_ACTION 1
#endif

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2).  The value is treated as a bit mask, so
   3 both prints the message and aborts. */

/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

#if !(USE_STARTER & 2)

static Void_t*
#if __STD_C
malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
#else
malloc_hook_ini(sz, caller)
     size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return public_mALLOc(sz);
}

static Void_t*
#if __STD_C
realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller)
#else
realloc_hook_ini(ptr, sz, caller)
     Void_t* ptr; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return public_rEALLOc(ptr, sz);
}

static Void_t*
#if __STD_C
memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
#else
memalign_hook_ini(alignment, sz, caller)
     size_t alignment; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return public_mEMALIGn(alignment, sz);
}

#endif /* !(USE_STARTER & 2) */

static int check_action = DEFAULT_CHECK_ACTION;

/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;

/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
  if(check_action & 1)
    fprintf(stderr, "malloc: using debugging hooks\n");
}
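
/* Usage note: with glibc's ptmalloc, these hooks are typically enabled
   by setting the MALLOC_CHECK_ environment variable before startup,
   e.g. MALLOC_CHECK_=3 ./a.out, which sets check_action to 3 (print a
   message and abort on detected corruption). */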

/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
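
/* Worked example (hypothetical chunk address): for p == 0x08062a18,
   p >> 3 == 0x0100c543 and p >> 11 == 0x000100c5, so MAGICBYTE(p) is
   (0x43 ^ 0xc5) == 0x86.  Tying the expected value to the chunk's own
   address means a value copied from another chunk will usually not
   match. */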

/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */
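
/* Resulting layout for a non-mmapped chunk (sketch only; indices are
   relative to the returned user pointer):

     mem[0 .. sz-1]   user data
     mem[sz]          MAGICBYTE(p), the overrun detector byte
     mem[sz+1 ..]     a chain of hop bytes in the slack space: an 0xFF
                      marker for each full 255-byte step plus one final
                      distance byte, letting mem2chunk_check() below walk
                      from the top of the chunk back down to the magic
                      byte. */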

static Void_t*
internal_function
#if __STD_C
mem2mem_check(Void_t *ptr, size_t sz)
#else
mem2mem_check(ptr, sz) Void_t *ptr; size_t sz;
#endif
{
  mchunkptr p;
  unsigned char* m_ptr = (unsigned char*)BOUNDED_N(ptr, sz);
  size_t i;

  if (!ptr)
    return ptr;
  p = mem2chunk(ptr);
  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz;
      i -= 0xFF) {
    if(i-sz < 0x100) {
      m_ptr[i] = (unsigned char)(i-sz);
      break;
    }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return (Void_t*)m_ptr;
}

/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
internal_function
#if __STD_C
mem2chunk_check(Void_t* mem)
#else
mem2chunk_check(mem) Void_t* mem;
#endif
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if (!chunk_is_mmapped(p)) {
    /* Must be a chunk in conventional heap memory. */
    int contig = contiguous(&main_arena);
    sz = chunksize(p);
    if((contig &&
        ((char*)p<mp_.sbrk_base ||
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  } else {
    unsigned long offset, page_mask = malloc_getpagesize-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  }
  return p;
}

/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
internal_function
#if __STD_C
top_check(void)
#else
top_check()
#endif
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = malloc_getpagesize;

  if (t == initial_top(&main_arena) ||
      (!chunk_is_mmapped(t) &&
       chunksize(t)>=MINSIZE &&
       prev_inuse(t) &&
       (!contiguous(&main_arena) ||
        (char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
    return 0;

  if(check_action & 1)
    fprintf(stderr, "malloc: top chunk is corrupt\n");
  if(check_action & 2)
    abort();

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE)) return -1;
  /* Call the `morecore' hook if necessary.  */
  if (__after_morecore_hook)
    (*__after_morecore_hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}

static Void_t*
#if __STD_C
malloc_check(size_t sz, const Void_t *caller)
#else
malloc_check(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t *victim;

  (void)mutex_lock(&main_arena.mutex);
  /* Request one extra byte so mem2mem_check() has room for the
     trailing magic byte. */
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}

static void
#if __STD_C
free_check(Void_t* mem, const Void_t *caller)
#else
free_check(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);
    if(check_action & 1)
      fprintf(stderr, "free(): invalid pointer %p!\n", mem);
    if(check_action & 2)
      abort();
    return;
  }
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
#endif
#if 0 /* Erase freed memory. */
  memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
#endif
  _int_free(&main_arena, mem);
  (void)mutex_unlock(&main_arena.mutex);
}

static Void_t*
#if __STD_C
realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
#else
realloc_check(oldmem, bytes, caller)
     Void_t* oldmem; size_t bytes; const Void_t *caller;
#endif
{
  mchunkptr oldp;
  INTERNAL_SIZE_T nb, oldsize;
  Void_t* newmem = 0;

  if (oldmem == 0) return malloc_check(bytes, NULL);
  (void)mutex_lock(&main_arena.mutex);
  oldp = mem2chunk_check(oldmem);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    if(check_action & 1)
      fprintf(stderr, "realloc(): invalid pointer %p!\n", oldmem);
    if(check_action & 2)
      abort();
    return malloc_check(bytes, NULL);
  }
  oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          MALLOC_COPY(BOUNDED_N(newmem, bytes+1), oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
#endif /* HAVE_MMAP */
    if (top_check() >= 0)
      newmem = _int_realloc(&main_arena, oldmem, bytes+1);
#if 0 /* Erase freed memory. */
    if(newmem)
      newp = mem2chunk(newmem);
    nb = chunksize(newp);
    if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
      memset((char*)oldmem + 2*sizeof(mbinptr), 0,
             oldsize - (2*sizeof(mbinptr)+2*SIZE_SZ+1));
    } else if(nb > oldsize+SIZE_SZ) {
      memset((char*)BOUNDED_N(chunk2mem(newp), bytes) + oldsize,
             0, nb - (oldsize+SIZE_SZ));
    }
#endif
#if HAVE_MMAP
  }
#endif
  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}

static Void_t*
#if __STD_C
memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
#else
memalign_check(alignment, bytes, caller)
     size_t alignment; size_t bytes; const Void_t *caller;
#endif
{
  INTERNAL_SIZE_T nb;
  Void_t* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment <  MINSIZE) alignment = MINSIZE;

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}

#if !defined NO_THREADS && USE_STARTER

/* The following hooks are used when the global initialization in
   ptmalloc_init() hasn't completed yet. */

static Void_t*
#if __STD_C
malloc_starter(size_t sz, const Void_t *caller)
#else
malloc_starter(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  ptmalloc_init_minimal();
  victim = _int_malloc(&main_arena, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}

static Void_t*
#if __STD_C
memalign_starter(size_t align, size_t sz, const Void_t *caller)
#else
memalign_starter(align, sz, caller) size_t align, sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  ptmalloc_init_minimal();
  victim = _int_memalign(&main_arena, align, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}

static void
#if __STD_C
free_starter(Void_t* mem, const Void_t *caller)
#else
free_starter(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  p = mem2chunk(mem);
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    munmap_chunk(p);
    return;
  }
#endif
  _int_free(&main_arena, mem);
}

#endif /* !defined NO_THREADS && USE_STARTER */


/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0.  */
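
/* A minimal usage sketch of this interface (hypothetical calling code;
   save_heap_image() and load_heap_image() stand in for whatever
   mechanism actually preserves and restores the heap contents): */
#if 0
void dump_phase(void)
{
  void *state = malloc_get_state();   /* opaque, malloc()ed snapshot */
  if (state)
    save_heap_image(state);           /* hypothetical helper */
}

void restore_phase(void)
{
  void *state = load_heap_image();    /* heap contents restored first */
  if (state && malloc_set_state(state) == 0)
    free(state);                      /* snapshot can be free()d after use */
}
#endif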

#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 2l) /* major*0x100 + minor */

struct malloc_save_state {
  long          magic;
  long          version;
  mbinptr       av[NBINS * 2 + 2];
  char*         sbrk_base;
  int           sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int  n_mmaps_max;
  unsigned long mmap_threshold;
  int           check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int  n_mmaps;
  unsigned int  max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int           using_malloc_checking;
};

Void_t*
public_gET_STATe(void)
{
  struct malloc_save_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state*)public_mALLOc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
#ifdef NO_THREADS
  ms->max_total_mem = mp_.max_total_mem;
#else
  ms->max_total_mem = 0;
#endif
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  (void)mutex_unlock(&main_arena.mutex);
  return (Void_t*)ms;
}

int
public_sET_STATe(Void_t* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  int i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks.  */
  clear_fastchunks(&main_arena);
  set_max_fast(&main_arena, DEFAULT_MXFAST);
  for (i=0; i<NFASTBINS; ++i)
    main_arena.fastbins[i] = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
                          largebin_index(chunksize(ms->av[2*i+3]))==i)) {
        first(b) = ms->av[2*i+2];
        last(b) = ms->av[2*i+3];
        /* Make sure the links to the bins within the heap are correct.  */
        first(b)->bk = b;
        last(b)->fd = b;
        /* Set bit in binblocks.  */
        mark_bin(&main_arena, i);
      } else {
        /* Oops, index computation from chunksize must have changed.
           Link the whole list into unsorted_chunks.  */
        first(b) = last(b) = b;
        b = unsorted_chunks(&main_arena);
        ms->av[2*i+2]->bk = b;
        ms->av[2*i+3]->fd = b->fd;
        b->fd->bk = ms->av[2*i+3];
        b->fd = ms->av[2*i+2];
      }
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
#ifdef NO_THREADS
  mp_.max_total_mem = ms->max_total_mem;
#endif
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it.  */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      __malloc_hook = 0;
      __free_hook = 0;
      __realloc_hook = 0;
      __memalign_hook = 0;
      using_malloc_checking = 0;
    }
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}
635 
636 /*
637  * Local variables:
638  * c-basic-offset: 2
639  * End:
640  */
641