1 
2 /*--------------------------------------------------------------------*/
3 /*--- malloc/free wrappers for detecting errors and updating bits. ---*/
4 /*---                                         mc_malloc_wrappers.c ---*/
5 /*--------------------------------------------------------------------*/
6 
7 /*
8    This file is part of MemCheck, a heavyweight Valgrind tool for
9    detecting memory errors.
10 
11    Copyright (C) 2000-2017 Julian Seward
12       jseward@acm.org
13 
14    This program is free software; you can redistribute it and/or
15    modify it under the terms of the GNU General Public License as
16    published by the Free Software Foundation; either version 2 of the
17    License, or (at your option) any later version.
18 
19    This program is distributed in the hope that it will be useful, but
20    WITHOUT ANY WARRANTY; without even the implied warranty of
21    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
22    General Public License for more details.
23 
24    You should have received a copy of the GNU General Public License
25    along with this program; if not, write to the Free Software
26    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27    02111-1307, USA.
28 
29    The GNU General Public License is contained in the file COPYING.
30 */
31 
32 #include "pub_tool_basics.h"
33 #include "pub_tool_execontext.h"
34 #include "pub_tool_poolalloc.h"
35 #include "pub_tool_hashtable.h"
36 #include "pub_tool_libcbase.h"
37 #include "pub_tool_libcassert.h"
38 #include "pub_tool_libcprint.h"
39 #include "pub_tool_libcproc.h"
40 #include "pub_tool_mallocfree.h"
41 #include "pub_tool_options.h"
42 #include "pub_tool_replacemalloc.h"
43 #include "pub_tool_threadstate.h"
44 #include "pub_tool_tooliface.h"     // Needed for mc_include.h
45 #include "pub_tool_stacktrace.h"    // For VG_(get_and_pp_StackTrace)
46 #include "pub_tool_xarray.h"
47 #include "pub_tool_xtree.h"
48 #include "pub_tool_xtmemory.h"
49 
50 #include "mc_include.h"
51 
52 /*------------------------------------------------------------*/
53 /*--- Defns                                                ---*/
54 /*------------------------------------------------------------*/
55 
56 /* Stats ... */
57 static SizeT cmalloc_n_mallocs  = 0;
58 static SizeT cmalloc_n_frees    = 0;
59 static ULong cmalloc_bs_mallocd = 0;
60 
61 /* For debug printing to do with mempools: what stack trace
62    depth to show. */
63 #define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16
64 
65 
66 /*------------------------------------------------------------*/
67 /*--- Tracking malloc'd and free'd blocks                  ---*/
68 /*------------------------------------------------------------*/
69 
70 SizeT MC_(Malloc_Redzone_SzB) = -10000000; // If used before set, should BOMB
71 
72 /* Record malloc'd blocks. */
73 VgHashTable *MC_(malloc_list) = NULL;
74 
75 /* Memory pools: a hash table of MC_Mempools.  Search key is
76    MC_Mempool::pool. */
77 VgHashTable *MC_(mempool_list) = NULL;
78 
79 /* Pool allocator for MC_Chunk. */
80 PoolAlloc *MC_(chunk_poolalloc) = NULL;
81 static
82 MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
83                             MC_AllocKind kind);
84 static inline
85 void delete_MC_Chunk (MC_Chunk* mc);
86 
87 /* Records blocks after freeing. */
88 /* Blocks freed by the client are queued in one of two lists of
89    freed blocks not yet physically freed:
90    the "big blocks" freed list, and
91    the "small blocks" freed list.
92    Blocks with a size >= MC_(clo_freelist_big_blocks)
93    are linked in the big blocks freed list.
94    This allows a client to allocate and free big blocks
95    (e.g. bigger than VG_(clo_freelist_vol)) without immediately
96    losing all protection against dangling pointers.
97    Position [0] is for big blocks, [1] is for small blocks. */
98 static MC_Chunk* freed_list_start[2]  = {NULL, NULL};
99 static MC_Chunk* freed_list_end[2]    = {NULL, NULL};
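/* The queues are drained lazily: blocks are only physically freed by
   release_oldest_block(), which create_MC_Chunk() calls whenever
   VG_(free_queue_volume) exceeds MC_(clo_freelist_vol). */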
100 
101 /* Put a shadow chunk on the freed blocks queue, possibly freeing up
102    some of the oldest blocks in the queue at the same time. */
103 static void add_to_freed_queue ( MC_Chunk* mc )
104 {
105    const Bool show = False;
106    const int l = (mc->szB >= MC_(clo_freelist_big_blocks) ? 0 : 1);
107 
108    /* Put it at the end of the freed list, unless the block
109       would be released immediately anyway: in that case, we
110       put it at the head of the freed list. */
111    if (freed_list_end[l] == NULL) {
112       tl_assert(freed_list_start[l] == NULL);
113       mc->next = NULL;
114       freed_list_end[l]    = freed_list_start[l] = mc;
115    } else {
116       tl_assert(freed_list_end[l]->next == NULL);
117       if (mc->szB >= MC_(clo_freelist_vol)) {
118          mc->next = freed_list_start[l];
119          freed_list_start[l] = mc;
120       } else {
121          mc->next = NULL;
122          freed_list_end[l]->next = mc;
123          freed_list_end[l]       = mc;
124       }
125    }
126    VG_(free_queue_volume) += (Long)mc->szB;
127    if (show)
128       VG_(printf)("mc_freelist: acquire: volume now %lld\n",
129                   VG_(free_queue_volume));
130    VG_(free_queue_length)++;
131 }
132 
133 /* Release enough of the oldest blocks to bring the free queue
134    volume below MC_(clo_freelist_vol).
135    Start with the big-block list first.
136    On entry, VG_(free_queue_volume) must be > MC_(clo_freelist_vol).
137    On exit, VG_(free_queue_volume) will be <= MC_(clo_freelist_vol). */
138 static void release_oldest_block(void)
139 {
140    const Bool show = False;
141    int i;
142    tl_assert (VG_(free_queue_volume) > MC_(clo_freelist_vol));
143    tl_assert (freed_list_start[0] != NULL || freed_list_start[1] != NULL);
144 
145    for (i = 0; i < 2; i++) {
146       while (VG_(free_queue_volume) > MC_(clo_freelist_vol)
147              && freed_list_start[i] != NULL) {
148          MC_Chunk* mc1;
149 
150          tl_assert(freed_list_end[i] != NULL);
151 
152          mc1 = freed_list_start[i];
153          VG_(free_queue_volume) -= (Long)mc1->szB;
154          VG_(free_queue_length)--;
155          if (show)
156             VG_(printf)("mc_freelist: discard: volume now %lld\n",
157                         VG_(free_queue_volume));
158          tl_assert(VG_(free_queue_volume) >= 0);
159 
160          if (freed_list_start[i] == freed_list_end[i]) {
161             freed_list_start[i] = freed_list_end[i] = NULL;
162          } else {
163             freed_list_start[i] = mc1->next;
164          }
165          mc1->next = NULL; /* just paranoia */
166 
167          /* free MC_Chunk */
168          if (MC_AllocCustom != mc1->allockind)
169             VG_(cli_free) ( (void*)(mc1->data) );
170          delete_MC_Chunk ( mc1 );
171       }
172    }
173 }
174 
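/* Search both freed lists for a chunk whose block, widened by the
   malloc redzones, contains address 'a'; returns NULL if no
   in-quarantine block brackets 'a'. */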
175 MC_Chunk* MC_(get_freed_block_bracketting) (Addr a)
176 {
177    int i;
178    for (i = 0; i < 2; i++) {
179       MC_Chunk*  mc;
180       mc = freed_list_start[i];
181       while (mc) {
182          if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
183                                     MC_(Malloc_Redzone_SzB) ))
184             return mc;
185          mc = mc->next;
186       }
187    }
188    return NULL;
189 }
190 
191 /* Allocate a shadow chunk, put it on the appropriate list.
192    If needed, release oldest blocks from freed list. */
193 static
194 MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
195                             MC_AllocKind kind)
196 {
197    MC_Chunk* mc  = VG_(allocEltPA)(MC_(chunk_poolalloc));
198    mc->data      = p;
199    mc->szB       = szB;
200    mc->allockind = kind;
201    switch ( MC_(n_where_pointers)() ) {
202       case 2: mc->where[1] = 0; // fall through to case 1
203       case 1: mc->where[0] = 0; // fall through to case 0
204       case 0: break;
205       default: tl_assert(0);
206    }
207    MC_(set_allocated_at) (tid, mc);
208 
209    /* Each time a new MC_Chunk is created, release oldest blocks
210       if the free list volume is exceeded. */
211    if (VG_(free_queue_volume) > MC_(clo_freelist_vol))
212       release_oldest_block();
213 
214    /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
215       the mc->data field isn't visible to the leak checker.  If memory
216       management is working correctly, any pointer returned by VG_(malloc)
217       should be noaccess as far as the client is concerned. */
218    if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
219       VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
220    }
221    return mc;
222 }
223 
224 static inline
225 void delete_MC_Chunk (MC_Chunk* mc)
226 {
227    VG_(freeEltPA) (MC_(chunk_poolalloc), mc);
228 }
229 
230 // True if mc is in the given block list.
231 static Bool in_block_list (const VgHashTable *block_list, MC_Chunk* mc)
232 {
233    MC_Chunk* found_mc = VG_(HT_lookup) ( block_list, (UWord)mc->data );
234    if (found_mc) {
235       tl_assert (found_mc->data == mc->data);
236       /* If a user builds a pool from a malloc-ed superblock
237          and uses VALGRIND_MALLOCLIKE_BLOCK to "mark"
238          an address at the beginning of this superblock, then
239          this address will appear twice in the block_list.
240          We handle this case by checking size and allockind.
241          Note: I suspect that having the same block
242          twice in MC_(malloc_list) is a recipe for bugs.
243          It might be better to create a "standard" mempool to
244          handle all this more cleanly. */
245       if (found_mc->szB != mc->szB
246           || found_mc->allockind != mc->allockind)
247          return False;
248       tl_assert (found_mc == mc);
249       return True;
250    } else
251       return False;
252 }
253 
254 // True if mc is a live block (not yet freed).
255 static Bool live_block (MC_Chunk* mc)
256 {
257    if (mc->allockind == MC_AllocCustom) {
258       MC_Mempool* mp;
259       VG_(HT_ResetIter)(MC_(mempool_list));
260       while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
261          if ( in_block_list (mp->chunks, mc) )
262             return True;
263       }
264    }
265    /* Note: we fall back here when an MC_AllocCustom block is not
266       found, as such a block can also be inserted in MC_(malloc_list)
267       by VALGRIND_MALLOCLIKE_BLOCK. */
268    return in_block_list ( MC_(malloc_list), mc );
269 }
270 
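/* How mc->where[] is used, depending on --keep-stacktraces:
     KS_alloc           : where[0] = allocation stack
     KS_free            : where[0] = free stack (once the block is freed)
     KS_alloc_then_free : where[0] = allocation stack, overwritten by the
                          free stack when the block is freed
     KS_alloc_and_free  : where[0] = allocation stack, where[1] = free stack
     KS_none            : no stacks are recorded. */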
271 ExeContext* MC_(allocated_at) (MC_Chunk* mc)
272 {
273    switch (MC_(clo_keep_stacktraces)) {
274       case KS_none:            return VG_(null_ExeContext) ();
275       case KS_alloc:           return mc->where[0];
276       case KS_free:            return VG_(null_ExeContext) ();
277       case KS_alloc_then_free: return (live_block(mc) ?
278                                        mc->where[0] : VG_(null_ExeContext) ());
279       case KS_alloc_and_free:  return mc->where[0];
280       default: tl_assert (0);
281    }
282 }
283 
284 ExeContext* MC_(freed_at) (MC_Chunk* mc)
285 {
286    switch (MC_(clo_keep_stacktraces)) {
287       case KS_none:            return VG_(null_ExeContext) ();
288       case KS_alloc:           return VG_(null_ExeContext) ();
289       case KS_free:            return (mc->where[0] ?
290                                        mc->where[0] : VG_(null_ExeContext) ());
291       case KS_alloc_then_free: return (live_block(mc) ?
292                                        VG_(null_ExeContext) () : mc->where[0]);
293       case KS_alloc_and_free:  return (mc->where[1] ?
294                                        mc->where[1] : VG_(null_ExeContext) ());
295       default: tl_assert (0);
296    }
297 }
298 
299 void  MC_(set_allocated_at) (ThreadId tid, MC_Chunk* mc)
300 {
301    switch (MC_(clo_keep_stacktraces)) {
302       case KS_none:            return;
303       case KS_alloc:           break;
304       case KS_free:            return;
305       case KS_alloc_then_free: break;
306       case KS_alloc_and_free:  break;
307       default: tl_assert (0);
308    }
309    mc->where[0] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
310    if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
311        VG_(XTMemory_Full_alloc)(mc->szB, mc->where[0]);
312 }
313 
314 void  MC_(set_freed_at) (ThreadId tid, MC_Chunk* mc)
315 {
316    Int pos;
317    ExeContext* ec_free;
318 
319    switch (MC_(clo_keep_stacktraces)) {
320       case KS_none:            return;
321       case KS_alloc:
322                                if (LIKELY(VG_(clo_xtree_memory)
323                                           != Vg_XTMemory_Full))
324                                   return;
325                                pos = -1; break;
326       case KS_free:            pos = 0; break;
327       case KS_alloc_then_free: pos = 0; break;
328       case KS_alloc_and_free:  pos = 1; break;
329       default: tl_assert (0);
330    }
331    /* We need the execontext for the free operation, to store it
332       in the mc chunk and/or for full xtree memory profiling.
333       Note: we are guaranteed to find the ec_alloc in mc->where[0], as
334       mc_post_clo_init verifies the consistency of --xtree-memory and
335       --keep-stacktraces. */
336    ec_free = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
337    if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
338        VG_(XTMemory_Full_free)(mc->szB, mc->where[0], ec_free);
339    if (LIKELY(pos >= 0))
340       mc->where[pos] = ec_free;
341 }
342 
343 UInt MC_(n_where_pointers) (void)
344 {
345    switch (MC_(clo_keep_stacktraces)) {
346       case KS_none:            return 0;
347       case KS_alloc:
348       case KS_free:
349       case KS_alloc_then_free: return 1;
350       case KS_alloc_and_free:  return 2;
351       default: tl_assert (0);
352    }
353 }
354 
355 /*------------------------------------------------------------*/
356 /*--- client_malloc(), etc                                 ---*/
357 /*------------------------------------------------------------*/
358 
359 /* Allocate memory and note change in memory available */
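/* If 'p' is non-zero, the client supplied the address itself (custom
   allocations coming from MEMPOOL_ALLOC or MALLOCLIKE_BLOCK requests);
   otherwise the space is obtained here with VG_(cli_malloc). */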
360 void* MC_(new_block) ( ThreadId tid,
361                        Addr p, SizeT szB, SizeT alignB,
362                        Bool is_zeroed, MC_AllocKind kind,
363                        VgHashTable *table)
364 {
365    MC_Chunk* mc;
366 
367    // Allocate and zero if necessary
368    if (p) {
369       tl_assert(MC_AllocCustom == kind);
370    } else {
371       tl_assert(MC_AllocCustom != kind);
372       p = (Addr)VG_(cli_malloc)( alignB, szB );
373       if (!p) {
374          return NULL;
375       }
376       if (is_zeroed) {
377          VG_(memset)((void*)p, 0, szB);
378       } else
379       if (MC_(clo_malloc_fill) != -1) {
380          tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
381          VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
382       }
383    }
384 
385    // Only update stats if allocation succeeded.
386    cmalloc_n_mallocs ++;
387    cmalloc_bs_mallocd += (ULong)szB;
388    mc = create_MC_Chunk (tid, p, szB, kind);
389    VG_(HT_add_node)( table, mc );
390 
391    if (is_zeroed)
392       MC_(make_mem_defined)( p, szB );
393    else {
394       UInt ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(mc));
395       tl_assert(VG_(is_plausible_ECU)(ecu));
396       MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
397    }
398 
399    return (void*)p;
400 }
401 
402 void* MC_(malloc) ( ThreadId tid, SizeT n )
403 {
404    if (MC_(record_fishy_value_error)(tid, "malloc", "size", n)) {
405       return NULL;
406    } else {
407       return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
408          /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
409    }
410 }
411 
412 void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
413 {
414    if (MC_(record_fishy_value_error)(tid, "__builtin_new", "size", n)) {
415       return NULL;
416    } else {
417       return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
418          /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
419    }
420 }
421 
422 void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
423 {
424    if (MC_(record_fishy_value_error)(tid, "__builtin_vec_new", "size", n)) {
425       return NULL;
426    } else {
427       return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
428          /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
429    }
430 }
431 
432 void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
433 {
434    if (MC_(record_fishy_value_error)(tid, "memalign", "size", n)) {
435       return NULL;
436    } else {
437       return MC_(new_block) ( tid, 0, n, alignB,
438          /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
439    }
440 }
441 
442 void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
443 {
444    if (MC_(record_fishy_value_error)(tid, "calloc", "nmemb", nmemb) ||
445        MC_(record_fishy_value_error)(tid, "calloc", "size", size1)) {
446       return NULL;
447    } else {
448       return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
449          /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
450    }
451 }
452 
453 static
454 void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
455 {
456    /* Note: we do not free fill the custom allocs produced
457       by MEMPOOL or by MALLOC/FREELIKE_BLOCK requests. */
458    if (MC_(clo_free_fill) != -1 && MC_AllocCustom != mc->allockind ) {
459       tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
460       VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
461    }
462 
463    /* Note: make redzones noaccess again -- just in case user made them
464       accessible with a client request... */
465    MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );
466 
467    /* Record where freed */
468    MC_(set_freed_at) (tid, mc);
469    /* Put it out of harm's way for a while */
470    add_to_freed_queue ( mc );
471    /* If the free list volume is bigger than MC_(clo_freelist_vol),
472       we wait till the next block allocation to release blocks.
473       This increases the chance of discovering dangling-pointer usage,
474       even for big blocks being freed by the client. */
475 }
476 
477 
478 static
479 void record_freemismatch_error (ThreadId tid, MC_Chunk* mc)
480 {
481    /* Only show such an error if the user hasn't disabled doing so. */
482    if (!MC_(clo_show_mismatched_frees))
483       return;
484 
485    /* MC_(record_freemismatch_error) reports errors for still
486       allocated blocks, but we are in the middle of freeing this one.
487       To report the error correctly, we re-insert the chunk (making it
488       again a "clean allocated block"), report the error, and then
489       re-remove the chunk.  This avoids doing a VG_(HT_lookup)
490       followed by a VG_(HT_remove) in all "non-erroneous cases". */
491    VG_(HT_add_node)( MC_(malloc_list), mc );
492    MC_(record_freemismatch_error) ( tid, mc );
493    if ((mc != VG_(HT_remove) ( MC_(malloc_list), (UWord)mc->data )))
494       tl_assert(0);
495 }
496 
497 void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
498 {
499    MC_Chunk* mc;
500 
501    cmalloc_n_frees++;
502 
503    mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
504    if (mc == NULL) {
505       MC_(record_free_error) ( tid, p );
506    } else {
507       /* check if it is a matching free() / delete / delete [] */
508       if (kind != mc->allockind) {
509          tl_assert(p == mc->data);
510          record_freemismatch_error ( tid, mc );
511       }
512       die_and_free_mem ( tid, mc, rzB );
513    }
514 }
515 
516 void MC_(free) ( ThreadId tid, void* p )
517 {
518    MC_(handle_free)(
519       tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );
520 }
521 
522 void MC_(__builtin_delete) ( ThreadId tid, void* p )
523 {
524    MC_(handle_free)(
525       tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew);
526 }
527 
528 void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
529 {
530    MC_(handle_free)(
531       tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec);
532 }
533 
534 void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
535 {
536    MC_Chunk* old_mc;
537    MC_Chunk* new_mc;
538    Addr      a_new;
539    SizeT     old_szB;
540 
541    if (MC_(record_fishy_value_error)(tid, "realloc", "size", new_szB))
542       return NULL;
543 
544    cmalloc_n_frees ++;
545    cmalloc_n_mallocs ++;
546    cmalloc_bs_mallocd += (ULong)new_szB;
547 
548    /* Remove the old block */
549    old_mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
550    if (old_mc == NULL) {
551       MC_(record_free_error) ( tid, (Addr)p_old );
552       /* We return to the program regardless. */
553       return NULL;
554    }
555 
556    /* check if it's a matching free() / delete / delete [] */
557    if (MC_AllocMalloc != old_mc->allockind) {
558       /* cannot realloc a range that was allocated with new or new [] */
559       tl_assert((Addr)p_old == old_mc->data);
560       record_freemismatch_error ( tid, old_mc );
561       /* but keep going anyway */
562    }
563 
564    old_szB = old_mc->szB;
565 
566    /* Get new memory */
567    a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);
568 
569    if (a_new) {
570       /* In all cases, even when the new size is smaller or unchanged, we
571          reallocate and copy the contents, and make the old block
572          inaccessible.  This is so as to guarantee to catch all cases of
573          accesses via the old address after reallocation, regardless of
574          the change in size.  (Of course the ability to detect accesses
575          to the old block also depends on the size of the freed blocks
576          queue). */
577 
578       // Allocate a new chunk.
579       new_mc = create_MC_Chunk( tid, a_new, new_szB, MC_AllocMalloc );
580 
581       // Now insert the new mc (with a new 'data' field) into malloc_list.
582       VG_(HT_add_node)( MC_(malloc_list), new_mc );
583 
584       /* Retained part is copied, red zones set as normal */
585 
586       /* Redzone at the front */
587       MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB),
588                               MC_(Malloc_Redzone_SzB) );
589 
590       /* payload */
591       if (old_szB >= new_szB) {
592          /* new size is smaller or the same */
593 
594          /* Copy address range state and value from old to new */
595          MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
596          VG_(memcpy)((void*)a_new, p_old, new_szB);
597       } else {
598          /* new size is bigger */
599          UInt        ecu;
600 
601          /* Copy address range state and value from old to new */
602          MC_(copy_address_range_state) ( (Addr)p_old, a_new, old_szB );
603          VG_(memcpy)((void*)a_new, p_old, old_szB);
604 
605          // If the block has grown, we mark the grown area as undefined.
606          // We have to do that after VG_(HT_add_node) to ensure the ecu
607          // execontext is for a fully allocated block.
608          ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(new_mc));
609          tl_assert(VG_(is_plausible_ECU)(ecu));
610          MC_(make_mem_undefined_w_otag)( a_new+old_szB,
611                                          new_szB-old_szB,
612                                          ecu | MC_OKIND_HEAP );
613 
614          /* Possibly fill new area with specified junk */
615          if (MC_(clo_malloc_fill) != -1) {
616             tl_assert(MC_(clo_malloc_fill) >= 0x00
617                       && MC_(clo_malloc_fill) <= 0xFF);
618             VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
619                                                 new_szB-old_szB);
620          }
621       }
622 
623       /* Redzone at the back. */
624       MC_(make_mem_noaccess)        ( a_new+new_szB, MC_(Malloc_Redzone_SzB));
625 
626       /* Possibly fill freed area with specified junk. */
627       if (MC_(clo_free_fill) != -1) {
628          tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
629          VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
630       }
631 
632       /* Free old memory */
633       /* Nb: we have to allocate a new MC_Chunk for the new memory rather
634          than recycling the old one, so that any erroneous accesses to the
635          old memory are reported. */
636       die_and_free_mem ( tid, old_mc, MC_(Malloc_Redzone_SzB) );
637 
638    } else {
639       /* Could not allocate new client memory.
640          Re-insert the old_mc (with the old ptr) in the HT, as old_mc was
641          unconditionally removed at the beginning of the function. */
642       VG_(HT_add_node)( MC_(malloc_list), old_mc );
643    }
644 
645    return (void*)a_new;
646 }
647 
648 SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
649 {
650    MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
651 
652    // There may be slop, but pretend there isn't because only the asked-for
653    // area will be marked as addressable.
654    return ( mc ? mc->szB : 0 );
655 }
656 
657 /* This handles the in place resize of a block, as performed by the
658    VALGRIND_RESIZEINPLACE_BLOCK client request.  It is unrelated to,
659    and not used for, handling of the normal libc realloc()
660    function. */
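/* For reference, a minimal client-side sketch of how this is driven,
   assuming only the standard memcheck.h client-request macros
   (my_pool_alloc is a hypothetical pool allocator):

      #include "valgrind/memcheck.h"

      char* buf = my_pool_alloc(pool, 128);
      VALGRIND_MALLOCLIKE_BLOCK(buf, 128, 0, 0);
      // ... later, shrink the block in place from 128 to 64 bytes:
      VALGRIND_RESIZEINPLACE_BLOCK(buf, 128, 64, 0);
*/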
661 void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
662                                SizeT oldSizeB, SizeT newSizeB, SizeT rzB)
663 {
664    MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
665    if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
666       /* Reject if: p is not found, or oldSizeB is wrong,
667          or new block would be empty. */
668       MC_(record_free_error) ( tid, p );
669       return;
670    }
671 
672    if (oldSizeB == newSizeB)
673       return;
674 
675    if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
676        VG_(XTMemory_Full_resize_in_place)(oldSizeB,  newSizeB, mc->where[0]);
677 
678    mc->szB = newSizeB;
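   /* Adjust the shadow state: a shrink makes the dropped tail (plus the
      trailing redzone) noaccess; a grow marks the added tail undefined,
      tagged with the resize stack trace, and re-establishes the redzone
      after the new end. */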
679    if (newSizeB < oldSizeB) {
680       MC_(make_mem_noaccess)( p + newSizeB, oldSizeB - newSizeB + rzB );
681    } else {
682       ExeContext* ec  = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
683       UInt        ecu = VG_(get_ECU_from_ExeContext)(ec);
684       MC_(make_mem_undefined_w_otag)( p + oldSizeB, newSizeB - oldSizeB,
685                                       ecu | MC_OKIND_HEAP );
686       if (rzB > 0)
687          MC_(make_mem_noaccess)( p + newSizeB, rzB );
688    }
689 }
690 
691 
692 /*------------------------------------------------------------*/
693 /*--- Memory pool stuff.                                   ---*/
694 /*------------------------------------------------------------*/
695 
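/* For reference, a minimal sketch of how a client typically drives the
   mempool machinery below, assuming only the standard memcheck.h
   client-request macros (the superblock and its sub-allocation are
   hypothetical):

      #include "valgrind/memcheck.h"

      static char superblock[65536];
      VALGRIND_MAKE_MEM_NOACCESS(superblock, sizeof superblock);
      VALGRIND_CREATE_MEMPOOL(superblock, 0, 0);   // pool, rzB, is_zeroed

      char* obj = superblock + 128;      // carved out by the pool itself
      VALGRIND_MEMPOOL_ALLOC(superblock, obj, 100);
      // ...
      VALGRIND_MEMPOOL_FREE(superblock, obj);
      VALGRIND_DESTROY_MEMPOOL(superblock);
*/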
696 /* Set to 1 for intensive sanity checking.  Is very expensive though
697    and should not be used in production scenarios.  See #255966. */
698 #define MP_DETAILED_SANITY_CHECKS 0
699 
700 static void check_mempool_sane(MC_Mempool* mp); /*forward*/
701 
702 static void free_mallocs_in_mempool_block (MC_Mempool* mp,
703                                            Addr StartAddr,
704                                            Addr EndAddr)
705 {
706    MC_Chunk *mc;
707    ThreadId tid;
708 
709    tl_assert(mp->auto_free);
710 
711    if (VG_(clo_verbosity) > 2) {
712       VG_(message)(Vg_UserMsg,
713           "free_mallocs_in_mempool_block: Start 0x%lx size %lu\n",
714           StartAddr, (SizeT) (EndAddr - StartAddr));
715    }
716 
717    tid = VG_(get_running_tid)();
718 
719    VG_(HT_ResetIter)(MC_(malloc_list));
720    while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
721       if (mc->data >= StartAddr && mc->data + mc->szB <= EndAddr) {
722 	 if (VG_(clo_verbosity) > 2) {
723 	    VG_(message)(Vg_UserMsg, "Auto-free of 0x%lx size=%lu\n",
724 			    mc->data, (mc->szB + 0UL));
725 	 }
726 
727 	 VG_(HT_remove_at_Iter)(MC_(malloc_list));
728 	 die_and_free_mem(tid, mc, mp->rzB);
729       }
730    }
731 }
732 
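/* Register a new pool anchored at 'pool'.  Note that a pool which
   auto-frees the malloc'd blocks contained in its superblocks must also
   be declared as a metapool; the combination auto_free && !metapool is
   rejected below. */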
733 void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed,
734                          Bool auto_free, Bool metapool)
735 {
736    MC_Mempool* mp;
737 
738    if (VG_(clo_verbosity) > 2 || (auto_free && !metapool)) {
739       VG_(message)(Vg_UserMsg,
740                    "create_mempool(0x%lx, rzB=%u, zeroed=%d,"
741                    " autofree=%d, metapool=%d)\n",
742                    pool, rzB, is_zeroed,
743                    auto_free, metapool);
744       VG_(get_and_pp_StackTrace)
745          (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
746       if (auto_free && !metapool)
747          VG_(tool_panic)("Inappropriate use of mempool:"
748                          " an auto free pool must be a meta pool. Aborting\n");
749    }
750 
751    mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
752    if (mp != NULL) {
753      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
754    }
755 
756    mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
757    mp->pool       = pool;
758    mp->rzB        = rzB;
759    mp->is_zeroed  = is_zeroed;
760    mp->auto_free  = auto_free;
761    mp->metapool   = metapool;
762    mp->chunks     = VG_(HT_construct)( "MC_(create_mempool)" );
763    check_mempool_sane(mp);
764 
765    /* Paranoia ... ensure this area is off-limits to the client, so
766       the mp->pool field isn't visible to the leak checker.  If memory
767       management is working correctly, any pointer returned by
768       VG_(malloc) should be noaccess as far as the client is
769       concerned. */
770    if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
771       VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
772    }
773 
774    VG_(HT_add_node)( MC_(mempool_list), mp );
775 }
776 
777 void MC_(destroy_mempool)(Addr pool)
778 {
779    MC_Chunk*   mc;
780    MC_Mempool* mp;
781 
782    if (VG_(clo_verbosity) > 2) {
783       VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
784       VG_(get_and_pp_StackTrace)
785          (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
786    }
787 
788    mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );
789 
790    if (mp == NULL) {
791       ThreadId tid = VG_(get_running_tid)();
792       MC_(record_illegal_mempool_error) ( tid, pool );
793       return;
794    }
795    check_mempool_sane(mp);
796 
797    // Clean up the chunks, one by one
798    VG_(HT_ResetIter)(mp->chunks);
799    while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
800       /* Note: make redzones noaccess again -- just in case user made them
801          accessible with a client request... */
802       MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
803    }
804    // Destroy the chunk table
805    VG_(HT_destruct)(mp->chunks, (void (*)(void *))delete_MC_Chunk);
806 
807    VG_(free)(mp);
808 }
809 
810 static Int
811 mp_compar(const void* n1, const void* n2)
812 {
813    const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
814    const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
815    if (mc1->data < mc2->data) return -1;
816    if (mc1->data > mc2->data) return  1;
817    return 0;
818 }
819 
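/* Consistency check on a pool: its chunks, sorted by start address,
   must be in increasing order and must not overlap.  Offending chunks
   are dumped together with their allocation stacks. */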
820 static void
821 check_mempool_sane(MC_Mempool* mp)
822 {
823    UInt n_chunks, i, bad = 0;
824    static UInt tick = 0;
825 
826    MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
827    if (!chunks)
828       return;
829 
830    if (VG_(clo_verbosity) > 1) {
831      if (tick++ >= 10000)
832        {
833 	 UInt total_pools = 0, total_chunks = 0;
834 	 MC_Mempool* mp2;
835 
836 	 VG_(HT_ResetIter)(MC_(mempool_list));
837 	 while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
838 	   total_pools++;
839 	   VG_(HT_ResetIter)(mp2->chunks);
840 	   while (VG_(HT_Next)(mp2->chunks)) {
841 	     total_chunks++;
842 	   }
843 	 }
844 
845          VG_(message)(Vg_UserMsg,
846                       "Total mempools active: %u pools, %u chunks\n",
847 		      total_pools, total_chunks);
848 	 tick = 0;
849        }
850    }
851 
852 
853    VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);
854 
855    /* Sanity check; assert that the blocks are now in order */
856    for (i = 0; i < n_chunks-1; i++) {
857       if (chunks[i]->data > chunks[i+1]->data) {
858          VG_(message)(Vg_UserMsg,
859                       "Mempool chunk %u / %u is out of order "
860                       "wrt. its successor\n",
861                       i+1, n_chunks);
862          bad = 1;
863       }
864    }
865 
866    /* Sanity check -- make sure they don't overlap */
867    for (i = 0; i < n_chunks-1; i++) {
868       if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
869          VG_(message)(Vg_UserMsg,
870                       "Mempool chunk %u / %u overlaps with its successor\n",
871                       i+1, n_chunks);
872          bad = 1;
873       }
874    }
875 
876    if (bad) {
877          VG_(message)(Vg_UserMsg,
878                 "Bad mempool (%u chunks), dumping chunks for inspection:\n",
879                 n_chunks);
880          for (i = 0; i < n_chunks; ++i) {
881             VG_(message)(Vg_UserMsg,
882                          "Mempool chunk %u / %u: %lu bytes "
883                          "[%lx,%lx), allocated:\n",
884                          i+1,
885                          n_chunks,
886                          chunks[i]->szB + 0UL,
887                          chunks[i]->data,
888                          chunks[i]->data + chunks[i]->szB);
889 
890             VG_(pp_ExeContext)(MC_(allocated_at)(chunks[i]));
891          }
892    }
893    VG_(free)(chunks);
894 }
895 
896 void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
897 {
898    MC_Mempool* mp;
899 
900    if (VG_(clo_verbosity) > 2) {
901       VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %lu)\n",
902                                pool, addr, szB);
903       VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
904    }
905 
906    mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
907    if (mp == NULL) {
908       MC_(record_illegal_mempool_error) ( tid, pool );
909    } else {
910       if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
911       MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
912                      MC_AllocCustom, mp->chunks);
913       if (mp->rzB > 0) {
914          // This is not needed if the user application has properly
915          // marked the superblock noaccess when defining the mempool.
916          // We nevertheless mark the redzones noaccess, to catch some
917          // bugs in case the user forgot.
918          MC_(make_mem_noaccess) ( addr - mp->rzB, mp->rzB);
919          MC_(make_mem_noaccess) ( addr + szB, mp->rzB);
920       }
921       if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
922    }
923 }
924 
925 void MC_(mempool_free)(Addr pool, Addr addr)
926 {
927    MC_Mempool*  mp;
928    MC_Chunk*    mc;
929    ThreadId     tid = VG_(get_running_tid)();
930 
931    mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
932    if (mp == NULL) {
933       MC_(record_illegal_mempool_error)(tid, pool);
934       return;
935    }
936 
937    if (VG_(clo_verbosity) > 2) {
938       VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
939       VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
940    }
941 
942    if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
943    mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
944    if (mc == NULL) {
945       MC_(record_free_error)(tid, (Addr)addr);
946       return;
947    }
948 
949    if (mp->auto_free) {
950       free_mallocs_in_mempool_block(mp, mc->data, mc->data + (mc->szB + 0UL));
951    }
952 
953    if (VG_(clo_verbosity) > 2) {
954       VG_(message)(Vg_UserMsg,
955                    "mempool_free(0x%lx, 0x%lx) freed chunk of %lu bytes\n",
956                    pool, addr, mc->szB + 0UL);
957    }
958 
959    die_and_free_mem ( tid, mc, mp->rzB );
960    if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
961 }
962 
963 
964 void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
965 {
966    MC_Mempool*  mp;
967    MC_Chunk*    mc;
968    ThreadId     tid = VG_(get_running_tid)();
969    UInt         n_shadows, i;
970    VgHashNode** chunks;
971 
972    if (VG_(clo_verbosity) > 2) {
973       VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %lu)\n",
974                                pool, addr, szB);
975       VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
976    }
977 
978    mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
979    if (mp == NULL) {
980       MC_(record_illegal_mempool_error)(tid, pool);
981       return;
982    }
983 
984    check_mempool_sane(mp);
985    chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
986    if (n_shadows == 0) {
987      tl_assert(chunks == NULL);
988      return;
989    }
990 
991    tl_assert(chunks != NULL);
992    for (i = 0; i < n_shadows; ++i) {
993 
994       Addr lo, hi, min, max;
995 
996       mc = (MC_Chunk*) chunks[i];
997 
998       lo = mc->data;
999       hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;
1000 
1001 #define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))
1002 
1003       if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {
1004 
1005          /* The current chunk is entirely within the trim extent: keep
1006             it. */
1007 
1008          continue;
1009 
1010       } else if ( (! EXTENT_CONTAINS(lo)) &&
1011                   (! EXTENT_CONTAINS(hi)) ) {
1012 
1013          /* The current chunk is entirely outside the trim extent:
1014             delete it. */
1015 
1016          if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
1017             MC_(record_free_error)(tid, (Addr)mc->data);
1018             VG_(free)(chunks);
1019             if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
1020             return;
1021          }
1022          die_and_free_mem ( tid, mc, mp->rzB );
1023 
1024       } else {
1025 
1026          /* The current chunk intersects the trim extent: remove,
1027             trim, and reinsert it. */
1028 
1029          tl_assert(EXTENT_CONTAINS(lo) ||
1030                    EXTENT_CONTAINS(hi));
1031          if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
1032             MC_(record_free_error)(tid, (Addr)mc->data);
1033             VG_(free)(chunks);
1034             if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
1035             return;
1036          }
1037 
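         /* min/max bracket both the chunk and the trim extent; lo/hi is
            the sub-range the chunk is narrowed down to below. */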
1038          if (mc->data < addr) {
1039            min = mc->data;
1040            lo = addr;
1041          } else {
1042            min = addr;
1043            lo = mc->data;
1044          }
1045 
1046          if (mc->data + szB > addr + szB) {
1047            max = mc->data + szB;
1048            hi = addr + szB;
1049          } else {
1050            max = addr + szB;
1051            hi = mc->data + szB;
1052          }
1053 
1054          tl_assert(min <= lo);
1055          tl_assert(lo < hi);
1056          tl_assert(hi <= max);
1057 
1058          if (min < lo && !EXTENT_CONTAINS(min)) {
1059            MC_(make_mem_noaccess)( min, lo - min);
1060          }
1061 
1062          if (hi < max && !EXTENT_CONTAINS(max)) {
1063            MC_(make_mem_noaccess)( hi, max - hi );
1064          }
1065 
1066          mc->data = lo;
1067          mc->szB = (UInt) (hi - lo);
1068          VG_(HT_add_node)( mp->chunks, mc );
1069       }
1070 
1071 #undef EXTENT_CONTAINS
1072 
1073    }
1074    check_mempool_sane(mp);
1075    VG_(free)(chunks);
1076 }
1077 
1078 void MC_(move_mempool)(Addr poolA, Addr poolB)
1079 {
1080    MC_Mempool* mp;
1081 
1082    if (VG_(clo_verbosity) > 2) {
1083       VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
1084       VG_(get_and_pp_StackTrace)
1085          (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
1086    }
1087 
1088    mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );
1089 
1090    if (mp == NULL) {
1091       ThreadId tid = VG_(get_running_tid)();
1092       MC_(record_illegal_mempool_error) ( tid, poolA );
1093       return;
1094    }
1095 
1096    mp->pool = poolB;
1097    VG_(HT_add_node)( MC_(mempool_list), mp );
1098 }
1099 
1100 void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
1101 {
1102    MC_Mempool*  mp;
1103    MC_Chunk*    mc;
1104    ThreadId     tid = VG_(get_running_tid)();
1105 
1106    if (VG_(clo_verbosity) > 2) {
1107       VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %lu)\n",
1108                    pool, addrA, addrB, szB);
1109       VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
1110    }
1111 
1112    mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
1113    if (mp == NULL) {
1114       MC_(record_illegal_mempool_error)(tid, pool);
1115       return;
1116    }
1117 
1118    check_mempool_sane(mp);
1119 
1120    mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
1121    if (mc == NULL) {
1122       MC_(record_free_error)(tid, (Addr)addrA);
1123       return;
1124    }
1125 
1126    mc->data = addrB;
1127    mc->szB  = szB;
1128    VG_(HT_add_node)( mp->chunks, mc );
1129 
1130    check_mempool_sane(mp);
1131 }
1132 
1133 Bool MC_(mempool_exists)(Addr pool)
1134 {
1135    MC_Mempool*  mp;
1136 
1137    mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
1138    if (mp == NULL) {
1139        return False;
1140    }
1141    return True;
1142 }
1143 
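/* Iterator callback for VG_(XTMemory_report): hand back the size and
   allocation context of the next still-allocated block, or set nblocks
   to 0 once the malloc_list iteration is exhausted. */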
1144 static void xtmemory_report_next_block(XT_Allocs* xta, ExeContext** ec_alloc)
1145 {
1146    MC_Chunk* mc = VG_(HT_Next)(MC_(malloc_list));
1147    if (mc) {
1148       xta->nbytes = mc->szB;
1149       xta->nblocks = 1;
1150       *ec_alloc = MC_(allocated_at)(mc);
1151    } else
1152       xta->nblocks = 0;
1153 }
1154 
1155 void MC_(xtmemory_report) ( const HChar* filename, Bool fini )
1156 {
1157    // Make xtmemory_report_next_block ready to be called.
1158    VG_(HT_ResetIter)(MC_(malloc_list));
1159 
1160    VG_(XTMemory_report)(filename, fini, xtmemory_report_next_block,
1161                         VG_(XT_filter_1top_and_maybe_below_main));
1162 }
1163 
1164 /*------------------------------------------------------------*/
1165 /*--- Statistics printing                                  ---*/
1166 /*------------------------------------------------------------*/
1167 
1168 void MC_(print_malloc_stats) ( void )
1169 {
1170    MC_Chunk* mc;
1171    SizeT     nblocks = 0;
1172    ULong     nbytes  = 0;
1173 
1174    if (VG_(clo_verbosity) == 0)
1175       return;
1176    if (VG_(clo_xml))
1177       return;
1178 
1179    /* Count memory still in use. */
1180    VG_(HT_ResetIter)(MC_(malloc_list));
1181    while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
1182       nblocks++;
1183       nbytes += (ULong)mc->szB;
1184    }
1185 
1186    VG_(umsg)(
1187       "HEAP SUMMARY:\n"
1188       "    in use at exit: %'llu bytes in %'lu blocks\n"
1189       "  total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
1190       "\n",
1191       nbytes, nblocks,
1192       cmalloc_n_mallocs,
1193       cmalloc_n_frees, cmalloc_bs_mallocd
1194    );
1195 }
1196 
1197 SizeT MC_(get_cmalloc_n_frees) ( void )
1198 {
1199    return cmalloc_n_frees;
1200 }
1201 
1202 
1203 /*--------------------------------------------------------------------*/
1204 /*--- end                                                          ---*/
1205 /*--------------------------------------------------------------------*/
1206