1 //--------------------------------------------------------------------*/
2 //--- Massif: a heap profiling tool.                     ms_main.c ---*/
3 //--------------------------------------------------------------------*/
4 
5 /*
6    This file is part of Massif, a Valgrind tool for profiling memory
7    usage of programs.
8 
9    Copyright (C) 2003-2017 Nicholas Nethercote
10       njn@valgrind.org
11 
12    This program is free software; you can redistribute it and/or
13    modify it under the terms of the GNU General Public License as
14    published by the Free Software Foundation; either version 2 of the
15    License, or (at your option) any later version.
16 
17    This program is distributed in the hope that it will be useful, but
18    WITHOUT ANY WARRANTY; without even the implied warranty of
19    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20    General Public License for more details.
21 
22    You should have received a copy of the GNU General Public License
23    along with this program; if not, write to the Free Software
24    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
25    02111-1307, USA.
26 
27    The GNU General Public License is contained in the file COPYING.
28 */
29 
30 //---------------------------------------------------------------------------
31 // XXX:
32 //---------------------------------------------------------------------------
33 // Todo -- nice, but less critical:
34 // - do a graph-drawing test
35 // - make file format more generic.  Obstacles:
36 //   - unit prefixes are not generic
37 //   - preset column widths for stats are not generic
38 //   - preset column headers are not generic
39 //   - "Massif arguments:" line is not generic
40 // - do snapshots on some specific client requests
41 //     - "show me the extra allocations since the last snapshot"
42 //     - "start/stop logging" (eg. quickly skip boring bits)
43 // - Add ability to draw multiple graphs, eg. heap-only, stack-only, total.
44 //   Give each graph a title.  (try to do it generically!)
45 // - make --show-below-main=no work
46 // - Options like --alloc-fn='operator new(unsigned, std::nothrow_t const&)'
47 //   don't work in a .valgrindrc file or in $VALGRIND_OPTS.
48 //   m_commandline.c:add_args_from_string() needs to respect single quotes.
49 // - With --stack=yes, want to add a stack trace for detailed snapshots so
50 //   it's clear where/why the peak is occurring. (Mattieu Castet)  Also,
51 //   possibly useful even with --stack=no? (Andi Yin)
52 //
53 // Performance:
54 // - To run the benchmarks:
55 //
56 //     perl perf/vg_perf --tools=massif --reps=3 perf/{heap,tinycc} massif
57 //     time valgrind --tool=massif --depth=100 konqueror
58 //
59 //   The other benchmarks don't do much allocation, and so give similar speeds
60 //   to Nulgrind.
61 //
62 //   Timing results on 'nevermore' (njn's machine) as of r7013:
63 //
64 //     heap      0.53s  ma:12.4s (23.5x, -----)
65 //     tinycc    0.46s  ma: 4.9s (10.7x, -----)
66 //     many-xpts 0.08s  ma: 2.0s (25.0x, -----)
67 //     konqueror 29.6s real  0:21.0s user
68 //
69 //   [Introduction of --time-unit=i as the default slowed things down by
70 //   roughly 0--20%.]
71 //
72 // Todo -- low priority:
73 // - In each XPt, record both bytes and the number of allocations, and
74 //   possibly the global number of allocations.
75 // - (Andy Lin) Give a stack trace on detailed snapshots?
76 // - (Artur Wisz) add a feature to Massif to ignore any heap blocks larger
77 //   than a certain size!  Because: "linux's malloc allows to set a
78 //   MMAP_THRESHOLD value, so we set it to 4096 - all blocks above that will
79 //   be handled directly by the kernel, and are guaranteed to be returned to
80 //   the system when freed. So we needed to profile only blocks below this
81 //   limit."
82 //
83 // File format working notes:
84 
85 #if 0
86 desc: --heap-admin=foo
87 cmd: date
88 time_unit: ms
89 #-----------
90 snapshot=0
91 #-----------
92 time=0
93 mem_heap_B=0
94 mem_heap_admin_B=0
95 mem_stacks_B=0
96 heap_tree=empty
97 #-----------
98 snapshot=1
99 #-----------
100 time=353
101 mem_heap_B=5
102 mem_heap_admin_B=0
103 mem_stacks_B=0
104 heap_tree=detailed
105 n1: 5 (heap allocation functions) malloc/new/new[], --alloc-fns, etc.
106  n1: 5 0x27F6E0: _nl_normalize_codeset (in /lib/libc-2.3.5.so)
107   n1: 5 0x279DE6: _nl_load_locale_from_archive (in /lib/libc-2.3.5.so)
108    n1: 5 0x278E97: _nl_find_locale (in /lib/libc-2.3.5.so)
109     n1: 5 0x278871: setlocale (in /lib/libc-2.3.5.so)
110      n1: 5 0x8049821: (within /bin/date)
111       n0: 5 0x26ED5E: (below main) (in /lib/libc-2.3.5.so)
112 
113 
114 n_events: n  time(ms)  total(B)    useful-heap(B)  admin-heap(B)  stacks(B)
115 t_events: B
116 n 0 0 0 0 0
117 n 0 0 0 0 0
118 t1: 5 <string...>
119  t1: 6 <string...>
120 
121 Ideas:
122 - each snapshot specifies an x-axis value and one or more y-axis values.
123 - can display the y-axis values separately if you like
124 - can completely separate connection between snapshots and trees.
125 
126 Challenges:
127 - how to specify and scale/abbreviate units on axes?
128 - how to combine multiple values into the y-axis?
129 
130 --------------------------------------------------------------------------------
Command:            date
131 Massif arguments:   --heap-admin=foo
132 ms_print arguments: massif.out
133 --------------------------------------------------------------------------------
134     KB
135 6.472^                                                       :#
136      |                                                       :#  ::  .    .
137      ...
138      |                                     ::@  :@    :@ :@:::#  ::  :    ::::
139    0 +-----------------------------------@---@---@-----@--@---#-------------->ms
     0                                                                     713
140 
141 Number of snapshots: 50
142  Detailed snapshots: [2, 11, 13, 19, 25, 32 (peak)]
143 --------------------------------------------------------------------------------
  n       time(ms)         total(B)   useful-heap(B) admin-heap(B)    stacks(B)
144 --------------------------------------------------------------------------------
  0              0                0                0             0            0
145   1            345                5                5             0            0
146   2            353                5                5             0            0
147 100.00% (5B) (heap allocation functions) malloc/new/new[], --alloc-fns, etc.
148 ->100.00% (5B) 0x27F6E0: _nl_normalize_codeset (in /lib/libc-2.3.5.so)
149 #endif
150 
151 //---------------------------------------------------------------------------
152 
153 #include "pub_tool_basics.h"
154 #include "pub_tool_vki.h"
155 #include "pub_tool_aspacemgr.h"
156 #include "pub_tool_debuginfo.h"
157 #include "pub_tool_hashtable.h"
158 #include "pub_tool_libcbase.h"
159 #include "pub_tool_libcassert.h"
160 #include "pub_tool_libcfile.h"
161 #include "pub_tool_libcprint.h"
162 #include "pub_tool_libcproc.h"
163 #include "pub_tool_machine.h"
164 #include "pub_tool_mallocfree.h"
165 #include "pub_tool_options.h"
166 #include "pub_tool_poolalloc.h"
167 #include "pub_tool_replacemalloc.h"
168 #include "pub_tool_stacktrace.h"
169 #include "pub_tool_threadstate.h"
170 #include "pub_tool_tooliface.h"
171 #include "pub_tool_xarray.h"
172 #include "pub_tool_xtree.h"
173 #include "pub_tool_xtmemory.h"
174 #include "pub_tool_clientstate.h"
175 #include "pub_tool_gdbserver.h"
176 
177 #include "pub_tool_clreq.h"           // For {MALLOC,FREE}LIKE_BLOCK
178 
179 //------------------------------------------------------------*/
180 //--- Overview of operation                                ---*/
181 //------------------------------------------------------------*/
182 
183 // The size of the stacks and heap is tracked.  The heap is tracked in a lot
184 // of detail, enough to tell how many bytes each line of code is responsible
185 // for, more or less.  The main data structure is an xtree maintaining the
186 // call tree beneath all the allocation functions like malloc().
187 // (Alternatively, if --pages-as-heap=yes is specified, memory is tracked at
188 // the page level, and each page is treated much like a heap block.  We use
189 // "heap" throughout below to cover this case because the concepts are all the
190 // same.)
191 //
192 // "Snapshots" are recordings of the memory usage.  There are two basic
193 // kinds:
194 // - Normal:  these record the current time, total memory size, total heap
195 //   size, heap admin size and stack size.
196 // - Detailed: these record those things in a normal snapshot, plus a very
197 //   detailed XTree (see below) indicating how the heap is structured.
198 //
199 // Snapshots are taken every so often.  There are two storage classes of
200 // snapshots:
201 // - Temporary:  Massif does a temporary snapshot every so often.  The idea
202 //   is to always have a certain number of temporary snapshots around.  So
203 //   we take them frequently to begin with, but decreasingly often as the
204 //   program continues to run.  Also, we remove some old ones after a while.
205 //   Overall it's a kind of exponential decay thing.  Most of these are
206 //   normal snapshots, a small fraction are detailed snapshots.
207 // - Permanent:  Massif takes a permanent (detailed) snapshot in some
208 //   circumstances.  They are:
209 //   - Peak snapshot:  When the memory usage peak is reached, it takes a
210 //     snapshot.  It keeps this, unless the peak is subsequently exceeded,
211 //     in which case it will overwrite the peak snapshot.
212 //   - User-requested snapshots:  These are done in response to client
213 //     requests.  They are always kept.
214 
215 // Used for printing things when clo_verbosity > 1.
216 #define VERB(verb, format, args...) \
217    if (UNLIKELY(VG_(clo_verbosity) > verb)) { \
218       VG_(dmsg)("Massif: " format, ##args);   \
219    }
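// For example, VERB(2, "Culling...\n") (as used in cull_snapshots below)
// expands to a guarded VG_(dmsg)("Massif: Culling...\n") call that only
// fires when VG_(clo_verbosity) is greater than 2.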
220 
221 //------------------------------------------------------------//
222 //--- Statistics                                           ---//
223 //------------------------------------------------------------//
224 
225 // Konqueror startup, to give an idea of the numbers involved with a biggish
226 // program, with default depth:
227 //
228 //  depth=3                   depth=40
229 //  - 310,000 allocations
230 //  - 300,000 frees
231 //  -  15,000 XPts            800,000 XPts
232 //  -   1,800 top-XPts
233 
234 static UInt n_heap_allocs           = 0;
235 static UInt n_heap_reallocs         = 0;
236 static UInt n_heap_frees            = 0;
237 static UInt n_ignored_heap_allocs   = 0;
238 static UInt n_ignored_heap_frees    = 0;
239 static UInt n_ignored_heap_reallocs = 0;
240 static UInt n_stack_allocs          = 0;
241 static UInt n_stack_frees           = 0;
242 
243 static UInt n_skipped_snapshots     = 0;
244 static UInt n_real_snapshots        = 0;
245 static UInt n_detailed_snapshots    = 0;
246 static UInt n_peak_snapshots        = 0;
247 static UInt n_cullings              = 0;
248 
249 //------------------------------------------------------------//
250 //--- Globals                                              ---//
251 //------------------------------------------------------------//
252 
253 // Number of guest instructions executed so far.  Only used with
254 // --time-unit=i.
255 static Long guest_instrs_executed = 0;
256 
257 static SizeT heap_szB       = 0; // Live heap size
258 static SizeT heap_extra_szB = 0; // Live heap extra size -- slop + admin bytes
259 static SizeT stacks_szB     = 0; // Live stacks size
260 
261 // This is the total size from the current peak snapshot, or 0 if no peak
262 // snapshot has been taken yet.
263 static SizeT peak_snapshot_total_szB = 0;
264 
265 // Incremented every time memory is allocated/deallocated, by the
266 // allocated/deallocated amount;  includes heap, heap-admin and stack
267 // memory.  An alternative to milliseconds as a unit of program "time".
268 static ULong total_allocs_deallocs_szB = 0;
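// Rough illustration (assuming the default --heap-admin=8 and a block with
// no slop): a malloc of 100 bytes followed by its free advances this counter
// by 2 * (100 + 8) = 216 bytes, which is what --time-unit=B reports as
// "time".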
269 
270 // When running with --heap=yes --pages-as-heap=no, we don't start taking
271 // snapshots until the first basic block is executed, rather than doing it in
272 // ms_post_clo_init (which is the obvious spot), for two reasons.
273 // - It lets us ignore stack events prior to that, because they're not
274 //   really proper ones and just would screw things up.
275 // - Because there's still some core initialisation to do, and so there
276 //   would be an artificial time gap between the first and second snapshots.
277 //
278 // When running with --heap=yes --pages-as-heap=yes, snapshots start much
279 // earlier due to new_mem_startup so this isn't relevant.
280 //
281 static Bool have_started_executing_code = False;
282 
283 //------------------------------------------------------------//
284 //--- Alloc fns                                            ---//
285 //------------------------------------------------------------//
286 
287 static XArray* alloc_fns;
288 static XArray* ignore_fns;
289 
290 static void init_alloc_fns(void)
291 {
292    // Create the list, and add the default elements.
293    alloc_fns = VG_(newXA)(VG_(malloc), "ms.main.iaf.1",
294                                        VG_(free), sizeof(HChar*));
295    #define DO(x)  { const HChar* s = x; VG_(addToXA)(alloc_fns, &s); }
296 
297    // Ordered roughly according to (presumed) frequency.
298    // Nb: The C++ "operator new*" ones are overloadable.  We include them
299    // always anyway, because even if they're overloaded, it would be a
300    // prodigiously stupid overloading that caused them to not allocate
301    // memory.
302    //
303    // XXX: because we don't look at the first stack entry (unless it's a
304    // custom allocation) there's not much point to having all these alloc
305    // functions here -- they should never appear anywhere (I think?) other
306    // than the top stack entry.  The only exceptions are those that in
307    // vg_replace_malloc.c are partly or fully implemented in terms of another
308    // alloc function: realloc (which uses malloc);  valloc,
309    // malloc_zone_valloc, posix_memalign and memalign_common (which use
310    // memalign).
311    //
312    DO("malloc"                                              );
313    DO("__builtin_new"                                       );
314    DO("operator new(unsigned)"                              );
315    DO("operator new(unsigned long)"                         );
316    DO("__builtin_vec_new"                                   );
317    DO("operator new[](unsigned)"                            );
318    DO("operator new[](unsigned long)"                       );
319    DO("calloc"                                              );
320    DO("realloc"                                             );
321    DO("memalign"                                            );
322    DO("posix_memalign"                                      );
323    DO("valloc"                                              );
324    DO("operator new(unsigned, std::nothrow_t const&)"       );
325    DO("operator new[](unsigned, std::nothrow_t const&)"     );
326    DO("operator new(unsigned long, std::nothrow_t const&)"  );
327    DO("operator new[](unsigned long, std::nothrow_t const&)");
328 #if defined(VGO_darwin)
329    DO("malloc_zone_malloc"                                  );
330    DO("malloc_zone_calloc"                                  );
331    DO("malloc_zone_realloc"                                 );
332    DO("malloc_zone_memalign"                                );
333    DO("malloc_zone_valloc"                                  );
334 #endif
335 }
336 
337 static void init_ignore_fns(void)
338 {
339    // Create the (empty) list.
340    ignore_fns = VG_(newXA)(VG_(malloc), "ms.main.iif.1",
341                                         VG_(free), sizeof(HChar*));
342 }
343 
344 //------------------------------------------------------------//
345 //--- Command line args                                    ---//
346 //------------------------------------------------------------//
347 
348 #define MAX_DEPTH       200
349 
350 typedef enum { TimeI, TimeMS, TimeB } TimeUnit;
351 
352 static const HChar* TimeUnit_to_string(TimeUnit time_unit)
353 {
354    switch (time_unit) {
355    case TimeI:  return "i";
356    case TimeMS: return "ms";
357    case TimeB:  return "B";
358    default:     tl_assert2(0, "TimeUnit_to_string: unrecognised TimeUnit");
359    }
360 }
361 
362 static Bool   clo_heap            = True;
363    // clo_heap_admin is deliberately a word-sized type.  At one point it was
364    // a UInt, but this caused problems on 64-bit machines when it was
365    // multiplied by a small negative number and then promoted to a
366    // word-sized type -- it ended up with a value of 4.2 billion.  Sigh.
367 static SSizeT clo_heap_admin      = 8;
368 static Bool   clo_pages_as_heap   = False;
369 static Bool   clo_stacks          = False;
370 static Int    clo_depth           = 30;
371 static double clo_threshold       = 1.0;  // percentage
372 static double clo_peak_inaccuracy = 1.0;  // percentage
373 static Int    clo_time_unit       = TimeI;
374 static Int    clo_detailed_freq   = 10;
375 static Int    clo_max_snapshots   = 100;
376 static const HChar* clo_massif_out_file = "massif.out.%p";
377 
378 static XArray* args_for_massif;
379 
380 static Bool ms_process_cmd_line_option(const HChar* arg)
381 {
382    const HChar* tmp_str;
383 
384    // Remember the arg for later use.
385    VG_(addToXA)(args_for_massif, &arg);
386 
387         if VG_BOOL_CLO(arg, "--heap",           clo_heap)   {}
388    else if VG_BINT_CLO(arg, "--heap-admin",     clo_heap_admin, 0, 1024) {}
389 
390    else if VG_BOOL_CLO(arg, "--stacks",         clo_stacks) {}
391 
392    else if VG_BOOL_CLO(arg, "--pages-as-heap",  clo_pages_as_heap) {}
393 
394    else if VG_BINT_CLO(arg, "--depth",          clo_depth, 1, MAX_DEPTH) {}
395 
396    else if VG_STR_CLO(arg, "--alloc-fn",        tmp_str) {
397       VG_(addToXA)(alloc_fns, &tmp_str);
398    }
399    else if VG_STR_CLO(arg, "--ignore-fn",       tmp_str) {
400       VG_(addToXA)(ignore_fns, &tmp_str);
401    }
402 
403    else if VG_DBL_CLO(arg, "--threshold",  clo_threshold) {
404       if (clo_threshold < 0 || clo_threshold > 100) {
405          VG_(fmsg_bad_option)(arg,
406             "--threshold must be between 0.0 and 100.0\n");
407       }
408    }
409 
410    else if VG_DBL_CLO(arg, "--peak-inaccuracy", clo_peak_inaccuracy) {}
411 
412    else if VG_XACT_CLO(arg, "--time-unit=i",    clo_time_unit, TimeI)  {}
413    else if VG_XACT_CLO(arg, "--time-unit=ms",   clo_time_unit, TimeMS) {}
414    else if VG_XACT_CLO(arg, "--time-unit=B",    clo_time_unit, TimeB)  {}
415 
416    else if VG_BINT_CLO(arg, "--detailed-freq",  clo_detailed_freq, 1, 1000000) {}
417 
418    else if VG_BINT_CLO(arg, "--max-snapshots",  clo_max_snapshots, 10, 1000) {}
419 
420    else if VG_STR_CLO(arg, "--massif-out-file", clo_massif_out_file) {}
421 
422    else
423       return VG_(replacement_malloc_process_cmd_line_option)(arg);
424 
425    return True;
426 }
427 
428 static void ms_print_usage(void)
429 {
430    VG_(printf)(
431 "    --heap=no|yes             profile heap blocks [yes]\n"
432 "    --heap-admin=<size>       average admin bytes per heap block;\n"
433 "                               ignored if --heap=no [8]\n"
434 "    --stacks=no|yes           profile stack(s) [no]\n"
435 "    --pages-as-heap=no|yes    profile memory at the page level [no]\n"
436 "    --depth=<number>          depth of contexts [30]\n"
437 "    --alloc-fn=<name>         specify <name> as an alloc function [empty]\n"
438 "    --ignore-fn=<name>        ignore heap allocations within <name> [empty]\n"
439 "    --threshold=<m.n>         significance threshold, as a percentage [1.0]\n"
440 "    --peak-inaccuracy=<m.n>   maximum peak inaccuracy, as a percentage [1.0]\n"
441 "    --time-unit=i|ms|B        time unit: instructions executed, milliseconds\n"
442 "                              or heap bytes alloc'd/dealloc'd [i]\n"
443 "    --detailed-freq=<N>       every Nth snapshot should be detailed [10]\n"
444 "    --max-snapshots=<N>       maximum number of snapshots recorded [100]\n"
445 "    --massif-out-file=<file>  output file name [massif.out.%%p]\n"
446    );
447 }
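// A typical invocation exercising these options might look like this
// (illustrative only; "./myprog" is a placeholder):
//
//   valgrind --tool=massif --stacks=yes --time-unit=B \
//            --massif-out-file=massif.out.myprog ./myprog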
448 
449 static void ms_print_debug_usage(void)
450 {
451    VG_(printf)(
452 "    (none)\n"
453    );
454 }
455 
456 
457 //------------------------------------------------------------//
458 //--- XTrees                                               ---//
459 //------------------------------------------------------------//
460 
461 // The details of the heap are represented by a single XTree.
462 // This XTree maintains the nr of allocated bytes for each
463 // stacktrace/execontext.
464 //
465 // The root of the XTree will be output as a top node 'alloc functions',
466 // which represents all allocation functions, e.g.:
467 // - malloc/calloc/realloc/memalign/new/new[];
468 // - user-specified allocation functions (using --alloc-fn);
469 // - custom allocation (MALLOCLIKE) points
470 static XTree* heap_xt;
471 /* heap_xt contains a SizeT: the nr of allocated bytes by this execontext. */
472 static void init_szB(void* value)
473 {
474    *((SizeT*)value) = 0;
475 }
476 static void add_szB(void* to, const void* value)
477 {
478    *((SizeT*)to) += *((const SizeT*)value);
479 }
480 static void sub_szB(void* from, const void* value)
481 {
482    *((SizeT*)from) -= *((const SizeT*)value);
483 }
484 static ULong alloc_szB(const void* value)
485 {
486    return (ULong)*((const SizeT*)value);
487 }
488 
489 
490 //------------------------------------------------------------//
491 //--- XTree Operations                                     ---//
492 //------------------------------------------------------------//
493 
494 // This is the limit on the number of filtered alloc-fns that can be in a
495 // single stacktrace.
496 #define MAX_OVERESTIMATE   50
497 #define MAX_IPS            (MAX_DEPTH + MAX_OVERESTIMATE)
498 
499 // filtering out uninteresting entries:
500 // alloc-fns and entries above alloc-fns, and entries below main-or-below-main.
501 //   Eg:       alloc-fn1 / alloc-fn2 / a / b / main / (below main) / c
502 //   becomes:  a / b / main
503 // Nb: it's possible to end up with an empty trace, eg. if 'main' is marked
504 // as an alloc-fn.  This is ok.
505 static
506 void filter_IPs (Addr* ips, Int n_ips,
507                  UInt* top, UInt* n_ips_sel)
508 {
509    Int i;
510    Bool top_has_fnname = False;
511    const HChar *fnname;
512 
513    *top = 0;
514    *n_ips_sel = n_ips;
515 
516    // Advance *top as long as we find alloc functions
517    // PW Nov 2016 xtree work:
518 //  old massif code was doing something really strange (possibly buggy):
519    //  'sliding' a bunch of functions without names by removing an
520    //  alloc function 'inside' a stacktrace e.g.
521    //    0x1 0x2 0x3 alloc func1 main
522    //  became   0x1 0x2 0x3 func1 main
523    const DiEpoch ep = VG_(current_DiEpoch)();
524    for (i = *top; i < n_ips; i++) {
525       top_has_fnname = VG_(get_fnname)(ep, ips[*top], &fnname);
526       if (top_has_fnname &&  VG_(strIsMemberXA)(alloc_fns, fnname)) {
527          VERB(4, "filtering alloc fn %s\n", fnname);
528          (*top)++;
529          (*n_ips_sel)--;
530       } else {
531          break;
532       }
533    }
534 
535    // filter the whole stacktrace if this allocation has to be ignored.
536    if (*n_ips_sel > 0 && VG_(sizeXA)(ignore_fns) > 0) {
537       if (!top_has_fnname) {
538          // top has no fnname => search for the first entry that has a fnname
539          for (i = *top; i < n_ips && !top_has_fnname; i++) {
540             top_has_fnname = VG_(get_fnname)(ep, ips[i], &fnname);
541          }
542       }
543       if (top_has_fnname && VG_(strIsMemberXA)(ignore_fns, fnname)) {
544          VERB(4, "ignored allocation from fn %s\n", fnname);
545          *top = n_ips;
546          *n_ips_sel = 0;
547       }
548    }
549 
550    if (!VG_(clo_show_below_main) && *n_ips_sel > 0 ) {
551       // Technically, it would be better to use the 'real' epoch that
552       // was used to capture ips/n_ips. However, this searches
553       // for a main or below_main function. It is technically possible
554 // but unlikely that the main or below-main fn is in a dlclose-d library,
555       // so current epoch is reasonable enough, even if not perfect.
556       // FIXME PW EPOCH: would be better to also use the real ips epoch here,
557       // once m_xtree.c massif output format properly supports epoch.
558       const DiEpoch cur_ep = VG_(current_DiEpoch)();
559       Int mbm = VG_(XT_offset_main_or_below_main)(cur_ep, ips, n_ips);
560 
561       if (mbm < *top) {
562          // Special case: the first main (or below main) function is an
563          // alloc function.
564          *n_ips_sel = 1;
565          VERB(4, "main/below main: keeping 1 fn\n");
566       } else {
567          *n_ips_sel -= n_ips - mbm - 1;
568          VERB(4, "main/below main: filtering %d\n", n_ips - mbm - 1);
569       }
570    }
571 
572    // filter the frames if we have more than clo_depth
573    if (*n_ips_sel > clo_depth) {
574       VERB(4, "filtering IPs above clo_depth\n");
575       *n_ips_sel = clo_depth;
576    }
577 }
578 
579 // Capture a stacktrace, and make an ec of it, without the first entry
580 // if exclude_first_entry is True.
581 static ExeContext* make_ec(ThreadId tid, Bool exclude_first_entry)
582 {
583    static Addr ips[MAX_IPS];
584 
585    // After this call, the IPs we want are in ips[0]..ips[n_ips-1].
586    Int n_ips = VG_(get_StackTrace)( tid, ips, clo_depth +  MAX_OVERESTIMATE,
587                                     NULL/*array to dump SP values in*/,
588                                     NULL/*array to dump FP values in*/,
589                                     0/*first_ip_delta*/ );
590    if (exclude_first_entry) {
591       if (n_ips > 1) {
592          const HChar *fnname;
593          VERB(4, "removing top fn %s from stacktrace\n",
594               VG_(get_fnname)(VG_(current_DiEpoch)(), ips[0], &fnname)
595               ? fnname : "???");
596          return VG_(make_ExeContext_from_StackTrace)(ips+1, n_ips-1);
597       } else {
598          VERB(4, "null execontext as removing top fn with n_ips %d\n", n_ips);
599          return VG_(null_ExeContext) ();
600       }
601    } else
602       return VG_(make_ExeContext_from_StackTrace)(ips, n_ips);
603 }
604 
605 // Create (or update) in heap_xt an xec corresponding to the stacktrace of tid.
606 // req_szB is added to the xec (unless ec is fully filtered).
607 // Returns the corresponding XTree xec.
608 // exclude_first_entry is an optimisation: if True, automatically removes
609 // the top level IP from the stacktrace. Should be set to True if it is known
610 // that this is an alloc fn. The top function presumably will be something like
611 // malloc or __builtin_new that we're sure to filter out.
612 static Xecu add_heap_xt( ThreadId tid, SizeT req_szB, Bool exclude_first_entry)
613 {
614    ExeContext *ec = make_ec(tid, exclude_first_entry);
615 
616    if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
617       VG_(XTMemory_Full_alloc)(req_szB, ec);
618    return VG_(XT_add_to_ec) (heap_xt, ec, &req_szB);
619 }
620 
621 // Subtract req_szB from the heap_xt at 'where'.
622 static void sub_heap_xt(Xecu where, SizeT req_szB, Bool exclude_first_entry)
623 {
624    tl_assert(clo_heap);
625 
626    if (0 == req_szB)
627       return;
628 
629    VG_(XT_sub_from_xecu) (heap_xt, where, &req_szB);
630    if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full)) {
631       ExeContext *ec_free = make_ec(VG_(get_running_tid)(),
632                                     exclude_first_entry);
633       VG_(XTMemory_Full_free)(req_szB,
634                               VG_(XT_get_ec_from_xecu)(heap_xt, where),
635                               ec_free);
636    }
637 }
638 
639 
640 //------------------------------------------------------------//
641 //--- Snapshots                                            ---//
642 //------------------------------------------------------------//
643 
644 // Snapshots are done in a way so that we always have a reasonable number of
645 // them.  We start by taking them quickly.  Once we hit our limit, we cull
646 // some (eg. half), and start taking them more slowly.  Once we hit the
647 // limit again, we again cull and then take them even more slowly, and so
648 // on.
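// For example (illustrative): with the default --max-snapshots=100, once
// the table fills, cull_snapshots() deletes roughly half of the entries and
// the smallest surviving gap between snapshots becomes the new minimum time
// interval, so snapshotting naturally slows down as the program runs.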
649 
650 #define UNUSED_SNAPSHOT_TIME  -333  // A conspicuous negative number.
651 
652 typedef
653    enum {
654       Normal = 77,
655       Peak,
656       Unused
657    }
658    SnapshotKind;
659 
660 typedef
661    struct {
662       SnapshotKind kind;
663       Time  time;
664       SizeT heap_szB;
665       SizeT heap_extra_szB;// Heap slop + admin bytes.
666       SizeT stacks_szB;
667       XTree* xt;    // Snapshot of heap_xt, if a detailed snapshot,
668    }                // otherwise NULL.
669    Snapshot;
670 
671 static UInt      next_snapshot_i = 0;  // Index of where next snapshot will go.
672 static Snapshot* snapshots;            // Array of snapshots.
673 
674 static Bool is_snapshot_in_use(Snapshot* snapshot)
675 {
676    if (Unused == snapshot->kind) {
677       // If snapshot is unused, check all the fields are unset.
678       tl_assert(snapshot->time           == UNUSED_SNAPSHOT_TIME);
679       tl_assert(snapshot->heap_extra_szB == 0);
680       tl_assert(snapshot->heap_szB       == 0);
681       tl_assert(snapshot->stacks_szB     == 0);
682       tl_assert(snapshot->xt             == NULL);
683       return False;
684    } else {
685       tl_assert(snapshot->time           != UNUSED_SNAPSHOT_TIME);
686       return True;
687    }
688 }
689 
690 static Bool is_detailed_snapshot(Snapshot* snapshot)
691 {
692    return (snapshot->xt ? True : False);
693 }
694 
695 static Bool is_uncullable_snapshot(Snapshot* snapshot)
696 {
697    return &snapshots[0] == snapshot                   // First snapshot
698        || &snapshots[next_snapshot_i-1] == snapshot   // Last snapshot
699        || snapshot->kind == Peak;                     // Peak snapshot
700 }
701 
702 static void sanity_check_snapshot(Snapshot* snapshot)
703 {
704    // Not much we can sanity check.
705    tl_assert(snapshot->xt == NULL || snapshot->kind != Unused);
706 }
707 
708 // All the used entries should look used, all the unused ones should be clear.
709 static void sanity_check_snapshots_array(void)
710 {
711    Int i;
712    for (i = 0; i < next_snapshot_i; i++) {
713       tl_assert( is_snapshot_in_use( & snapshots[i] ));
714    }
715    for (    ; i < clo_max_snapshots; i++) {
716       tl_assert(!is_snapshot_in_use( & snapshots[i] ));
717    }
718 }
719 
720 // This zeroes all the fields in the snapshot, but does not free the xt
721 // XTree if present.  It also does a sanity check unless asked not to;  we
722 // can't sanity check at startup when clearing the initial snapshots because
723 // they're full of junk.
724 static void clear_snapshot(Snapshot* snapshot, Bool do_sanity_check)
725 {
726    if (do_sanity_check) sanity_check_snapshot(snapshot);
727    snapshot->kind           = Unused;
728    snapshot->time           = UNUSED_SNAPSHOT_TIME;
729    snapshot->heap_extra_szB = 0;
730    snapshot->heap_szB       = 0;
731    snapshot->stacks_szB     = 0;
732    snapshot->xt             = NULL;
733 }
734 
735 // This zeroes all the fields in the snapshot, and frees the heap XTree xt if
736 // present.
737 static void delete_snapshot(Snapshot* snapshot)
738 {
739    // Nb: if there's an XTree, we free it after calling clear_snapshot,
740    // because clear_snapshot does a sanity check which includes checking the
741    // XTree.
742    XTree* tmp_xt = snapshot->xt;
743    clear_snapshot(snapshot, /*do_sanity_check*/True);
744    if (tmp_xt) {
745        VG_(XT_delete)(tmp_xt);
746    }
747 }
748 
749 static void VERB_snapshot(Int verbosity, const HChar* prefix, Int i)
750 {
751    Snapshot* snapshot = &snapshots[i];
752    const HChar* suffix;
753    switch (snapshot->kind) {
754    case Peak:   suffix = "p";                                            break;
755    case Normal: suffix = ( is_detailed_snapshot(snapshot) ? "d" : "." ); break;
756    case Unused: suffix = "u";                                            break;
757    default:
758       tl_assert2(0, "VERB_snapshot: unknown snapshot kind: %d", snapshot->kind);
759    }
760    VERB(verbosity, "%s S%s%3d (t:%lld, hp:%lu, ex:%lu, st:%lu)\n",
761       prefix, suffix, i,
762       snapshot->time,
763       snapshot->heap_szB,
764       snapshot->heap_extra_szB,
765       snapshot->stacks_szB
766    );
767 }
768 
769 // Cull half the snapshots;  we choose those that represent the smallest
770 // time-spans, because that gives us the most even distribution of snapshots
771 // over time.  (It's possible to lose interesting spikes, however.)
772 //
773 // Algorithm for N snapshots:  We find the snapshot representing the smallest
774 // timeframe, and remove it.  We repeat this until (N/2) snapshots are gone.
775 // We have to do this one snapshot at a time, rather than finding the (N/2)
776 // smallest snapshots in one hit, because when a snapshot is removed, its
777 // neighbours immediately cover greater timespans.  So it's O(N^2), but N is
778 // small, and it's not done very often.
779 //
780 // Once we're done, we return the new smallest interval between snapshots.
781 // That becomes our minimum time interval.
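// Worked example (illustrative): with snapshot times 0, 10, 12, 30, 100,
// the candidate timespans are 12-0=12 for the snapshot at t=10, 30-10=20
// for t=12 and 100-12=88 for t=30 (the first and last snapshots are never
// candidates).  So t=10 is culled first; its neighbour at t=12 then covers
// 30-0=30 and is reconsidered on the next pass.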
782 static UInt cull_snapshots(void)
783 {
784    Int  i, jp, j, jn, min_timespan_i;
785    Int  n_deleted = 0;
786    Time min_timespan;
787 
788    n_cullings++;
789 
790    // Sets j to the index of the first not-yet-removed snapshot at or after i
791    #define FIND_SNAPSHOT(i, j) \
792       for (j = i; \
793            j < clo_max_snapshots && !is_snapshot_in_use(&snapshots[j]); \
794            j++) { }
795 
796    VERB(2, "Culling...\n");
797 
798    // First we remove enough snapshots by clearing them in-place.  Once
799    // that's done, we can slide the remaining ones down.
800    for (i = 0; i < clo_max_snapshots/2; i++) {
801       // Find the snapshot representing the smallest timespan.  The timespan
802       // for snapshot n = d(N-1,N)+d(N,N+1), where d(A,B) is the time between
803       // snapshot A and B.  We don't consider the first and last snapshots for
804       // removal.
805       Snapshot* min_snapshot;
806       Int min_j;
807 
808       // Initial triple: (prev, curr, next) == (jp, j, jn)
809       // Initial min_timespan is the first one.
810       jp = 0;
811       FIND_SNAPSHOT(1,   j);
812       FIND_SNAPSHOT(j+1, jn);
813       min_timespan = 0x7fffffffffffffffLL;
814       min_j        = -1;
815       while (jn < clo_max_snapshots) {
816          Time timespan = snapshots[jn].time - snapshots[jp].time;
817          tl_assert(timespan >= 0);
818          // Nb: We never cull the peak snapshot.
819          if (Peak != snapshots[j].kind && timespan < min_timespan) {
820             min_timespan = timespan;
821             min_j        = j;
822          }
823          // Move on to next triple
824          jp = j;
825          j  = jn;
826          FIND_SNAPSHOT(jn+1, jn);
827       }
828       // We've found the least important snapshot, now delete it.  First
829       // print it if necessary.
830       tl_assert(-1 != min_j);    // Check we found a minimum.
831       min_snapshot = & snapshots[ min_j ];
832       if (VG_(clo_verbosity) > 1) {
833          HChar buf[64];   // large enough
834          VG_(snprintf)(buf, 64, " %3d (t-span = %lld)", i, min_timespan);
835          VERB_snapshot(2, buf, min_j);
836       }
837       delete_snapshot(min_snapshot);
838       n_deleted++;
839    }
840 
841    // Slide down the remaining snapshots over the removed ones.  First set i
842    // to point to the first empty slot, and j to the first full slot after
843    // i.  Then slide everything down.
844    for (i = 0;  is_snapshot_in_use( &snapshots[i] ); i++) { }
845    for (j = i; !is_snapshot_in_use( &snapshots[j] ); j++) { }
846    for (  ; j < clo_max_snapshots; j++) {
847       if (is_snapshot_in_use( &snapshots[j] )) {
848          snapshots[i++] = snapshots[j];
849          clear_snapshot(&snapshots[j], /*do_sanity_check*/True);
850       }
851    }
852    next_snapshot_i = i;
853 
854    // Check snapshots array looks ok after changes.
855    sanity_check_snapshots_array();
856 
857    // Find the minimum timespan remaining;  that will be our new minimum
858    // time interval.  Note that above we were finding timespans by measuring
859    // two intervals around a snapshot that was under consideration for
860    // deletion.  Here we only measure single intervals because all the
861    // deletions have occurred.
862    //
863    // But we have to be careful -- some snapshots (eg. snapshot 0, and the
864    // peak snapshot) are uncullable.  If two uncullable snapshots end up
865    // next to each other, they'll never be culled (assuming the peak doesn't
866    // change), and the time gap between them will not change.  However, the
867    // time between the remaining cullable snapshots will grow ever larger.
868    // This means that the min_timespan found will always be that between the
869    // two uncullable snapshots, and it will be much smaller than it should
870    // be.  To avoid this problem, when computing the minimum timespan, we
871    // ignore any timespans between two uncullable snapshots.
872    tl_assert(next_snapshot_i > 1);
873    min_timespan = 0x7fffffffffffffffLL;
874    min_timespan_i = -1;
875    for (i = 1; i < next_snapshot_i; i++) {
876       if (is_uncullable_snapshot(&snapshots[i]) &&
877           is_uncullable_snapshot(&snapshots[i-1]))
878       {
879          VERB(2, "(Ignoring interval %d--%d when computing minimum)\n", i-1, i);
880       } else {
881          Time timespan = snapshots[i].time - snapshots[i-1].time;
882          tl_assert(timespan >= 0);
883          if (timespan < min_timespan) {
884             min_timespan = timespan;
885             min_timespan_i = i;
886          }
887       }
888    }
889    tl_assert(-1 != min_timespan_i);    // Check we found a minimum.
890 
891    // Print remaining snapshots, if necessary.
892    if (VG_(clo_verbosity) > 1) {
893       VERB(2, "Finished culling (%3d of %3d deleted)\n",
894          n_deleted, clo_max_snapshots);
895       for (i = 0; i < next_snapshot_i; i++) {
896          VERB_snapshot(2, "  post-cull", i);
897       }
898       VERB(2, "New time interval = %lld (between snapshots %d and %d)\n",
899          min_timespan, min_timespan_i-1, min_timespan_i);
900    }
901 
902    return min_timespan;
903 }
904 
905 static Time get_time(void)
906 {
907    // Get current time, in whatever time unit we're using.
908    if (clo_time_unit == TimeI) {
909       return guest_instrs_executed;
910    } else if (clo_time_unit == TimeMS) {
911       // Some stuff happens between the millisecond timer being initialised
912       // to zero and us taking our first snapshot.  We determine that time
913       // gap so we can subtract it from all subsequent times so that our
914       // first snapshot is considered to be at t = 0ms.  Unfortunately, a
915       // bunch of symbols get read after the first snapshot is taken but
916       // before the second one (which is triggered by the first allocation),
917       // so when the time-unit is 'ms' we always have a big gap between the
918       // first two snapshots.  But at least users won't have to wonder why
919       // the first snapshot isn't at t=0.
920       static Bool is_first_get_time = True;
921       static Time start_time_ms;
922       if (is_first_get_time) {
923          start_time_ms = VG_(read_millisecond_timer)();
924          is_first_get_time = False;
925          return 0;
926       } else {
927          return VG_(read_millisecond_timer)() - start_time_ms;
928       }
929    } else if (clo_time_unit == TimeB) {
930       return total_allocs_deallocs_szB;
931    } else {
932       tl_assert2(0, "bad --time-unit value");
933    }
934 }
935 
936 // Take a snapshot, and only that -- decisions on whether to take a
937 // snapshot, or what kind of snapshot, are made elsewhere.
938 // Nb: we call the arg "my_time" because "time" shadows a global declaration
939 // in /usr/include/time.h on Darwin.
940 static void
941 take_snapshot(Snapshot* snapshot, SnapshotKind kind, Time my_time,
942               Bool is_detailed)
943 {
944    tl_assert(!is_snapshot_in_use(snapshot));
945    if (!clo_pages_as_heap) {
946       tl_assert(have_started_executing_code);
947    }
948 
949    // Heap and heap admin.
950    if (clo_heap) {
951       snapshot->heap_szB = heap_szB;
952       if (is_detailed) {
953          snapshot->xt = VG_(XT_snapshot)(heap_xt);
954       }
955       snapshot->heap_extra_szB = heap_extra_szB;
956    }
957 
958    // Stack(s).
959    if (clo_stacks) {
960       snapshot->stacks_szB = stacks_szB;
961    }
962 
963    // Rest of snapshot.
964    snapshot->kind = kind;
965    snapshot->time = my_time;
966    sanity_check_snapshot(snapshot);
967 
968    // Update stats.
969    if (Peak == kind) n_peak_snapshots++;
970    if (is_detailed)  n_detailed_snapshots++;
971    n_real_snapshots++;
972 }
973 
974 
975 // Take a snapshot, if it's time, or if we've hit a peak.
976 static void
977 maybe_take_snapshot(SnapshotKind kind, const HChar* what)
978 {
979    // 'min_time_interval' is the minimum time interval between snapshots.
980    // If we try to take a snapshot and less than this much time has passed,
981    // we don't take it.  It gets larger as the program runs longer.  It's
982    // initialised to zero so that we begin by taking snapshots as quickly as
983    // possible.
984    static Time min_time_interval = 0;
985    // Zero allows startup snapshot.
986    static Time earliest_possible_time_of_next_snapshot = 0;
987    static Int  n_snapshots_since_last_detailed         = 0;
988    static Int  n_skipped_snapshots_since_last_snapshot = 0;
989 
990    Snapshot* snapshot;
991    Bool      is_detailed;
992    // Nb: we call this variable "my_time" because "time" shadows a global
993    // declaration in /usr/include/time.h on Darwin.
994    Time      my_time = get_time();
995 
996    switch (kind) {
997     case Normal:
998       // Only do a snapshot if it's time.
999       if (my_time < earliest_possible_time_of_next_snapshot) {
1000          n_skipped_snapshots++;
1001          n_skipped_snapshots_since_last_snapshot++;
1002          return;
1003       }
1004       is_detailed = (clo_detailed_freq-1 == n_snapshots_since_last_detailed);
1005       break;
1006 
1007     case Peak: {
1008       // Because we're about to do a deallocation, we're coming down from a
1009       // local peak.  If it is (a) actually a global peak, and (b) a certain
1010       // amount bigger than the previous peak, then we take a peak snapshot.
1011       // By not taking a snapshot for every peak, we save a lot of effort --
1012       // because many peaks remain peak only for a short time.
1013       SizeT total_szB = heap_szB + heap_extra_szB + stacks_szB;
1014       SizeT excess_szB_for_new_peak =
1015          (SizeT)((peak_snapshot_total_szB * clo_peak_inaccuracy) / 100);
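      // Illustrative numbers: with the default --peak-inaccuracy=1.0 and a
      // previously recorded peak of 1,000,000 B, excess_szB_for_new_peak is
      // 10,000 B, so a new peak snapshot is only taken once the total
      // exceeds 1,010,000 B.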
1016       if (total_szB <= peak_snapshot_total_szB + excess_szB_for_new_peak) {
1017          return;
1018       }
1019       is_detailed = True;
1020       break;
1021     }
1022 
1023     default:
1024       tl_assert2(0, "maybe_take_snapshot: unrecognised snapshot kind");
1025    }
1026 
1027    // Take the snapshot.
1028    snapshot = & snapshots[next_snapshot_i];
1029    take_snapshot(snapshot, kind, my_time, is_detailed);
1030 
1031    // Record if it was detailed.
1032    if (is_detailed) {
1033       n_snapshots_since_last_detailed = 0;
1034    } else {
1035       n_snapshots_since_last_detailed++;
1036    }
1037 
1038    // Update peak data, if it's a Peak snapshot.
1039    if (Peak == kind) {
1040       Int i, number_of_peaks_snapshots_found = 0;
1041 
1042       // Sanity check the size, then update our recorded peak.
1043       SizeT snapshot_total_szB =
1044          snapshot->heap_szB + snapshot->heap_extra_szB + snapshot->stacks_szB;
1045       tl_assert2(snapshot_total_szB > peak_snapshot_total_szB,
1046          "%ld, %ld\n", snapshot_total_szB, peak_snapshot_total_szB);
1047       peak_snapshot_total_szB = snapshot_total_szB;
1048 
1049       // Find the old peak snapshot, if it exists, and mark it as normal.
1050       for (i = 0; i < next_snapshot_i; i++) {
1051          if (Peak == snapshots[i].kind) {
1052             snapshots[i].kind = Normal;
1053             number_of_peaks_snapshots_found++;
1054          }
1055       }
1056       tl_assert(number_of_peaks_snapshots_found <= 1);
1057    }
1058 
1059    // Finish up verbosity and stats stuff.
1060    if (n_skipped_snapshots_since_last_snapshot > 0) {
1061       VERB(2, "  (skipped %d snapshot%s)\n",
1062          n_skipped_snapshots_since_last_snapshot,
1063          ( 1 == n_skipped_snapshots_since_last_snapshot ? "" : "s") );
1064    }
1065    VERB_snapshot(2, what, next_snapshot_i);
1066    n_skipped_snapshots_since_last_snapshot = 0;
1067 
1068    // Cull the entries, if our snapshot table is full.
1069    next_snapshot_i++;
1070    if (clo_max_snapshots == next_snapshot_i) {
1071       min_time_interval = cull_snapshots();
1072    }
1073 
1074    // Work out the earliest time when the next snapshot can happen.
1075    earliest_possible_time_of_next_snapshot = my_time + min_time_interval;
1076 }
1077 
1078 
1079 //------------------------------------------------------------//
1080 //--- Sanity checking                                      ---//
1081 //------------------------------------------------------------//
1082 
1083 static Bool ms_cheap_sanity_check ( void )
1084 {
1085    return True;   // Nothing useful we can cheaply check.
1086 }
1087 
1088 static Bool ms_expensive_sanity_check ( void )
1089 {
1090    tl_assert(heap_xt);
1091    sanity_check_snapshots_array();
1092    return True;
1093 }
1094 
1095 
1096 //------------------------------------------------------------//
1097 //--- Heap management                                      ---//
1098 //------------------------------------------------------------//
1099 
1100 // Metadata for heap blocks.  Each one contains an Xecu,
1101 // which identifies the XTree ec at which it was allocated.  From
1102 // the HP_Chunk, the XTree ec's 'space' field is incremented (at allocation)
1103 // and decremented (at deallocation).
1104 //
1105 // Nb: first two fields must match core's VgHashNode.
1106 typedef
1107    struct _HP_Chunk {
1108       struct _HP_Chunk* next;
1109       Addr              data;       // Ptr to actual block
1110       SizeT             req_szB;    // Size requested
1111       SizeT             slop_szB;   // Extra bytes given above those requested
1112       Xecu              where;      // Where allocated; XTree xecu from heap_xt
1113    }
1114    HP_Chunk;
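// To relate this to the running totals above (see record_block and
// unrecord_block below): for each live block, req_szB is counted in
// heap_szB, while slop_szB plus clo_heap_admin bytes are counted in
// heap_extra_szB.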
1115 
1116 /* Pool allocator for HP_Chunk. */
1117 static PoolAlloc *HP_chunk_poolalloc = NULL;
1118 
1119 static VgHashTable *malloc_list  = NULL;   // HP_Chunks
1120 
1121 static void update_alloc_stats(SSizeT szB_delta)
1122 {
1123    // Update total_allocs_deallocs_szB.
1124    if (szB_delta < 0) szB_delta = -szB_delta;
1125    total_allocs_deallocs_szB += szB_delta;
1126 }
1127 
1128 static void update_heap_stats(SSizeT heap_szB_delta, Int heap_extra_szB_delta)
1129 {
1130    if (heap_szB_delta < 0)
1131       tl_assert(heap_szB >= -heap_szB_delta);
1132    if (heap_extra_szB_delta < 0)
1133       tl_assert(heap_extra_szB >= -heap_extra_szB_delta);
1134 
1135    heap_extra_szB += heap_extra_szB_delta;
1136    heap_szB       += heap_szB_delta;
1137 
1138    update_alloc_stats(heap_szB_delta + heap_extra_szB_delta);
1139 }
1140 
1141 static
1142 void* record_block( ThreadId tid, void* p, SizeT req_szB, SizeT slop_szB,
1143                     Bool exclude_first_entry, Bool maybe_snapshot )
1144 {
1145    // Make new HP_Chunk node, add to malloc_list
1146    HP_Chunk* hc = VG_(allocEltPA)(HP_chunk_poolalloc);
1147    hc->req_szB  = req_szB;
1148    hc->slop_szB = slop_szB;
1149    hc->data     = (Addr)p;
1150    hc->where    = 0;
1151    VG_(HT_add_node)(malloc_list, hc);
1152 
1153    if (clo_heap) {
1154       VERB(3, "<<< record_block (%lu, %lu)\n", req_szB, slop_szB);
1155 
1156       hc->where = add_heap_xt( tid, req_szB, exclude_first_entry);
1157 
1158       if (VG_(XT_n_ips_sel)(heap_xt, hc->where) > 0) {
1159          // Update statistics.
1160          n_heap_allocs++;
1161 
1162          // Update heap stats.
1163          update_heap_stats(req_szB, clo_heap_admin + slop_szB);
1164 
1165          // Maybe take a snapshot.
1166          if (maybe_snapshot) {
1167             maybe_take_snapshot(Normal, "  alloc");
1168          }
1169 
1170       } else {
1171          // Ignored allocation.
1172          n_ignored_heap_allocs++;
1173 
1174          VERB(3, "(ignored)\n");
1175       }
1176 
1177       VERB(3, ">>>\n");
1178    }
1179 
1180    return p;
1181 }
1182 
1183 static __inline__
1184 void* alloc_and_record_block ( ThreadId tid, SizeT req_szB, SizeT req_alignB,
1185                                Bool is_zeroed )
1186 {
1187    SizeT actual_szB, slop_szB;
1188    void* p;
1189 
1190    if ((SSizeT)req_szB < 0) return NULL;
1191 
1192    // Allocate and zero if necessary.
1193    p = VG_(cli_malloc)( req_alignB, req_szB );
1194    if (!p) {
1195       return NULL;
1196    }
1197    if (is_zeroed) VG_(memset)(p, 0, req_szB);
1198    actual_szB = VG_(cli_malloc_usable_size)(p);
1199    tl_assert(actual_szB >= req_szB);
1200    slop_szB = actual_szB - req_szB;
1201 
1202    // Record block.
1203    record_block(tid, p, req_szB, slop_szB, /*exclude_first_entry*/True,
1204                 /*maybe_snapshot*/True);
1205 
1206    return p;
1207 }
1208 
1209 static __inline__
1210 void unrecord_block ( void* p, Bool maybe_snapshot, Bool exclude_first_entry )
1211 {
1212    // Remove HP_Chunk from malloc_list
1213    HP_Chunk* hc = VG_(HT_remove)(malloc_list, (UWord)p);
1214    if (NULL == hc) {
1215       return;   // must have been a bogus free()
1216    }
1217 
1218    if (clo_heap) {
1219       VERB(3, "<<< unrecord_block\n");
1220 
1221       if (VG_(XT_n_ips_sel)(heap_xt, hc->where) > 0) {
1222          // Update statistics.
1223          n_heap_frees++;
1224 
1225          // Maybe take a peak snapshot, since it's a deallocation.
1226          if (maybe_snapshot) {
1227             maybe_take_snapshot(Peak, "de-PEAK");
1228          }
1229 
1230          // Update heap stats.
1231          update_heap_stats(-hc->req_szB, -clo_heap_admin - hc->slop_szB);
1232 
1233          // Update XTree.
1234          sub_heap_xt(hc->where, hc->req_szB, exclude_first_entry);
1235 
1236          // Maybe take a snapshot.
1237          if (maybe_snapshot) {
1238             maybe_take_snapshot(Normal, "dealloc");
1239          }
1240 
1241       } else {
1242          n_ignored_heap_frees++;
1243 
1244          VERB(3, "(ignored)\n");
1245       }
1246 
1247       VERB(3, ">>> (-%lu, -%lu)\n", hc->req_szB, hc->slop_szB);
1248    }
1249 
1250    // Actually free the chunk, and the heap block (if necessary)
1251    VG_(freeEltPA) (HP_chunk_poolalloc, hc);  hc = NULL;
1252 }
1253 
1254 // Nb: --ignore-fn is tricky for realloc.  If the block's original alloc was
1255 // ignored, but the realloc is not requested to be ignored, and we are
1256 // shrinking the block, then we have to ignore the realloc -- otherwise we
1257 // could end up with negative heap sizes.  This isn't a danger if we are
1258 // growing such a block, but for consistency (it also simplifies things) we
1259 // ignore such reallocs as well.
1260 // PW Nov 2016 xtree work: why can't we just consider that a realloc of an
1261 // ignored  alloc is just a new alloc (i.e. do not remove the old sz from the
1262 // stats). Then everything would be fine, and a non ignored realloc would be
1263 // counted properly.
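// Illustrative scenario: a block malloc'd inside an --ignore-fn function was
// never added to heap_szB; if a later shrinking realloc from a non-ignored
// site were counted, heap_szB would be decremented for bytes that were never
// added -- the "negative heap sizes" risk mentioned above.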
1264 static __inline__
1265 void* realloc_block ( ThreadId tid, void* p_old, SizeT new_req_szB )
1266 {
1267    HP_Chunk* hc;
1268    void*     p_new;
1269    SizeT     old_req_szB, old_slop_szB, new_slop_szB, new_actual_szB;
1270    Xecu      old_where;
1271    Bool      is_ignored = False;
1272 
1273    // Remove the old block
1274    hc = VG_(HT_remove)(malloc_list, (UWord)p_old);
1275    if (hc == NULL) {
1276       return NULL;   // must have been a bogus realloc()
1277    }
1278 
1279    old_req_szB  = hc->req_szB;
1280    old_slop_szB = hc->slop_szB;
1281 
1282    tl_assert(!clo_pages_as_heap);  // Shouldn't be here if --pages-as-heap=yes.
1283    if (clo_heap) {
1284       VERB(3, "<<< realloc_block (%lu)\n", new_req_szB);
1285 
1286       if (VG_(XT_n_ips_sel)(heap_xt, hc->where) > 0) {
1287          // Update statistics.
1288          n_heap_reallocs++;
1289 
1290          // Maybe take a peak snapshot, if it's (effectively) a deallocation.
1291          if (new_req_szB < old_req_szB) {
1292             maybe_take_snapshot(Peak, "re-PEAK");
1293          }
1294       } else {
1295          // The original malloc was ignored, so we have to ignore the
1296          // realloc as well.
1297          is_ignored = True;
1298       }
1299    }
1300 
1301    // Actually do the allocation, if necessary.
1302    if (new_req_szB <= old_req_szB + old_slop_szB) {
1303       // New size is smaller or same;  block not moved.
1304       p_new = p_old;
1305       new_slop_szB = old_slop_szB + (old_req_szB - new_req_szB);
1306 
1307    } else {
1308       // New size is bigger;  make new block, copy shared contents, free old.
1309       p_new = VG_(cli_malloc)(VG_(clo_alignment), new_req_szB);
1310       if (!p_new) {
1311          // Nb: if realloc fails, NULL is returned but the old block is not
1312          // touched.  What an awful function.
1313          return NULL;
1314       }
1315       VG_(memcpy)(p_new, p_old, old_req_szB + old_slop_szB);
1316       VG_(cli_free)(p_old);
1317       new_actual_szB = VG_(cli_malloc_usable_size)(p_new);
1318       tl_assert(new_actual_szB >= new_req_szB);
1319       new_slop_szB = new_actual_szB - new_req_szB;
1320    }
1321 
1322    if (p_new) {
1323       // Update HP_Chunk.
1324       hc->data     = (Addr)p_new;
1325       hc->req_szB  = new_req_szB;
1326       hc->slop_szB = new_slop_szB;
1327       old_where    = hc->where;
1328       hc->where    = 0;
1329 
1330       // Update XTree.
1331       if (clo_heap) {
1332          hc->where = add_heap_xt( tid, new_req_szB,
1333                                   /*exclude_first_entry*/True);
1334          if (!is_ignored && VG_(XT_n_ips_sel)(heap_xt, hc->where) > 0) {
1335             sub_heap_xt(old_where, old_req_szB, /*exclude_first_entry*/True);
1336          } else {
1337             // The realloc itself is ignored.
1338             is_ignored = True;
1339 
1340             /* XTREE??? hack to have something compatible with pre
1341                m_xtree massif: if the previous alloc/realloc was
1342                ignored, and this one is not ignored, then keep the
1343                previous where, to continue marking this memory as
1344                ignored. */
1345             if (VG_(XT_n_ips_sel)(heap_xt, hc->where) > 0
1346                 && VG_(XT_n_ips_sel)(heap_xt, old_where) == 0)
1347                hc->where = old_where;
1348 
1349             // Update statistics.
1350             n_ignored_heap_reallocs++;
1351          }
1352       }
1353    }
1354 
1355    // Now insert the new hc (with a possibly new 'data' field) into
1356    // malloc_list.  If this realloc() did not increase the memory size, we
1357    // will have removed and then re-added hc unnecessarily.  But that's ok
1358    // because shrinking a block with realloc() is (presumably) much rarer
1359    // than growing it, and this way simplifies the growing case.
1360    VG_(HT_add_node)(malloc_list, hc);
1361 
1362    if (clo_heap) {
1363       if (!is_ignored) {
1364          // Update heap stats.
1365          update_heap_stats(new_req_szB - old_req_szB,
1366                            new_slop_szB - old_slop_szB);
1367 
1368          // Maybe take a snapshot.
1369          maybe_take_snapshot(Normal, "realloc");
1370       } else {
1371 
1372          VERB(3, "(ignored)\n");
1373       }
1374 
1375       VERB(3, ">>> (%ld, %ld)\n",
1376            (SSizeT)(new_req_szB - old_req_szB),
1377            (SSizeT)(new_slop_szB - old_slop_szB));
1378    }
1379 
1380    return p_new;
1381 }
1382 
1383 
1384 //------------------------------------------------------------//
1385 //--- malloc() et al replacement wrappers                  ---//
1386 //------------------------------------------------------------//
1387 
1388 static void* ms_malloc ( ThreadId tid, SizeT szB )
1389 {
1390    return alloc_and_record_block( tid, szB, VG_(clo_alignment), /*is_zeroed*/False );
1391 }
1392 
1393 static void* ms___builtin_new ( ThreadId tid, SizeT szB )
1394 {
1395    return alloc_and_record_block( tid, szB, VG_(clo_alignment), /*is_zeroed*/False );
1396 }
1397 
1398 static void* ms___builtin_vec_new ( ThreadId tid, SizeT szB )
1399 {
1400    return alloc_and_record_block( tid, szB, VG_(clo_alignment), /*is_zeroed*/False );
1401 }
1402 
1403 static void* ms_calloc ( ThreadId tid, SizeT m, SizeT szB )
1404 {
1405    return alloc_and_record_block( tid, m*szB, VG_(clo_alignment), /*is_zeroed*/True );
1406 }
1407 
1408 static void *ms_memalign ( ThreadId tid, SizeT alignB, SizeT szB )
1409 {
1410    return alloc_and_record_block( tid, szB, alignB, False );
1411 }
1412 
1413 static void ms_free ( ThreadId tid __attribute__((unused)), void* p )
1414 {
1415    unrecord_block(p, /*maybe_snapshot*/True, /*exclude_first_entry*/True);
1416    VG_(cli_free)(p);
1417 }
1418 
1419 static void ms___builtin_delete ( ThreadId tid, void* p )
1420 {
1421    unrecord_block(p, /*maybe_snapshot*/True, /*exclude_first_entry*/True);
1422    VG_(cli_free)(p);
1423 }
1424 
1425 static void ms___builtin_vec_delete ( ThreadId tid, void* p )
1426 {
1427    unrecord_block(p, /*maybe_snapshot*/True, /*exclude_first_entry*/True);
1428    VG_(cli_free)(p);
1429 }
1430 
1431 static void* ms_realloc ( ThreadId tid, void* p_old, SizeT new_szB )
1432 {
1433    return realloc_block(tid, p_old, new_szB);
1434 }
1435 
1436 static SizeT ms_malloc_usable_size ( ThreadId tid, void* p )
1437 {
1438    HP_Chunk* hc = VG_(HT_lookup)( malloc_list, (UWord)p );
1439 
1440    return ( hc ? hc->req_szB + hc->slop_szB : 0 );
1441 }
1442 
1443 //------------------------------------------------------------//
1444 //--- Page handling                                        ---//
1445 //------------------------------------------------------------//
1446 
1447 static
1448 void ms_record_page_mem ( Addr a, SizeT len )
1449 {
1450    ThreadId tid = VG_(get_running_tid)();
1451    Addr end;
1452    tl_assert(VG_IS_PAGE_ALIGNED(len));
1453    tl_assert(len >= VKI_PAGE_SIZE);
1454    // Record the first N-1 pages as blocks, but don't do any snapshots.
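   // (E.g. a page-aligned 3-page mapping becomes three one-page blocks; only
   // the last one, recorded below, may trigger a snapshot.)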
1455    for (end = a + len - VKI_PAGE_SIZE; a < end; a += VKI_PAGE_SIZE) {
1456       record_block( tid, (void*)a, VKI_PAGE_SIZE, /*slop_szB*/0,
1457                     /*exclude_first_entry*/False, /*maybe_snapshot*/False );
1458    }
1459    // Record the last page as a block, and maybe do a snapshot afterwards.
1460    record_block( tid, (void*)a, VKI_PAGE_SIZE, /*slop_szB*/0,
1461                  /*exclude_first_entry*/False, /*maybe_snapshot*/True );
1462 }
1463 
1464 static
1465 void ms_unrecord_page_mem( Addr a, SizeT len )
1466 {
1467    Addr end;
1468    tl_assert(VG_IS_PAGE_ALIGNED(len));
1469    tl_assert(len >= VKI_PAGE_SIZE);
1470    // Unrecord the first page. This might be the peak, so do a snapshot.
1471    unrecord_block((void*)a, /*maybe_snapshot*/True,
1472                   /*exclude_first_entry*/False);
1473    a += VKI_PAGE_SIZE;
1474    // Then unrecord the remaining pages, but without snapshots.
1475    for (end = a + len - VKI_PAGE_SIZE; a < end; a += VKI_PAGE_SIZE) {
1476       unrecord_block((void*)a, /*maybe_snapshot*/False,
1477                      /*exclude_first_entry*/False);
1478    }
1479 }
1480 
1481 //------------------------------------------------------------//
1482 
1483 static
1484 void ms_new_mem_mmap ( Addr a, SizeT len,
1485                        Bool rr, Bool ww, Bool xx, ULong di_handle )
1486 {
1487    tl_assert(VG_IS_PAGE_ALIGNED(len));
1488    ms_record_page_mem(a, len);
1489 }
1490 
1491 static
1492 void ms_new_mem_startup( Addr a, SizeT len,
1493                          Bool rr, Bool ww, Bool xx, ULong di_handle )
1494 {
1495    // startup maps are always page-sized, except that the trampoline page
1496    // is marked by the core as only being the size of the trampoline itself,
1497    // which is something like 57 bytes.  Round it up to page size.
1498    len = VG_PGROUNDUP(len);
1499    ms_record_page_mem(a, len);
1500 }
1501 
1502 static
1503 void ms_new_mem_brk ( Addr a, SizeT len, ThreadId tid )
1504 {
1505    // The brk limit is not necessarily aligned on a page boundary.
1506    // If the newly brk-ed memory implies allocating one or more new pages,
1507    // then call ms_record_page_mem with page-aligned parameters;
1508    // otherwise just ignore it.
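   // For example, assuming 4 KB pages: a brk extension starting at a = 0x8010
   // with len = 0x100 stays on page 0x8000, so nothing is recorded; with
   // len = 0x2000 the top page becomes 0xA000 and we call
   // ms_record_page_mem(0x8000, 0x2000).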
1509    Addr old_bottom_page = VG_PGROUNDDN(a - 1);
1510    Addr new_top_page = VG_PGROUNDDN(a + len - 1);
1511    if (old_bottom_page != new_top_page)
1512       ms_record_page_mem(VG_PGROUNDDN(a),
1513                          (new_top_page - old_bottom_page));
1514 }
1515 
1516 static
1517 void ms_copy_mem_remap( Addr from, Addr to, SizeT len)
1518 {
1519    tl_assert(VG_IS_PAGE_ALIGNED(len));
1520    ms_unrecord_page_mem(from, len);
1521    ms_record_page_mem(to, len);
1522 }
1523 
1524 static
1525 void ms_die_mem_munmap( Addr a, SizeT len )
1526 {
1527    tl_assert(VG_IS_PAGE_ALIGNED(len));
1528    ms_unrecord_page_mem(a, len);
1529 }
1530 
1531 static
1532 void ms_die_mem_brk( Addr a, SizeT len )
1533 {
1534    // Call ms_unrecord_page_mem only if one or more pages are de-allocated.
1535    // See ms_new_mem_brk for more details.
1536    Addr new_bottom_page = VG_PGROUNDDN(a - 1);
1537    Addr old_top_page = VG_PGROUNDDN(a + len - 1);
1538    if (old_top_page != new_bottom_page)
1539       ms_unrecord_page_mem(VG_PGROUNDDN(a),
1540                            (old_top_page - new_bottom_page));
1541 
1542 }
1543 
1544 //------------------------------------------------------------//
1545 //--- Stacks                                               ---//
1546 //------------------------------------------------------------//
1547 
1548 // We really want the inlining to occur...
1549 #define INLINE    inline __attribute__((always_inline))
1550 
1551 static void update_stack_stats(SSizeT stack_szB_delta)
1552 {
1553    if (stack_szB_delta < 0) tl_assert(stacks_szB >= -stack_szB_delta);
1554    stacks_szB += stack_szB_delta;
1555 
1556    update_alloc_stats(stack_szB_delta);
1557 }
1558 
1559 static INLINE void new_mem_stack_2(SizeT len, const HChar* what)
1560 {
1561    if (have_started_executing_code) {
1562       VERB(3, "<<< new_mem_stack (%lu)\n", len);
1563       n_stack_allocs++;
1564       update_stack_stats(len);
1565       maybe_take_snapshot(Normal, what);
1566       VERB(3, ">>>\n");
1567    }
1568 }
1569 
1570 static INLINE void die_mem_stack_2(SizeT len, const HChar* what)
1571 {
1572    if (have_started_executing_code) {
1573       VERB(3, "<<< die_mem_stack (-%lu)\n", len);
1574       n_stack_frees++;
1575       maybe_take_snapshot(Peak,   "stkPEAK");
1576       update_stack_stats(-len);
1577       maybe_take_snapshot(Normal, what);
1578       VERB(3, ">>>\n");
1579    }
1580 }
1581 
1582 static void new_mem_stack(Addr a, SizeT len)
1583 {
1584    new_mem_stack_2(len, "stk-new");
1585 }
1586 
1587 static void die_mem_stack(Addr a, SizeT len)
1588 {
1589    die_mem_stack_2(len, "stk-die");
1590 }
1591 
1592 static void new_mem_stack_signal(Addr a, SizeT len, ThreadId tid)
1593 {
1594    new_mem_stack_2(len, "sig-new");
1595 }
1596 
1597 static void die_mem_stack_signal(Addr a, SizeT len)
1598 {
1599    die_mem_stack_2(len, "sig-die");
1600 }
1601 
1602 
1603 //------------------------------------------------------------//
1604 //--- Client Requests                                      ---//
1605 //------------------------------------------------------------//
1606 
1607 static void print_monitor_help ( void )
1608 {
1609    VG_(gdb_printf) (
1610 "\n"
1611 "massif monitor commands:\n"
1612 "  snapshot [<filename>]\n"
1613 "  detailed_snapshot [<filename>]\n"
1614 "      takes a snapshot (or a detailed snapshot)\n"
1615 "      and saves it in <filename>\n"
1616 "             default <filename> is massif.vgdb.out\n"
1617 "  all_snapshots [<filename>]\n"
1618 "      saves all snapshot(s) taken so far in <filename>\n"
1619 "             default <filename> is massif.vgdb.out\n"
1620 "  xtmemory [<filename>]\n"
1621 "        dump xtree memory profile in <filename> (default xtmemory.kcg.%%p.%%n)\n"
1622 "\n");
1623 }
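// These commands are issued through the Valgrind gdbserver; for instance
// (file name and pid purely illustrative):
//   (gdb) monitor snapshot massif.snap.out
// or, without gdb:
//   vgdb --pid=12345 detailed_snapshot massif.detailed.out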
1624 
1625 
1626 /* Forward declaration.
1627    return True if request recognised, False otherwise */
1628 static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req);
1629 static Bool ms_handle_client_request ( ThreadId tid, UWord* argv, UWord* ret )
1630 {
1631    switch (argv[0]) {
1632    case VG_USERREQ__MALLOCLIKE_BLOCK: {
1633       void* p   = (void*)argv[1];
1634       SizeT szB =        argv[2];
1635       record_block( tid, p, szB, /*slop_szB*/0, /*exclude_first_entry*/False,
1636                     /*maybe_snapshot*/True );
1637       *ret = 0;
1638       return True;
1639    }
1640    case VG_USERREQ__RESIZEINPLACE_BLOCK: {
1641       void* p        = (void*)argv[1];
1642       SizeT newSizeB =       argv[3];
1643 
1644       unrecord_block(p, /*maybe_snapshot*/True, /*exclude_first_entry*/False);
1645       record_block(tid, p, newSizeB, /*slop_szB*/0,
1646                    /*exclude_first_entry*/False, /*maybe_snapshot*/True);
1647       return True;
1648    }
1649    case VG_USERREQ__FREELIKE_BLOCK: {
1650       void* p = (void*)argv[1];
1651       unrecord_block(p, /*maybe_snapshot*/True, /*exclude_first_entry*/False);
1652       *ret = 0;
1653       return True;
1654    }
1655    case VG_USERREQ__GDB_MONITOR_COMMAND: {
1656      Bool handled = handle_gdb_monitor_command (tid, (HChar*)argv[1]);
1657      if (handled)
1658        *ret = 1;
1659      else
1660        *ret = 0;
1661      return handled;
1662    }
1663 
1664    default:
1665       *ret = 0;
1666       return False;
1667    }
1668 }
1669 
1670 //------------------------------------------------------------//
1671 //--- Instrumentation                                      ---//
1672 //------------------------------------------------------------//
1673 
1674 static void add_counter_update(IRSB* sbOut, Int n)
1675 {
1676    #if defined(VG_BIGENDIAN)
1677    # define END Iend_BE
1678    #elif defined(VG_LITTLEENDIAN)
1679    # define END Iend_LE
1680    #else
1681    # error "Unknown endianness"
1682    #endif
1683    // Add code to increment 'guest_instrs_executed' by 'n', like this:
1684    //   WrTmp(t1, Load64(&guest_instrs_executed))
1685    //   WrTmp(t2, Add64(RdTmp(t1), Const(n)))
1686    //   Store(&guest_instrs_executed, t2)
1687    IRTemp t1 = newIRTemp(sbOut->tyenv, Ity_I64);
1688    IRTemp t2 = newIRTemp(sbOut->tyenv, Ity_I64);
1689    IRExpr* counter_addr = mkIRExpr_HWord( (HWord)&guest_instrs_executed );
1690 
1691    IRStmt* st1 = IRStmt_WrTmp(t1, IRExpr_Load(END, Ity_I64, counter_addr));
1692    IRStmt* st2 =
1693       IRStmt_WrTmp(t2,
1694                    IRExpr_Binop(Iop_Add64, IRExpr_RdTmp(t1),
1695                                            IRExpr_Const(IRConst_U64(n))));
1696    IRStmt* st3 = IRStmt_Store(END, counter_addr, IRExpr_RdTmp(t2));
1697 
1698    addStmtToIRSB( sbOut, st1 );
1699    addStmtToIRSB( sbOut, st2 );
1700    addStmtToIRSB( sbOut, st3 );
1701 }
1702 
1703 static IRSB* ms_instrument2( IRSB* sbIn )
1704 {
1705    Int   i, n = 0;
1706    IRSB* sbOut;
1707 
1708    // We increment the instruction count in two places:
1709    // - just before any Ist_Exit statements;
1710    // - just before the IRSB's end.
1711    // In the former case, we zero 'n' and then continue instrumenting.
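   // For example, an SB whose statements are IMark, IMark, Exit, IMark gets
   // an update of +2 inserted before the Exit ('n' is then reset to 0) and an
   // update of +1 inserted before the SB end.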
1712 
1713    sbOut = deepCopyIRSBExceptStmts(sbIn);
1714 
1715    for (i = 0; i < sbIn->stmts_used; i++) {
1716       IRStmt* st = sbIn->stmts[i];
1717 
1718       if (!st || st->tag == Ist_NoOp) continue;
1719 
1720       if (st->tag == Ist_IMark) {
1721          n++;
1722       } else if (st->tag == Ist_Exit) {
1723          if (n > 0) {
1724             // Add an increment before the Exit statement, then reset 'n'.
1725             add_counter_update(sbOut, n);
1726             n = 0;
1727          }
1728       }
1729       addStmtToIRSB( sbOut, st );
1730    }
1731 
1732    if (n > 0) {
1733       // Add an increment before the SB end.
1734       add_counter_update(sbOut, n);
1735    }
1736    return sbOut;
1737 }
1738 
1739 static
1740 IRSB* ms_instrument ( VgCallbackClosure* closure,
1741                       IRSB* sbIn,
1742                       const VexGuestLayout* layout,
1743                       const VexGuestExtents* vge,
1744                       const VexArchInfo* archinfo_host,
1745                       IRType gWordTy, IRType hWordTy )
1746 {
1747    if (! have_started_executing_code) {
1748       // Do an initial sample to guarantee that we have at least one.
1749       // We use 'maybe_take_snapshot' instead of 'take_snapshot' to ensure
1750       // 'maybe_take_snapshot's internal static variables are initialised.
1751       have_started_executing_code = True;
1752       maybe_take_snapshot(Normal, "startup");
1753    }
1754 
1755    if      (clo_time_unit == TimeI)  { return ms_instrument2(sbIn); }
1756    else if (clo_time_unit == TimeMS) { return sbIn; }
1757    else if (clo_time_unit == TimeB)  { return sbIn; }
1758    else                              { tl_assert2(0, "bad --time-unit value"); }
1759 }
1760 
1761 
1762 //------------------------------------------------------------//
1763 //--- Writing snapshots                                    ---//
1764 //------------------------------------------------------------//
1765 
1766 static void pp_snapshot(MsFile *fp, Snapshot* snapshot, Int snapshot_n)
1767 {
1768    const Massif_Header header = (Massif_Header) {
1769       .snapshot_n    = snapshot_n,
1770       .time          = snapshot->time,
1771       .sz_B          = snapshot->heap_szB,
1772       .extra_B       = snapshot->heap_extra_szB,
1773       .stacks_B      = snapshot->stacks_szB,
1774       .detailed      = is_detailed_snapshot(snapshot),
1775       .peak          = Peak == snapshot->kind,
1776       .top_node_desc = clo_pages_as_heap ?
1777         "(page allocation syscalls) mmap/mremap/brk, --alloc-fns, etc."
1778         : "(heap allocation functions) malloc/new/new[], --alloc-fns, etc.",
1779       .sig_threshold = clo_threshold
1780    };
1781 
1782    sanity_check_snapshot(snapshot);
1783 
1784    VG_(XT_massif_print)(fp, snapshot->xt, &header, alloc_szB);
1785 }
1786 
1787 static void write_snapshots_to_file(const HChar* massif_out_file,
1788                                     Snapshot snapshots_array[],
1789                                     Int nr_elements)
1790 {
1791    Int i;
1792    MsFile *fp;
1793 
1794    fp = VG_(XT_massif_open)(massif_out_file,
1795                             NULL,
1796                             args_for_massif,
1797                             TimeUnit_to_string(clo_time_unit));
1798    if (fp == NULL)
1799       return; // Error reported by VG_(XT_massif_open)
1800 
1801    for (i = 0; i < nr_elements; i++) {
1802       Snapshot* snapshot = & snapshots_array[i];
1803       pp_snapshot(fp, snapshot, i);     // Detailed snapshot!
1804    }
1805    VG_(XT_massif_close) (fp);
1806 }
1807 
1808 static void write_snapshots_array_to_file(void)
1809 {
1810    // Setup output filename.  Nb: it's important to do this now, ie. as late
1811    // as possible.  If we do it at start-up and the program forks and the
1812    // output file format string contains a %p (pid) specifier, both the
1813    // parent and child will incorrectly write to the same file;  this
1814    // happened in 3.3.0.
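   // (E.g. --massif-out-file=massif.out.%p expands here to something like
   // massif.out.1234 in the parent and massif.out.1235 in a forked child;
   // pids illustrative.)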
1815    HChar* massif_out_file =
1816       VG_(expand_file_name)("--massif-out-file", clo_massif_out_file);
1817    write_snapshots_to_file (massif_out_file, snapshots, next_snapshot_i);
1818    VG_(free)(massif_out_file);
1819 }
1820 
1821 static void handle_snapshot_monitor_command (const HChar *filename,
1822                                              Bool detailed)
1823 {
1824    Snapshot snapshot;
1825 
1826    if (!clo_pages_as_heap && !have_started_executing_code) {
1827       // See comments of variable have_started_executing_code.
1828       VG_(gdb_printf)
1829          ("error: cannot take snapshot before execution has started\n");
1830       return;
1831    }
1832 
1833    clear_snapshot(&snapshot, /* do_sanity_check */ False);
1834    take_snapshot(&snapshot, Normal, get_time(), detailed);
1835    write_snapshots_to_file ((filename == NULL) ?
1836                             "massif.vgdb.out" : filename,
1837                             &snapshot,
1838                             1);
1839    delete_snapshot(&snapshot);
1840 }
1841 
1842 static void handle_all_snapshots_monitor_command (const HChar *filename)
1843 {
1844    if (!clo_pages_as_heap && !have_started_executing_code) {
1845       // See comments of variable have_started_executing_code.
1846       VG_(gdb_printf)
1847          ("error: cannot take snapshot before execution has started\n");
1848       return;
1849    }
1850 
1851    write_snapshots_to_file ((filename == NULL) ?
1852                             "massif.vgdb.out" : filename,
1853                             snapshots, next_snapshot_i);
1854 }
1855 
1856 static void xtmemory_report_next_block(XT_Allocs* xta, ExeContext** ec_alloc)
1857 {
1858    const HP_Chunk* hc = VG_(HT_Next)(malloc_list);
1859    if (hc) {
1860       xta->nbytes = hc->req_szB;
1861       xta->nblocks = 1;
1862       *ec_alloc = VG_(XT_get_ec_from_xecu)(heap_xt, hc->where);
1863    } else
1864       xta->nblocks = 0;
1865 }
1866 static void ms_xtmemory_report ( const HChar* filename, Bool fini )
1867 {
1868    // Make xtmemory_report_next_block ready to be called.
1869    VG_(HT_ResetIter)(malloc_list);
1870    VG_(XTMemory_report)(filename, fini, xtmemory_report_next_block,
1871                         VG_(XT_filter_maybe_below_main));
1872    /* As massif already filters one top function, use as filter
1873       VG_(XT_filter_maybe_below_main). */
1874 }
1875 
1876 static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req)
1877 {
1878    HChar* wcmd;
1879    HChar s[VG_(strlen)(req) + 1]; /* copy for strtok_r */
1880    HChar *ssaveptr;
1881 
1882    VG_(strcpy) (s, req);
1883 
1884    wcmd = VG_(strtok_r) (s, " ", &ssaveptr);
1885    switch (VG_(keyword_id) ("help snapshot detailed_snapshot all_snapshots"
1886                             " xtmemory",
1887                             wcmd, kwd_report_duplicated_matches)) {
1888    case -2: /* multiple matches */
1889       return True;
1890    case -1: /* not found */
1891       return False;
1892    case  0: /* help */
1893       print_monitor_help();
1894       return True;
1895    case  1: { /* snapshot */
1896       HChar* filename;
1897       filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
1898       handle_snapshot_monitor_command (filename, False /* detailed */);
1899       return True;
1900    }
1901    case  2: { /* detailed_snapshot */
1902       HChar* filename;
1903       filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
1904       handle_snapshot_monitor_command (filename, True /* detailed */);
1905       return True;
1906    }
1907    case  3: { /* all_snapshots */
1908       HChar* filename;
1909       filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
1910       handle_all_snapshots_monitor_command (filename);
1911       return True;
1912    }
1913    case  4: { /* xtmemory */
1914       HChar* filename;
1915       filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
1916       ms_xtmemory_report (filename, False);
1917       return True;
1918    }
1919    default:
1920       tl_assert(0);
1921       return False;
1922    }
1923 }
1924 
1925 static void ms_print_stats (void)
1926 {
1927 #define STATS(format, args...) \
1928       VG_(dmsg)("Massif: " format, ##args)
1929 
1930    STATS("heap allocs:           %u\n", n_heap_allocs);
1931    STATS("heap reallocs:         %u\n", n_heap_reallocs);
1932    STATS("heap frees:            %u\n", n_heap_frees);
1933    STATS("ignored heap allocs:   %u\n", n_ignored_heap_allocs);
1934    STATS("ignored heap frees:    %u\n", n_ignored_heap_frees);
1935    STATS("ignored heap reallocs: %u\n", n_ignored_heap_reallocs);
1936    STATS("stack allocs:          %u\n", n_stack_allocs);
1937    STATS("skipped snapshots:     %u\n", n_skipped_snapshots);
1938    STATS("real snapshots:        %u\n", n_real_snapshots);
1939    STATS("detailed snapshots:    %u\n", n_detailed_snapshots);
1940    STATS("peak snapshots:        %u\n", n_peak_snapshots);
1941    STATS("cullings:              %u\n", n_cullings);
1942 #undef STATS
1943 }
1944 
1945 
1946 //------------------------------------------------------------//
1947 //--- Finalisation                                         ---//
1948 //------------------------------------------------------------//
1949 
1950 static void ms_fini(Int exit_status)
1951 {
1952    ms_xtmemory_report(VG_(clo_xtree_memory_file), True);
1953 
1954    // Output.
1955    write_snapshots_array_to_file();
1956 
1957    if (VG_(clo_stats))
1958       ms_print_stats();
1959 }
1960 
1961 
1962 //------------------------------------------------------------//
1963 //--- Initialisation                                       ---//
1964 //------------------------------------------------------------//
1965 
1966 static void ms_post_clo_init(void)
1967 {
1968    Int i;
1969    HChar* LD_PRELOAD_val;
1970 
1971    /* We will record execontexts of up to clo_depth + overestimate entries
1972       and store them as ECs, so we need to increase the backtrace size if
1973       it is smaller than what we will store. */
1974    if (VG_(clo_backtrace_size) < clo_depth + MAX_OVERESTIMATE)
1975       VG_(clo_backtrace_size) = clo_depth + MAX_OVERESTIMATE;
1976 
1977    // Check options.
1978    if (clo_pages_as_heap) {
1979       if (clo_stacks) {
1980          VG_(fmsg_bad_option)("--pages-as-heap=yes",
1981             "Cannot be used together with --stacks=yes");
1982       }
1983    }
1984    if (!clo_heap) {
1985       clo_pages_as_heap = False;
1986    }
1987 
1988    // If --pages-as-heap=yes we don't want malloc replacement to occur.  So we
1989    // disable vgpreload_massif-$PLATFORM.so by removing it from LD_PRELOAD (or
1990    // platform-equivalent). This is a bit of a hack, but LD_PRELOAD is set up
1991    // well before tool initialisation, so this seems the best way to do it.
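   // For instance (illustrative value), an LD_PRELOAD of
   //   ".../vgpreload_core-amd64-linux.so:.../vgpreload_massif-amd64-linux.so"
   // is rewritten in place below to just ".../vgpreload_core-amd64-linux.so".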
1992    if (clo_pages_as_heap) {
1993       HChar* s1;
1994       HChar* s2;
1995 
1996       clo_heap_admin = 0;     // No heap admin on pages.
1997 
1998       LD_PRELOAD_val = VG_(getenv)( VG_(LD_PRELOAD_var_name) );
1999       tl_assert(LD_PRELOAD_val);
2000 
2001       VERB(2, "clo_pages_as_heap orig LD_PRELOAD '%s'\n", LD_PRELOAD_val);
2002 
2003       // Make sure the vgpreload_core-$PLATFORM entry is there, for sanity.
2004       s1 = VG_(strstr)(LD_PRELOAD_val, "vgpreload_core");
2005       tl_assert(s1);
2006 
2007       // Now find the vgpreload_massif-$PLATFORM entry.
2008       s1 = VG_(strstr)(LD_PRELOAD_val, "vgpreload_massif");
2009       tl_assert(s1);
2010       s2 = s1;
2011 
2012       // Position s1 on the previous ':', which must be there because
2013       // of the preceding vgpreload_core-$PLATFORM entry.
2014       for (; *s1 != ':'; s1--)
2015          ;
2016 
2017       // Position s2 on the next ':' or \0
2018       for (; *s2 != ':' && *s2 != '\0'; s2++)
2019          ;
2020 
2021       // Move all characters from s2 to s1
2022       while ((*s1++ = *s2++))
2023          ;
2024 
2025       VERB(2, "clo_pages_as_heap cleaned LD_PRELOAD '%s'\n", LD_PRELOAD_val);
2026    }
2027 
2028    // Print alloc-fns and ignore-fns, if necessary.
2029    if (VG_(clo_verbosity) > 1) {
2030       VERB(1, "alloc-fns:\n");
2031       for (i = 0; i < VG_(sizeXA)(alloc_fns); i++) {
2032          HChar** fn_ptr = VG_(indexXA)(alloc_fns, i);
2033          VERB(1, "  %s\n", *fn_ptr);
2034       }
2035 
2036       VERB(1, "ignore-fns:\n");
2037       if (0 == VG_(sizeXA)(ignore_fns)) {
2038          VERB(1, "  <empty>\n");
2039       }
2040       for (i = 0; i < VG_(sizeXA)(ignore_fns); i++) {
2041          HChar** fn_ptr = VG_(indexXA)(ignore_fns, i);
2042          VERB(1, "  %d: %s\n", i, *fn_ptr);
2043       }
2044    }
2045 
2046    // Events to track.
2047    if (clo_stacks) {
2048       VG_(track_new_mem_stack)        ( new_mem_stack        );
2049       VG_(track_die_mem_stack)        ( die_mem_stack        );
2050       VG_(track_new_mem_stack_signal) ( new_mem_stack_signal );
2051       VG_(track_die_mem_stack_signal) ( die_mem_stack_signal );
2052    }
2053 
2054    if (clo_pages_as_heap) {
2055       VG_(track_new_mem_startup) ( ms_new_mem_startup );
2056       VG_(track_new_mem_brk)     ( ms_new_mem_brk     );
2057       VG_(track_new_mem_mmap)    ( ms_new_mem_mmap    );
2058 
2059       VG_(track_copy_mem_remap)  ( ms_copy_mem_remap  );
2060 
2061       VG_(track_die_mem_brk)     ( ms_die_mem_brk     );
2062       VG_(track_die_mem_munmap)  ( ms_die_mem_munmap  );
2063    }
2064 
2065    // Initialise snapshot array, and sanity-check it.
2066    snapshots = VG_(malloc)("ms.main.mpoci.1",
2067                            sizeof(Snapshot) * clo_max_snapshots);
2068    // We don't want to do snapshot sanity checks here, because they're
2069    // currently uninitialised.
2070    for (i = 0; i < clo_max_snapshots; i++) {
2071       clear_snapshot( & snapshots[i], /*do_sanity_check*/False );
2072    }
2073    sanity_check_snapshots_array();
2074 
2075    if (VG_(clo_xtree_memory) == Vg_XTMemory_Full)
2076       // Activate full xtree memory profiling.
2077       // As massif already filters one top function, use as filter
2078       // VG_(XT_filter_maybe_below_main).
2079       VG_(XTMemory_Full_init)(VG_(XT_filter_maybe_below_main));
2080 
2081 }
2082 
2083 static void ms_pre_clo_init(void)
2084 {
2085    VG_(details_name)            ("Massif");
2086    VG_(details_version)         (NULL);
2087    VG_(details_description)     ("a heap profiler");
2088    VG_(details_copyright_author)(
2089       "Copyright (C) 2003-2017, and GNU GPL'd, by Nicholas Nethercote");
2090    VG_(details_bug_reports_to)  (VG_BUGS_TO);
2091 
2092    VG_(details_avg_translation_sizeB) ( 330 );
2093 
2094    VG_(clo_vex_control).iropt_register_updates_default
2095       = VG_(clo_px_file_backed)
2096       = VexRegUpdSpAtMemAccess; // overridable by the user.
2097 
2098    // Basic functions.
2099    VG_(basic_tool_funcs)          (ms_post_clo_init,
2100                                    ms_instrument,
2101                                    ms_fini);
2102 
2103    // Needs.
2104    VG_(needs_libc_freeres)();
2105    VG_(needs_cxx_freeres)();
2106    VG_(needs_command_line_options)(ms_process_cmd_line_option,
2107                                    ms_print_usage,
2108                                    ms_print_debug_usage);
2109    VG_(needs_client_requests)     (ms_handle_client_request);
2110    VG_(needs_sanity_checks)       (ms_cheap_sanity_check,
2111                                    ms_expensive_sanity_check);
2112    VG_(needs_print_stats)         (ms_print_stats);
2113    VG_(needs_malloc_replacement)  (ms_malloc,
2114                                    ms___builtin_new,
2115                                    ms___builtin_vec_new,
2116                                    ms_memalign,
2117                                    ms_calloc,
2118                                    ms_free,
2119                                    ms___builtin_delete,
2120                                    ms___builtin_vec_delete,
2121                                    ms_realloc,
2122                                    ms_malloc_usable_size,
2123                                    0 );
2124 
2125    // HP_Chunks.
2126    HP_chunk_poolalloc = VG_(newPA)
2127       (sizeof(HP_Chunk),
2128        1000,
2129        VG_(malloc),
2130        "massif MC_Chunk pool",
2131        VG_(free));
2132    malloc_list = VG_(HT_construct)( "Massif's malloc list" );
2133 
2134    // Heap XTree
2135    heap_xt = VG_(XT_create)(VG_(malloc),
2136                             "ms.xtrees",
2137                             VG_(free),
2138                             sizeof(SizeT),
2139                             init_szB, add_szB, sub_szB,
2140                             filter_IPs);
2141 
2142    // Initialise alloc_fns and ignore_fns.
2143    init_alloc_fns();
2144    init_ignore_fns();
2145 
2146    // Initialise args_for_massif.
2147    args_for_massif = VG_(newXA)(VG_(malloc), "ms.main.mprci.1",
2148                                 VG_(free), sizeof(HChar*));
2149 }
2150 
2151 VG_DETERMINE_INTERFACE_VERSION(ms_pre_clo_init)
2152 
2153 //--------------------------------------------------------------------//
2154 //--- end                                                          ---//
2155 //--------------------------------------------------------------------//
2156