//===-- memprof_rtl.cpp --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemProfiler, a memory profiler.
//
// Main file of the MemProf run-time library.
//===----------------------------------------------------------------------===//

#include "memprof_allocator.h"
#include "memprof_interceptors.h"
#include "memprof_interface_internal.h"
#include "memprof_internal.h"
#include "memprof_mapping.h"
#include "memprof_stack.h"
#include "memprof_stats.h"
#include "memprof_thread.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

#include <time.h>

uptr __memprof_shadow_memory_dynamic_address; // Global interface symbol.

// Allow the user to specify a profile output file via the binary.
SANITIZER_WEAK_ATTRIBUTE char __memprof_profile_filename[1];

namespace __memprof {
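// Die callback installed into sanitizer_common during initialization: dumps
// the process module map if requested and optionally unmaps the shadow region
// before exiting. Repeat callers spin in a busy loop so we never die twice.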
static void MemprofDie() {
  static atomic_uint32_t num_calls;
  if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) {
    // Don't die twice - run a busy loop.
    while (1) {
    }
  }
  if (common_flags()->print_module_map >= 1)
    DumpProcessMap();
  if (flags()->unmap_shadow_on_exit) {
    if (kHighShadowEnd)
      UnmapOrDie((void *)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
  }
}
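// Unwind callback installed via SetCheckUnwindCallback(); prints the current
// stack trace when a sanitizer CHECK fails.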
static void CheckUnwind() {
  GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check);
  stack.Print();
}

// -------------------------- Globals --------------------- {{{1
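// memprof_inited flips to 1 once MemprofInitInternal() has progressed far
// enough for the interceptors and the allocator to be usable;
// memprof_init_done marks the end of initialization. The timestamp globals
// record when MemprofInitTime() first ran.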
int memprof_inited;
int memprof_init_done;
bool memprof_init_is_running;
int memprof_timestamp_inited;
long memprof_init_timestamp_s;

uptr kHighMemEnd;

// -------------------------- Run-time entry ------------------- {{{1
// exported functions
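// Memory access callbacks exported to instrumented code: each one records a
// single access at `addr` through __memprof::RecordAccess (see
// memprof_mapping.h), which accounts the access in shadow memory.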
#define MEMPROF_MEMORY_ACCESS_CALLBACK_BODY() __memprof::RecordAccess(addr);

#define MEMPROF_MEMORY_ACCESS_CALLBACK(type)                                   \
  extern "C" NOINLINE INTERFACE_ATTRIBUTE void __memprof_##type(uptr addr) {   \
    MEMPROF_MEMORY_ACCESS_CALLBACK_BODY()                                      \
  }

MEMPROF_MEMORY_ACCESS_CALLBACK(load)
MEMPROF_MEMORY_ACCESS_CALLBACK(store)

// Force the linker to keep the symbols for various MemProf interface
// functions. We want to keep those in the executable in order to let the
// instrumented dynamic libraries access the symbol even if it is not used by
// the executable itself. This should help if the build system is removing dead
// code at link time.
static NOINLINE void force_interface_symbols() {
  volatile int fake_condition = 0; // prevent dead condition elimination.
  // clang-format off
  switch (fake_condition) {
    case 1: __memprof_record_access(nullptr); break;
    case 2: __memprof_record_access_range(nullptr, 0); break;
  }
  // clang-format on
}
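// Registered with Atexit() during initialization (when flags()->atexit is
// set) so the accumulated access statistics are printed at process exit.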
static void memprof_atexit() {
  Printf("MemProfiler exit stats:\n");
  __memprof_print_accumulated_stats();
}

static void InitializeHighMemEnd() {
  kHighMemEnd = GetMaxUserVirtualAddress();
  // Increase kHighMemEnd to make sure it's properly
  // aligned together with kHighMemBeg:
  kHighMemEnd |= (GetMmapGranularity() << SHADOW_SCALE) - 1;
}
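// Dumps the memory-to-shadow layout and the shadow parameters; handy when
// debugging the shadow mapping (typically only printed at higher verbosity).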
void PrintAddressSpaceLayout() {
  if (kHighMemBeg) {
    Printf("|| `[%p, %p]` || HighMem    ||\n", (void *)kHighMemBeg,
           (void *)kHighMemEnd);
    Printf("|| `[%p, %p]` || HighShadow ||\n", (void *)kHighShadowBeg,
           (void *)kHighShadowEnd);
  }
  Printf("|| `[%p, %p]` || ShadowGap  ||\n", (void *)kShadowGapBeg,
         (void *)kShadowGapEnd);
  if (kLowShadowBeg) {
    Printf("|| `[%p, %p]` || LowShadow  ||\n", (void *)kLowShadowBeg,
           (void *)kLowShadowEnd);
    Printf("|| `[%p, %p]` || LowMem     ||\n", (void *)kLowMemBeg,
           (void *)kLowMemEnd);
  }
  Printf("MemToShadow(shadow): %p %p", (void *)MEM_TO_SHADOW(kLowShadowBeg),
         (void *)MEM_TO_SHADOW(kLowShadowEnd));
  if (kHighMemBeg) {
    Printf(" %p %p", (void *)MEM_TO_SHADOW(kHighShadowBeg),
           (void *)MEM_TO_SHADOW(kHighShadowEnd));
  }
  Printf("\n");
  Printf("malloc_context_size=%zu\n",
         (uptr)common_flags()->malloc_context_size);

  Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE);
  Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY);
  Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET);
  CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7);
}
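// Runs as a dynamic initializer of the runtime: starts the background thread
// (if configured) and installs the soft RSS limit callback.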
static bool UNUSED __local_memprof_dyninit = [] {
  MaybeStartBackgroudThread();
  SetSoftRssLimitExceededCallback(MemprofSoftRssLimitExceededCallback);

  return false;
}();

static void MemprofInitInternal() {
  if (LIKELY(memprof_inited))
    return;
  SanitizerToolName = "MemProfiler";
  CHECK(!memprof_init_is_running && "MemProf init calls itself!");
  memprof_init_is_running = true;

  CacheBinaryName();

  // Initialize flags. This must be done early, because most of the
  // initialization steps look at flags().
  InitializeFlags();

  AvoidCVE_2016_2143();

  SetMallocContextSize(common_flags()->malloc_context_size);

  InitializeHighMemEnd();

  // Make sure we are not statically linked.
  MemprofDoesNotSupportStaticLinkage();

  // Install tool-specific callbacks in sanitizer_common.
  AddDieCallback(MemprofDie);
  SetCheckUnwindCallback(CheckUnwind);
  // Use the profile name specified via the binary itself if it exists, and
  // hasn't been overridden by a flag at runtime.
  if (__memprof_profile_filename[0] != 0 && !common_flags()->log_path)
    __sanitizer_set_report_path(__memprof_profile_filename);
  else
    __sanitizer_set_report_path(common_flags()->log_path);

  __sanitizer::InitializePlatformEarly();

  // Re-exec ourselves if we need to set additional env or command line args.
  MaybeReexec();

  // Set the minimum alignment used by the internal low-level allocator.
  SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY);

  InitializeMemprofInterceptors();
  CheckASLR();

  ReplaceSystemMalloc();

  DisableCoreDumperIfNecessary();

  InitializeShadowMemory();

  TSDInit(PlatformTSDDtor);

  InitializeAllocator();
  // On Linux, MemprofThread::ThreadStart() calls malloc(), so memprof_inited
  // must be set to 1 before the threads are initialized.
  memprof_inited = 1;
  memprof_init_is_running = false;

  if (flags()->atexit)
    Atexit(memprof_atexit);

  InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);

  // interceptors
  InitTlsSize();

  // Create main thread.
  MemprofThread *main_thread = CreateMainThread();
  CHECK_EQ(0, main_thread->tid());
  force_interface_symbols(); // no-op.
  SanitizerInitializeUnwinder();

  Symbolizer::LateInitialize();

  VReport(1, "MemProfiler Init done\n");

  memprof_init_done = 1;
}
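// Captures the wall-clock time the first time it is called; other parts of
// the runtime can use memprof_init_timestamp_s to timestamp events relative
// to initialization.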
void MemprofInitTime() {
  if (LIKELY(memprof_timestamp_inited))
    return;
  timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  memprof_init_timestamp_s = ts.tv_sec;
  memprof_timestamp_inited = 1;
}

// Initialize as requested from some part of the MemProf runtime library
// (interceptors, allocator, etc).
void MemprofInitFromRtl() { MemprofInitInternal(); }

#if MEMPROF_DYNAMIC
// Initialize the runtime in case it's LD_PRELOAD-ed into an uninstrumented
// executable (and thus normal initializers from .preinit_array or modules
// haven't run).

class MemprofInitializer {
public:
  MemprofInitializer() { MemprofInitFromRtl(); }
};

static MemprofInitializer memprof_initializer;
#endif // MEMPROF_DYNAMIC

} // namespace __memprof

// ---------------------- Interface ---------------- {{{1
using namespace __memprof;

// Initialize as requested from instrumented application code.
void __memprof_init() {
  MemprofInitTime();
  MemprofInitInternal();
}
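// Early initialization entry point, intended to be referenced from
// .preinit_array so the runtime initializes before other constructors run.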
void __memprof_preinit() { MemprofInitInternal(); }
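// No-op whose versioned name lets a mismatch between the instrumentation's
// expected interface version and this runtime surface as a link-time error.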
void __memprof_version_mismatch_check_v1() {}

void __memprof_record_access(void const volatile *addr) {
  __memprof::RecordAccess((uptr)addr);
}

// We only record the access on the first location in the range,
// since we will later accumulate the access counts across the
// full allocation, and we don't want to inflate the hotness from
// a memory intrinsic on a large range of memory.
// TODO: Should we do something else so we can better track utilization?
void __memprof_record_access_range(void const volatile *addr,
                                   UNUSED uptr size) {
  __memprof::RecordAccess((uptr)addr);
}
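// Unaligned load/store helpers from the common sanitizer interface; each one
// records the access before performing it, so accesses made through these
// helpers are counted like any other.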
extern "C" SANITIZER_INTERFACE_ATTRIBUTE u16
__sanitizer_unaligned_load16(const uu16 *p) {
  __memprof_record_access(p);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE u32
__sanitizer_unaligned_load32(const uu32 *p) {
  __memprof_record_access(p);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE u64
__sanitizer_unaligned_load64(const uu64 *p) {
  __memprof_record_access(p);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_unaligned_store16(uu16 *p, u16 x) {
  __memprof_record_access(p);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_unaligned_store32(uu32 *p, u32 x) {
  __memprof_record_access(p);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_unaligned_store64(uu64 *p, u64 x) {
  __memprof_record_access(p);
  *p = x;
}