//=-- lsan_common.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

Flags lsan_flags;

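// Called on an __lsan_enable() that has no matching __lsan_disable(), i.e.
// when the per-thread disable counter would underflow.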
void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
}

void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0)

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0)

ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
static const char kStdSuppressions[] =
#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // definition.
    "leak:*pthread_exit*\n"
#endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#if SANITIZER_MAC
    // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
    "leak:*_os_trace*\n"
#endif
    // TLS leak in some glibc versions, described in
    // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
    "leak:*tls_get_addr*\n";

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder)  // NOLINT
      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
  suppression_ctx->ParseFromFile(flags()->suppressions);
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
  suppression_ctx->Parse(kStdSuppressions);
}

static SuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

static InternalMmapVector<RootRegion> *root_regions;

InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }

void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new (placeholder) InternalMmapVector<RootRegion>();  // NOLINT
}

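// Returns the result of the user-provided __lsan_default_options() if it is
// defined, and an empty string otherwise.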
const char *MaybeCallLsanDefaultOptions() {
  return (&__lsan_default_options) ? __lsan_default_options() : "";
}

void InitCommonLsan() {
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#elif defined(__mips64)
  return ((p >> 40) == 0);
#elif defined(__aarch64__)
  unsigned runtimeVMA =
      (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
  return ((p >> runtimeVMA) == 0);
#else
  return true;
#endif
}

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

// Scans a global range for pointers.
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}

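// ForEachExtraStackRange callback. Scans an extra stack range (such as a fake
// stack) for heap pointers.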
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalMmapVector<uptr> registers(suspended_threads.RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end =
      reinterpret_cast<uptr>(registers.data() + registers.size());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, stack_begin, stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
        // If the tls and cache ranges don't overlap, scan the full tls range;
        // otherwise, scan only the non-overlapping portions.
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
      if (dtls && !DTLSInDestruction(dtls)) {
        for (uptr j = 0; j < dtls->dtv_size; ++j) {
          uptr dtls_beg = dtls->dtv[j].beg;
          uptr dtls_end = dtls_beg + dtls->dtv[j].size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        }
      } else {
        // We are handling a thread with DTLS under destruction. Log about
        // this and continue.
        LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id);
      }
    }
  }
}

void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
                    uptr region_begin, uptr region_end, bool is_readable) {
  uptr intersection_begin = Max(root_region.begin, region_begin);
  uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
  if (intersection_begin >= intersection_end) return;
  LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
               root_region.begin, root_region.begin + root_region.size,
               region_begin, region_end,
               is_readable ? "readable" : "unreadable");
  if (is_readable)
    ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
                         kReachable);
}

static void ProcessRootRegion(Frontier *frontier,
                              const RootRegion &root_region) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    ScanRootRegion(frontier, root_region, segment.start, segment.end,
                   segment.IsReadable());
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    ProcessRootRegion(frontier, (*root_regions)[i]);
  }
}

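// Flood fill: repeatedly pops a chunk off the frontier and scans it for
// pointers; chunks discovered this way are tagged with |tag| and pushed back
// onto the frontier by ScanRangeForPointers.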
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
                 chunk, chunk + m.requested_size(), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
  CHECK(stack_id);
  StackTrace stack = map->Get(stack_id);
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

struct InvalidPCParam {
  Frontier *frontier;
  StackDepotReverseMap *stack_depot_reverse_map;
  bool skip_linker_allocations;
};

// ForEachChunk callback. If the caller pc is invalid or is within the linker,
// mark as reachable. Called by ProcessPlatformSpecificAllocations.
static void MarkInvalidPCCb(uptr chunk, void *arg) {
  CHECK(arg);
  InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
    u32 stack_id = m.stack_trace_id();
    uptr caller_pc = 0;
    if (stack_id > 0)
      caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
    // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
    // it as reachable, as we can't properly report its allocation stack anyway.
    if (caller_pc == 0 || (param->skip_linker_allocations &&
                           GetLinker()->containsAddress(caller_pc))) {
      m.set_tag(kReachable);
      param->frontier->push_back(chunk);
    }
  }
}

// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
// modules accounting etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
void ProcessPC(Frontier *frontier) {
  StackDepotReverseMap stack_depot_reverse_map;
  InvalidPCParam arg;
  arg.frontier = frontier;
  arg.stack_depot_reverse_map = &stack_depot_reverse_map;
  arg.skip_linker_allocations =
      flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
  ForEachChunk(MarkInvalidPCCb, &arg);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier;

  ForEachChunk(CollectIgnoredCb, &frontier);
  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);

  CHECK_EQ(0, frontier.size());
  ProcessPC(&frontier);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    u32 resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched;
  GetSuppressionContext()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
        &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

struct CheckForLeaksParam {
  bool success;
  LeakReport leak_report;
};

static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
  const InternalMmapVector<tid_t> &suspended_threads =
      *(const InternalMmapVector<tid_t> *)arg;
  if (tctx->status == ThreadStatusRunning) {
    uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(),
                                tctx->os_id, CompareLess<int>());
    if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
      Report("Running thread %d was not suspended. False leaks are possible.\n",
             tctx->os_id);
  };
}

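// Warns about every thread that is still running and not present in the
// suspended-threads list; such a thread's stack cannot be scanned, so false
// leaks are possible.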
static void ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
    threads[i] = suspended_threads.GetThreadID(i);

  Sort(threads.data(), threads.size());

  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      &ReportIfNotSuspended, &threads);
}

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ReportUnsuspendedThreads(suspended_threads);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

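// Performs a stop-the-world leak check. Returns true if unsuppressed leaks
// were found and reported.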
static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  EnsureMainThreadIDIsCorrect();
  CheckForLeaksParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  DoStopTheWorld(CheckForLeaksCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Report(
        "HINT: For debugging, try setting environment variable "
        "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
    Report(
        "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.Default());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    return true;
  }
  return false;
}

static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  has_reported_leaks = CheckForLeaks();
  if (has_reported_leaks) HandleLeaks();
}

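// Recoverable variant of the leak check: may be called repeatedly and returns
// 1 if leaks were found, 0 otherwise, without invoking HandleLeaks().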
static int DoRecoverableLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }

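// Looks up a matching "leak" suppression for the given address, first by
// module name and then by symbolized function or file name.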
static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  SuppressionContext *suppressions = GetSuppressionContext();
  if (const char *module_name =
          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
    if (suppressions->Match(module_name, kSuppressionLeak, &s))
      return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
        suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return nullptr;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}

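// Orders leaks for reporting: directly leaked chunks come before indirectly
// leaked ones, and larger total size comes first within each group.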
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.Default());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary(kMaxSummaryLength);
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
                           leaks_[i].hit_count);
      leaks_[i].is_suppressed = true;
    }
  }
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

}  // namespace __lsan
#else  // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() { }
void DoLeakCheck() { }
void DoRecoverableLeakCheckVoid() { }
void DisableInThisThread() { }
void EnableInThisThread() { }
}
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
            "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}

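// Example of typical user code for the root-region interface below (a sketch
// only; g_arena is a made-up name, not part of this file). Pointers stored
// anywhere inside a registered region are treated as roots during leak
// checking; the region must be unregistered with the same begin/size pair:
//
//   static char g_arena[1 << 20];
//   __lsan_register_root_region(g_arena, sizeof(g_arena));
//   ...
//   __lsan_unregister_root_region(g_arena, sizeof(g_arena));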
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {reinterpret_cast<uptr>(begin), size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %llu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
  return 0;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_options() {
  return "";
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions() {
  return "";
}
#endif
}  // extern "C"