1 //=-- lsan_common.cpp -----------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of LeakSanitizer.
10 // Implementation of common leak checking functionality.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "lsan_common.h"
15 
16 #include "sanitizer_common/sanitizer_common.h"
17 #include "sanitizer_common/sanitizer_flag_parser.h"
18 #include "sanitizer_common/sanitizer_flags.h"
19 #include "sanitizer_common/sanitizer_placement_new.h"
20 #include "sanitizer_common/sanitizer_procmaps.h"
21 #include "sanitizer_common/sanitizer_report_decorator.h"
22 #include "sanitizer_common/sanitizer_stackdepot.h"
23 #include "sanitizer_common/sanitizer_stacktrace.h"
24 #include "sanitizer_common/sanitizer_suppressions.h"
25 #include "sanitizer_common/sanitizer_thread_registry.h"
26 #include "sanitizer_common/sanitizer_tls_get_addr.h"
27 
28 #if CAN_SANITIZE_LEAKS
29 namespace __lsan {
30 
31 // This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
32 // also to protect the global list of root regions.
33 BlockingMutex global_mutex(LINKER_INITIALIZED);
34 
Flags lsan_flags;

void DisableCounterUnderflow() {
39   if (common_flags()->detect_leaks) {
40     Report("Unmatched call to __lsan_enable().\n");
41     Die();
42   }
43 }
44 
45 void Flags::SetDefaults() {
46 #define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
47 #include "lsan_flags.inc"
48 #undef LSAN_FLAG
49 }
50 
51 void RegisterLsanFlags(FlagParser *parser, Flags *f) {
52 #define LSAN_FLAG(Type, Name, DefaultValue, Description) \
53   RegisterFlag(parser, #Name, Description, &f->Name);
54 #include "lsan_flags.inc"
55 #undef LSAN_FLAG
56 }
57 
58 #define LOG_POINTERS(...)                           \
59   do {                                              \
60     if (flags()->log_pointers) Report(__VA_ARGS__); \
61   } while (0)
62 
63 #define LOG_THREADS(...)                           \
64   do {                                             \
65     if (flags()->log_threads) Report(__VA_ARGS__); \
66   } while (0)
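// Both logs are off by default; they can be enabled at run time with, e.g.,
// LSAN_OPTIONS=log_pointers=1:log_threads=1.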
67 
68 class LeakSuppressionContext {
69   bool parsed = false;
70   SuppressionContext context;
71   bool suppressed_stacks_sorted = true;
72   InternalMmapVector<u32> suppressed_stacks;
73 
74   Suppression *GetSuppressionForAddr(uptr addr);
75   void LazyInit();
76 
77  public:
  LeakSuppressionContext(const char *suppression_types[],
                         int suppression_types_num)
      : context(suppression_types, suppression_types_num) {}
81 
82   Suppression *GetSuppressionForStack(u32 stack_trace_id);
83 
84   const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
85     if (!suppressed_stacks_sorted) {
86       suppressed_stacks_sorted = true;
87       SortAndDedup(suppressed_stacks);
88     }
89     return suppressed_stacks;
90   }
91   void PrintMatchedSuppressions();
92 };
93 
94 ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
95 static LeakSuppressionContext *suppression_ctx = nullptr;
96 static const char kSuppressionLeak[] = "leak";
97 static const char *kSuppressionTypes[] = { kSuppressionLeak };
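// Suppressions that are always active, in addition to those parsed from the
// file named by the suppressions= flag and from __lsan_default_suppressions().
// All of them use the same one-pattern-per-line "leak:<pattern>" format; the
// pattern is matched against module, function and source file names.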
98 static const char kStdSuppressions[] =
99 #if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
100     // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
101     // definition.
102     "leak:*pthread_exit*\n"
103 #endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
104 #if SANITIZER_MAC
105     // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
106     "leak:*_os_trace*\n"
107 #endif
108     // TLS leak in some glibc versions, described in
109     // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
110     "leak:*tls_get_addr*\n";
111 
112 void InitializeSuppressions() {
113   CHECK_EQ(nullptr, suppression_ctx);
114   suppression_ctx = new (suppression_placeholder)
115       LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
116 }
117 
118 void LeakSuppressionContext::LazyInit() {
119   if (!parsed) {
120     parsed = true;
121     context.ParseFromFile(flags()->suppressions);
122     if (&__lsan_default_suppressions)
123       context.Parse(__lsan_default_suppressions());
124     context.Parse(kStdSuppressions);
125   }
126 }
127 
128 static LeakSuppressionContext *GetSuppressionContext() {
129   CHECK(suppression_ctx);
130   return suppression_ctx;
131 }
132 
133 static InternalMmapVector<RootRegion> *root_regions;
134 
135 InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }
136 
137 void InitializeRootRegions() {
138   CHECK(!root_regions);
139   ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
140   root_regions = new (placeholder) InternalMmapVector<RootRegion>();
141 }
142 
143 void InitCommonLsan() {
144   InitializeRootRegions();
145   if (common_flags()->detect_leaks) {
146     // Initialization which can fail or print warnings should only be done if
147     // LSan is actually enabled.
148     InitializeSuppressions();
149     InitializePlatformSpecificModules();
150   }
151 }
152 
153 class Decorator: public __sanitizer::SanitizerCommonDecorator {
154  public:
155   Decorator() : SanitizerCommonDecorator() { }
156   const char *Error() { return Red(); }
157   const char *Leak() { return Blue(); }
158 };
159 
160 static inline bool CanBeAHeapPointer(uptr p) {
161   // Since our heap is located in mmap-ed memory, we can assume a sensible lower
162   // bound on heap addresses.
163   const uptr kMinAddress = 4 * 4096;
164   if (p < kMinAddress) return false;
165 #if defined(__x86_64__)
166   // Accept only canonical form user-space addresses.
167   return ((p >> 47) == 0);
168 #elif defined(__mips64)
169   return ((p >> 40) == 0);
170 #elif defined(__aarch64__)
171   unsigned runtimeVMA =
172     (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
173   return ((p >> runtimeVMA) == 0);
174 #else
175   return true;
176 #endif
177 }
178 
179 // Scans the memory range, looking for byte patterns that point into allocator
180 // chunks. Marks those chunks with |tag| and adds them to |frontier|.
181 // There are two usage modes for this function: finding reachable chunks
182 // (|tag| = kReachable) and finding indirectly leaked chunks
183 // (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
184 // so |frontier| = 0.
185 void ScanRangeForPointers(uptr begin, uptr end,
186                           Frontier *frontier,
187                           const char *region_type, ChunkTag tag) {
188   CHECK(tag == kReachable || tag == kIndirectlyLeaked);
189   const uptr alignment = flags()->pointer_alignment();
190   LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
191   uptr pp = begin;
192   if (pp % alignment)
193     pp = pp + alignment - pp % alignment;
194   for (; pp + sizeof(void *) <= end; pp += alignment) {
195     void *p = *reinterpret_cast<void **>(pp);
196     if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
197     uptr chunk = PointsIntoChunk(p);
198     if (!chunk) continue;
199     // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
200     if (chunk == begin) continue;
201     LsanMetadata m(chunk);
202     if (m.tag() == kReachable || m.tag() == kIgnored) continue;
203 
204     // Do this check relatively late so we can log only the interesting cases.
205     if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
206       LOG_POINTERS(
207           "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
208           "%zu.\n",
209           pp, p, chunk, chunk + m.requested_size(), m.requested_size());
210       continue;
211     }
212 
213     m.set_tag(tag);
214     LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
215                  chunk, chunk + m.requested_size(), m.requested_size());
216     if (frontier)
217       frontier->push_back(chunk);
218   }
219 }
220 
// Scans a global range for pointers, skipping the portion occupied by the
// internal allocator if it falls inside the range.
222 void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
223   uptr allocator_begin = 0, allocator_end = 0;
224   GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
225   if (begin <= allocator_begin && allocator_begin < end) {
226     CHECK_LE(allocator_begin, allocator_end);
227     CHECK_LE(allocator_end, end);
228     if (begin < allocator_begin)
229       ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
230                            kReachable);
231     if (allocator_end < end)
232       ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
233   } else {
234     ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
235   }
236 }
237 
238 void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
239   Frontier *frontier = reinterpret_cast<Frontier *>(arg);
240   ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
241 }
242 
243 #if SANITIZER_FUCHSIA
244 
245 // Fuchsia handles all threads together with its own callback.
246 static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {}
247 
248 #else
249 
250 #if SANITIZER_ANDROID
251 // FIXME: Move this out into *libcdep.cpp
252 extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
253     pid_t, void (*cb)(void *, void *, uptr, void *), void *);
254 #endif
255 
// Collects pointers stored in ThreadContexts (via
// GetAdditionalThreadContextPtrs), marks the chunks they point into as
// reachable and adds those chunks to the frontier.
static void ProcessThreadRegistry(Frontier *frontier) {
257   InternalMmapVector<uptr> ptrs;
258   GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
259       GetAdditionalThreadContextPtrs, &ptrs);
260 
261   for (uptr i = 0; i < ptrs.size(); ++i) {
262     void *ptr = reinterpret_cast<void *>(ptrs[i]);
263     uptr chunk = PointsIntoChunk(ptr);
264     if (!chunk)
265       continue;
266     LsanMetadata m(chunk);
267     if (!m.allocated())
268       continue;
269 
270     // Mark as reachable and add to frontier.
271     LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
272     m.set_tag(kReachable);
273     frontier->push_back(chunk);
274   }
275 }
276 
277 // Scans thread data (stacks and TLS) for heap pointers.
278 static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
279                            Frontier *frontier) {
280   InternalMmapVector<uptr> registers;
281   for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
282     tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %llu.\n", os_id);
284     uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
285     DTLS *dtls;
286     bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
287                                               &tls_begin, &tls_end,
288                                               &cache_begin, &cache_end, &dtls);
289     if (!thread_found) {
290       // If a thread can't be found in the thread registry, it's probably in the
291       // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %llu not found in registry.\n", os_id);
293       continue;
294     }
295     uptr sp;
296     PtraceRegistersStatus have_registers =
297         suspended_threads.GetRegistersAndSP(i, &registers, &sp);
298     if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %llu.\n", os_id);
300       // If unable to get SP, consider the entire stack to be reachable unless
301       // GetRegistersAndSP failed with ESRCH.
302       if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
303       sp = stack_begin;
304     }
305 
306     if (flags()->use_registers && have_registers) {
307       uptr registers_begin = reinterpret_cast<uptr>(registers.data());
308       uptr registers_end =
309           reinterpret_cast<uptr>(registers.data() + registers.size());
310       ScanRangeForPointers(registers_begin, registers_end, frontier,
311                            "REGISTERS", kReachable);
312     }
313 
314     if (flags()->use_stacks) {
315       LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
316       if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on an alternate stack, or swapcontext was used).
319         // Again, consider the entire stack range to be reachable.
320         LOG_THREADS("WARNING: stack pointer not in stack range.\n");
321         uptr page_size = GetPageSizeCached();
322         int skipped = 0;
323         while (stack_begin < stack_end &&
324                !IsAccessibleMemoryRange(stack_begin, 1)) {
325           skipped++;
326           stack_begin += page_size;
327         }
328         LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
329                     skipped, stack_begin, stack_end);
330       } else {
331         // Shrink the stack range to ignore out-of-scope values.
332         stack_begin = sp;
333       }
334       ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
335                            kReachable);
336       ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
337     }
338 
339     if (flags()->use_tls) {
340       if (tls_begin) {
341         LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
        // If the tls and cache ranges don't overlap, scan the full tls range;
        // otherwise, scan only the non-overlapping portions.
344         if (cache_begin == cache_end || tls_end < cache_begin ||
345             tls_begin > cache_end) {
346           ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
347         } else {
348           if (tls_begin < cache_begin)
349             ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
350                                  kReachable);
351           if (tls_end > cache_end)
352             ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
353                                  kReachable);
354         }
355       }
356 #if SANITIZER_ANDROID
357       auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/,
358                      void *arg) -> void {
359         ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
360                              reinterpret_cast<uptr>(dtls_end),
361                              reinterpret_cast<Frontier *>(arg), "DTLS",
362                              kReachable);
363       };
364 
      // FIXME: There might be a race condition here (and in Bionic) if the
      // thread is suspended in the middle of updating its DTLS; in other
      // words, we could scan already freed memory. (Probably fine for now.)
368       __libc_iterate_dynamic_tls(os_id, cb, frontier);
369 #else
370       if (dtls && !DTLSInDestruction(dtls)) {
371         ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
372           uptr dtls_beg = dtv.beg;
373           uptr dtls_end = dtls_beg + dtv.size;
374           if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %d at %p-%p.\n", id, dtls_beg, dtls_end);
376             ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
377                                  kReachable);
378           }
379         });
380       } else {
        // We are handling a thread with DTLS under destruction. Log this and
        // continue.
        LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
384       }
385 #endif
386     }
387   }
388 
  // Add pointers reachable from ThreadContexts.
390   ProcessThreadRegistry(frontier);
391 }
392 
393 #endif  // SANITIZER_FUCHSIA
394 
// Scans the intersection of a user-registered root region with a mapped
// memory segment, provided the latter is readable.
void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
396                     uptr region_begin, uptr region_end, bool is_readable) {
397   uptr intersection_begin = Max(root_region.begin, region_begin);
398   uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
399   if (intersection_begin >= intersection_end) return;
400   LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
401                root_region.begin, root_region.begin + root_region.size,
402                region_begin, region_end,
403                is_readable ? "readable" : "unreadable");
404   if (is_readable)
405     ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
406                          kReachable);
407 }
408 
409 static void ProcessRootRegion(Frontier *frontier,
410                               const RootRegion &root_region) {
411   MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
412   MemoryMappedSegment segment;
413   while (proc_maps.Next(&segment)) {
414     ScanRootRegion(frontier, root_region, segment.start, segment.end,
415                    segment.IsReadable());
416   }
417 }
418 
419 // Scans root regions for heap pointers.
420 static void ProcessRootRegions(Frontier *frontier) {
421   if (!flags()->use_root_regions) return;
422   CHECK(root_regions);
423   for (uptr i = 0; i < root_regions->size(); i++) {
424     ProcessRootRegion(frontier, (*root_regions)[i]);
425   }
426 }
427 
// Keeps popping chunks off the frontier and scanning them for pointers to
// other chunks until the frontier is empty, i.e. computes the transitive
// closure of reachability, tagging every newly discovered chunk with |tag|.
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
429   while (frontier->size()) {
430     uptr next_chunk = frontier->back();
431     frontier->pop_back();
432     LsanMetadata m(next_chunk);
433     ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
434                          "HEAP", tag);
435   }
436 }
437 
438 // ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
439 // which are reachable from it as indirectly leaked.
440 static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
441   chunk = GetUserBegin(chunk);
442   LsanMetadata m(chunk);
443   if (m.allocated() && m.tag() != kReachable) {
444     ScanRangeForPointers(chunk, chunk + m.requested_size(),
445                          /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
446   }
447 }
448 
// ForEachChunk callback. Marks chunks whose allocation stack matches one of
// the suppressed stacks (passed via |arg|) as ignored.
static void IgnoredSuppressedCb(uptr chunk, void *arg) {
450   CHECK(arg);
451   chunk = GetUserBegin(chunk);
452   LsanMetadata m(chunk);
453   if (!m.allocated() || m.tag() == kIgnored)
454     return;
455 
456   const InternalMmapVector<u32> &suppressed =
457       *static_cast<const InternalMmapVector<u32> *>(arg);
458   uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
459   if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
460     return;
461 
462   LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", chunk,
463                chunk + m.requested_size(), m.requested_size());
464   m.set_tag(kIgnored);
465 }
466 
467 // ForEachChunk callback. If chunk is marked as ignored, adds its address to
468 // frontier.
469 static void CollectIgnoredCb(uptr chunk, void *arg) {
470   CHECK(arg);
471   chunk = GetUserBegin(chunk);
472   LsanMetadata m(chunk);
473   if (m.allocated() && m.tag() == kIgnored) {
474     LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
475                  chunk, chunk + m.requested_size(), m.requested_size());
476     reinterpret_cast<Frontier *>(arg)->push_back(chunk);
477   }
478 }
479 
480 static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
481   CHECK(stack_id);
482   StackTrace stack = map->Get(stack_id);
483   // The top frame is our malloc/calloc/etc. The next frame is the caller.
484   if (stack.size >= 2)
485     return stack.trace[1];
486   return 0;
487 }
488 
489 struct InvalidPCParam {
490   Frontier *frontier;
491   StackDepotReverseMap *stack_depot_reverse_map;
492   bool skip_linker_allocations;
493 };
494 
495 // ForEachChunk callback. If the caller pc is invalid or is within the linker,
496 // mark as reachable. Called by ProcessPlatformSpecificAllocations.
497 static void MarkInvalidPCCb(uptr chunk, void *arg) {
498   CHECK(arg);
499   InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
500   chunk = GetUserBegin(chunk);
501   LsanMetadata m(chunk);
502   if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
503     u32 stack_id = m.stack_trace_id();
504     uptr caller_pc = 0;
505     if (stack_id > 0)
506       caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
507     // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
508     // it as reachable, as we can't properly report its allocation stack anyway.
509     if (caller_pc == 0 || (param->skip_linker_allocations &&
510                            GetLinker()->containsAddress(caller_pc))) {
511       m.set_tag(kReachable);
512       param->frontier->push_back(chunk);
513     }
514   }
515 }
516 
// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, the dynamic loader's internal
// bookkeeping for loaded modules, etc.
520 // Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
521 // They are allocated with a __libc_memalign() call in allocate_and_init()
522 // (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
523 // blocks, but we can make sure they come from our own allocator by intercepting
524 // __libc_memalign(). On top of that, there is no easy way to reach them. Their
525 // addresses are stored in a dynamically allocated array (the DTV) which is
526 // referenced from the static TLS. Unfortunately, we can't just rely on the DTV
527 // being reachable from the static TLS, and the dynamic TLS being reachable from
528 // the DTV. This is because the initial DTV is allocated before our interception
529 // mechanism kicks in, and thus we don't recognize it as allocated memory. We
530 // can't special-case it either, since we don't know its size.
531 // Our solution is to include in the root set all allocations made from
532 // ld-linux.so (which is where allocate_and_init() is implemented). This is
533 // guaranteed to include all dynamic TLS blocks (and possibly other allocations
534 // which we don't care about).
535 // On all other platforms, this simply checks to ensure that the caller pc is
536 // valid before reporting chunks as leaked.
537 void ProcessPC(Frontier *frontier) {
538   StackDepotReverseMap stack_depot_reverse_map;
539   InvalidPCParam arg;
540   arg.frontier = frontier;
541   arg.stack_depot_reverse_map = &stack_depot_reverse_map;
542   arg.skip_linker_allocations =
543       flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
544   ForEachChunk(MarkInvalidPCCb, &arg);
545 }
546 
547 // Sets the appropriate tag on each chunk.
548 static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
549                               Frontier *frontier) {
550   const InternalMmapVector<u32> &suppressed_stacks =
551       GetSuppressionContext()->GetSortedSuppressedStacks();
552   if (!suppressed_stacks.empty()) {
553     ForEachChunk(IgnoredSuppressedCb,
554                  const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
555   }
556   ForEachChunk(CollectIgnoredCb, frontier);
557   ProcessGlobalRegions(frontier);
558   ProcessThreads(suspended_threads, frontier);
559   ProcessRootRegions(frontier);
560   FloodFillTag(frontier, kReachable);
561 
562   CHECK_EQ(0, frontier->size());
563   ProcessPC(frontier);
564 
565   // The check here is relatively expensive, so we do this in a separate flood
566   // fill. That way we can skip the check for chunks that are reachable
567   // otherwise.
568   LOG_POINTERS("Processing platform-specific allocations.\n");
569   ProcessPlatformSpecificAllocations(frontier);
570   FloodFillTag(frontier, kReachable);
571 
572   // Iterate over leaked chunks and mark those that are reachable from other
573   // leaked chunks.
574   LOG_POINTERS("Scanning leaked chunks.\n");
575   ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
576 }
577 
578 // ForEachChunk callback. Resets the tags to pre-leak-check state.
579 static void ResetTagsCb(uptr chunk, void *arg) {
580   (void)arg;
581   chunk = GetUserBegin(chunk);
582   LsanMetadata m(chunk);
583   if (m.allocated() && m.tag() != kIgnored)
584     m.set_tag(kDirectlyLeaked);
585 }
586 
587 static void PrintStackTraceById(u32 stack_trace_id) {
588   CHECK(stack_trace_id);
589   StackDepotGet(stack_trace_id).Print();
590 }
591 
592 // ForEachChunk callback. Aggregates information about unreachable chunks into
593 // a LeakReport.
594 static void CollectLeaksCb(uptr chunk, void *arg) {
595   CHECK(arg);
596   LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
597   chunk = GetUserBegin(chunk);
598   LsanMetadata m(chunk);
599   if (!m.allocated()) return;
600   if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
601     u32 resolution = flags()->resolution;
602     u32 stack_trace_id = 0;
603     if (resolution > 0) {
604       StackTrace stack = StackDepotGet(m.stack_trace_id());
605       stack.size = Min(stack.size, resolution);
606       stack_trace_id = StackDepotPut(stack);
607     } else {
608       stack_trace_id = m.stack_trace_id();
609     }
610     leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
611                                 m.tag());
612   }
613 }
614 
615 void LeakSuppressionContext::PrintMatchedSuppressions() {
616   InternalMmapVector<Suppression *> matched;
617   context.GetMatched(&matched);
618   if (!matched.size())
619     return;
620   const char *line = "-----------------------------------------------------";
621   Printf("%s\n", line);
622   Printf("Suppressions used:\n");
623   Printf("  count      bytes template\n");
624   for (uptr i = 0; i < matched.size(); i++) {
625     Printf("%7zu %10zu %s\n",
626            static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
627            matched[i]->weight, matched[i]->templ);
628   }
629   Printf("%s\n\n", line);
630 }
631 
632 static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
633   const InternalMmapVector<tid_t> &suspended_threads =
634       *(const InternalMmapVector<tid_t> *)arg;
635   if (tctx->status == ThreadStatusRunning) {
636     uptr i = InternalLowerBound(suspended_threads, tctx->os_id);
637     if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
      Report(
          "Running thread %llu was not suspended. False leaks are possible.\n",
          tctx->os_id);
640   }
641 }
642 
643 #if SANITIZER_FUCHSIA
644 
// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadsList is never really used.
647 static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}
648 
649 #else  // !SANITIZER_FUCHSIA
650 
651 static void ReportUnsuspendedThreads(
652     const SuspendedThreadsList &suspended_threads) {
653   InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
654   for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
655     threads[i] = suspended_threads.GetThreadID(i);
656 
657   Sort(threads.data(), threads.size());
658 
659   GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
660       &ReportIfNotSuspended, &threads);
661 }
662 
663 #endif  // !SANITIZER_FUCHSIA
664 
665 static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
666                                   void *arg) {
667   CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
668   CHECK(param);
669   CHECK(!param->success);
670   ReportUnsuspendedThreads(suspended_threads);
671   ClassifyAllChunks(suspended_threads, &param->frontier);
672   ForEachChunk(CollectLeaksCb, &param->leak_report);
673   // Clean up for subsequent leak checks. This assumes we did not overwrite any
674   // kIgnored tags.
675   ForEachChunk(ResetTagsCb, nullptr);
676   param->success = true;
677 }
678 
// Prints the leak report and any matched suppressions. Returns true if there
// were unsuppressed leaks to report.
static bool PrintResults(LeakReport &report) {
680   uptr unsuppressed_count = report.UnsuppressedLeakCount();
681   if (unsuppressed_count) {
682     Decorator d;
683     Printf(
684         "\n"
685         "================================================================="
686         "\n");
687     Printf("%s", d.Error());
688     Report("ERROR: LeakSanitizer: detected memory leaks\n");
689     Printf("%s", d.Default());
690     report.ReportTopLeaks(flags()->max_leaks);
691   }
692   if (common_flags()->print_suppressions)
693     GetSuppressionContext()->PrintMatchedSuppressions();
694   if (unsuppressed_count > 0) {
695     report.PrintSummary();
696     return true;
697   }
698   return false;
699 }
700 
// Performs the actual leak check. Reruns it while new suppressed stacks are
// being discovered, so that chunks held only by suppressed allocations are
// not reported as indirect leaks. Returns true if leaks were reported.
static bool CheckForLeaks() {
702   if (&__lsan_is_turned_off && __lsan_is_turned_off())
703     return false;
704   // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
705   // suppressions. However if a stack id was previously suppressed, it should be
706   // suppressed in future checks as well.
707   for (int i = 0;; ++i) {
708     EnsureMainThreadIDIsCorrect();
709     CheckForLeaksParam param;
710     LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
711     if (!param.success) {
712       Report("LeakSanitizer has encountered a fatal error.\n");
713       Report(
714           "HINT: For debugging, try setting environment variable "
715           "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
716       Report(
717           "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
718           "etc)\n");
719       Die();
720     }
    // No new suppressed stacks, so a rerun will not help and we can report.
722     if (!param.leak_report.ApplySuppressions())
723       return PrintResults(param.leak_report);
724 
725     // No indirect leaks to report, so we are done here.
726     if (!param.leak_report.IndirectUnsuppressedLeakCount())
727       return PrintResults(param.leak_report);
728 
729     if (i >= 8) {
      Report("WARNING: LeakSanitizer gave up on suppressing indirect leaks.\n");
731       return PrintResults(param.leak_report);
732     }
733 
734     // We found a new previously unseen suppressed call stack. Rerun to make
735     // sure it does not hold indirect leaks.
    VReport(1, "Rerun with %zu suppressed stacks.\n",
737             GetSuppressionContext()->GetSortedSuppressedStacks().size());
738   }
739 }
740 
741 static bool has_reported_leaks = false;
742 bool HasReportedLeaks() { return has_reported_leaks; }
743 
// Runs the leak check at most once per process; subsequent calls are no-ops.
void DoLeakCheck() {
745   BlockingMutexLock l(&global_mutex);
746   static bool already_done;
747   if (already_done) return;
748   already_done = true;
749   has_reported_leaks = CheckForLeaks();
750   if (has_reported_leaks) HandleLeaks();
751 }
752 
753 static int DoRecoverableLeakCheck() {
754   BlockingMutexLock l(&global_mutex);
755   bool have_leaks = CheckForLeaks();
756   return have_leaks ? 1 : 0;
757 }
758 
759 void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
760 
761 Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
762   Suppression *s = nullptr;
763 
764   // Suppress by module name.
765   if (const char *module_name =
766           Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
767     if (context.Match(module_name, kSuppressionLeak, &s))
768       return s;
769 
770   // Suppress by file or function name.
771   SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
772   for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
773     if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
774         context.Match(cur->info.file, kSuppressionLeak, &s)) {
775       break;
776     }
777   }
778   frames->ClearAll();
779   return s;
780 }
781 
782 Suppression *LeakSuppressionContext::GetSuppressionForStack(
783     u32 stack_trace_id) {
784   LazyInit();
785   StackTrace stack = StackDepotGet(stack_trace_id);
786   for (uptr i = 0; i < stack.size; i++) {
787     Suppression *s = GetSuppressionForAddr(
788         StackTrace::GetPreviousInstructionPc(stack.trace[i]));
789     if (s) {
790       suppressed_stacks_sorted = false;
791       suppressed_stacks.push_back(stack_trace_id);
792       return s;
793     }
794   }
795   return nullptr;
796 }
797 
798 ///// LeakReport implementation. /////
799 
800 // A hard limit on the number of distinct leaks, to avoid quadratic complexity
801 // in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
802 // in real-world applications.
803 // FIXME: Get rid of this limit by changing the implementation of LeakReport to
804 // use a hash table.
805 const uptr kMaxLeaksConsidered = 5000;
806 
807 void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
808                                 uptr leaked_size, ChunkTag tag) {
809   CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
810   bool is_directly_leaked = (tag == kDirectlyLeaked);
811   uptr i;
812   for (i = 0; i < leaks_.size(); i++) {
813     if (leaks_[i].stack_trace_id == stack_trace_id &&
814         leaks_[i].is_directly_leaked == is_directly_leaked) {
815       leaks_[i].hit_count++;
816       leaks_[i].total_size += leaked_size;
817       break;
818     }
819   }
820   if (i == leaks_.size()) {
821     if (leaks_.size() == kMaxLeaksConsidered) return;
822     Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
823                   is_directly_leaked, /* is_suppressed */ false };
824     leaks_.push_back(leak);
825   }
826   if (flags()->report_objects) {
827     LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
828     leaked_objects_.push_back(obj);
829   }
830 }
831 
832 static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
833   if (leak1.is_directly_leaked == leak2.is_directly_leaked)
834     return leak1.total_size > leak2.total_size;
835   else
836     return leak1.is_directly_leaked;
837 }
838 
839 void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
840   CHECK(leaks_.size() <= kMaxLeaksConsidered);
841   Printf("\n");
842   if (leaks_.size() == kMaxLeaksConsidered)
843     Printf("Too many leaks! Only the first %zu leaks encountered will be "
844            "reported.\n",
845            kMaxLeaksConsidered);
846 
847   uptr unsuppressed_count = UnsuppressedLeakCount();
848   if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
849     Printf("The %zu top leak(s):\n", num_leaks_to_report);
850   Sort(leaks_.data(), leaks_.size(), &LeakComparator);
851   uptr leaks_reported = 0;
852   for (uptr i = 0; i < leaks_.size(); i++) {
853     if (leaks_[i].is_suppressed) continue;
854     PrintReportForLeak(i);
855     leaks_reported++;
856     if (leaks_reported == num_leaks_to_report) break;
857   }
858   if (leaks_reported < unsuppressed_count) {
859     uptr remaining = unsuppressed_count - leaks_reported;
860     Printf("Omitting %zu more leak(s).\n", remaining);
861   }
862 }
863 
864 void LeakReport::PrintReportForLeak(uptr index) {
865   Decorator d;
866   Printf("%s", d.Leak());
867   Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
868          leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
869          leaks_[index].total_size, leaks_[index].hit_count);
870   Printf("%s", d.Default());
871 
872   PrintStackTraceById(leaks_[index].stack_trace_id);
873 
874   if (flags()->report_objects) {
875     Printf("Objects leaked above:\n");
876     PrintLeakedObjectsForLeak(index);
877     Printf("\n");
878   }
879 }
880 
881 void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
882   u32 leak_id = leaks_[index].id;
883   for (uptr j = 0; j < leaked_objects_.size(); j++) {
884     if (leaked_objects_[j].leak_id == leak_id)
885       Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
886              leaked_objects_[j].size);
887   }
888 }
889 
890 void LeakReport::PrintSummary() {
891   CHECK(leaks_.size() <= kMaxLeaksConsidered);
892   uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
898   InternalScopedString summary;
899   summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
900                  allocations);
901   ReportErrorSummary(summary.data());
902 }
903 
904 uptr LeakReport::ApplySuppressions() {
905   LeakSuppressionContext *suppressions = GetSuppressionContext();
  uptr new_suppressions = 0;
907   for (uptr i = 0; i < leaks_.size(); i++) {
908     Suppression *s =
909         suppressions->GetSuppressionForStack(leaks_[i].stack_trace_id);
910     if (s) {
911       s->weight += leaks_[i].total_size;
912       atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
913           leaks_[i].hit_count);
914       leaks_[i].is_suppressed = true;
915       ++new_suppressions;
916     }
917   }
918   return new_suppressions;
919 }
920 
921 uptr LeakReport::UnsuppressedLeakCount() {
922   uptr result = 0;
923   for (uptr i = 0; i < leaks_.size(); i++)
924     if (!leaks_[i].is_suppressed) result++;
925   return result;
926 }
927 
928 uptr LeakReport::IndirectUnsuppressedLeakCount() {
929   uptr result = 0;
930   for (uptr i = 0; i < leaks_.size(); i++)
931     if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
932       result++;
933   return result;
934 }
935 
936 } // namespace __lsan
937 #else // CAN_SANITIZE_LEAKS
938 namespace __lsan {
939 void InitCommonLsan() { }
940 void DoLeakCheck() { }
941 void DoRecoverableLeakCheckVoid() { }
942 void DisableInThisThread() { }
943 void EnableInThisThread() { }
944 }
945 #endif // CAN_SANITIZE_LEAKS
946 
947 using namespace __lsan;
948 
949 extern "C" {
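///// Interface exported to the user (see sanitizer/lsan_interface.h). /////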
950 SANITIZER_INTERFACE_ATTRIBUTE
951 void __lsan_ignore_object(const void *p) {
952 #if CAN_SANITIZE_LEAKS
953   if (!common_flags()->detect_leaks)
954     return;
955   // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
956   // locked.
957   BlockingMutexLock l(&global_mutex);
958   IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1,
            "__lsan_ignore_object(): heap object at %p is already being "
            "ignored\n", p);
964   if (res == kIgnoreObjectSuccess)
965     VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
966 #endif // CAN_SANITIZE_LEAKS
967 }
968 
// Registers the region [begin, begin + size) as a root: it will be scanned
// for pointers into the heap during leak checks (if use_root_regions is set).
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
971 #if CAN_SANITIZE_LEAKS
972   BlockingMutexLock l(&global_mutex);
973   CHECK(root_regions);
974   RootRegion region = {reinterpret_cast<uptr>(begin), size};
975   root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
977 #endif // CAN_SANITIZE_LEAKS
978 }
979 
980 SANITIZER_INTERFACE_ATTRIBUTE
981 void __lsan_unregister_root_region(const void *begin, uptr size) {
982 #if CAN_SANITIZE_LEAKS
983   BlockingMutexLock l(&global_mutex);
984   CHECK(root_regions);
985   bool removed = false;
986   for (uptr i = 0; i < root_regions->size(); i++) {
987     RootRegion region = (*root_regions)[i];
988     if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
989       removed = true;
990       uptr last_index = root_regions->size() - 1;
991       (*root_regions)[i] = (*root_regions)[last_index];
992       root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
994       break;
995     }
996   }
997   if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %zu has not "
        "been registered.\n",
        begin, size);
1002     Die();
1003   }
1004 #endif // CAN_SANITIZE_LEAKS
1005 }
1006 
1007 SANITIZER_INTERFACE_ATTRIBUTE
1008 void __lsan_disable() {
1009 #if CAN_SANITIZE_LEAKS
1010   __lsan::DisableInThisThread();
1011 #endif
1012 }
1013 
1014 SANITIZER_INTERFACE_ATTRIBUTE
1015 void __lsan_enable() {
1016 #if CAN_SANITIZE_LEAKS
1017   __lsan::EnableInThisThread();
1018 #endif
1019 }
1020 
1021 SANITIZER_INTERFACE_ATTRIBUTE
1022 void __lsan_do_leak_check() {
1023 #if CAN_SANITIZE_LEAKS
1024   if (common_flags()->detect_leaks)
1025     __lsan::DoLeakCheck();
1026 #endif // CAN_SANITIZE_LEAKS
1027 }
1028 
1029 SANITIZER_INTERFACE_ATTRIBUTE
1030 int __lsan_do_recoverable_leak_check() {
1031 #if CAN_SANITIZE_LEAKS
1032   if (common_flags()->detect_leaks)
1033     return __lsan::DoRecoverableLeakCheck();
1034 #endif // CAN_SANITIZE_LEAKS
1035   return 0;
1036 }
1037 
1038 SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
1039   return "";
1040 }
1041 
1042 #if !SANITIZER_SUPPORTS_WEAK_HOOKS
1043 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
1044 int __lsan_is_turned_off() {
1045   return 0;
1046 }
1047 
1048 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
1049 const char *__lsan_default_suppressions() {
1050   return "";
1051 }
1052 #endif
1053 } // extern "C"
1054