1 //=-- lsan_common.cpp -----------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of LeakSanitizer.
10 // Implementation of common leak checking functionality.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "lsan_common.h"
15 
16 #include "sanitizer_common/sanitizer_common.h"
17 #include "sanitizer_common/sanitizer_flag_parser.h"
18 #include "sanitizer_common/sanitizer_flags.h"
19 #include "sanitizer_common/sanitizer_placement_new.h"
20 #include "sanitizer_common/sanitizer_procmaps.h"
21 #include "sanitizer_common/sanitizer_report_decorator.h"
22 #include "sanitizer_common/sanitizer_stackdepot.h"
23 #include "sanitizer_common/sanitizer_stacktrace.h"
24 #include "sanitizer_common/sanitizer_suppressions.h"
25 #include "sanitizer_common/sanitizer_thread_registry.h"
26 #include "sanitizer_common/sanitizer_tls_get_addr.h"
27 
28 #if CAN_SANITIZE_LEAKS
29 namespace __lsan {
30 
31 // This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
32 // also to protect the global list of root regions.
33 Mutex global_mutex;
34 
35 Flags lsan_flags;
36 
37 void DisableCounterUnderflow() {
38   if (common_flags()->detect_leaks) {
39     Report("Unmatched call to __lsan_enable().\n");
40     Die();
41   }
42 }
43 
44 void Flags::SetDefaults() {
45 #  define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
46 #  include "lsan_flags.inc"
47 #  undef LSAN_FLAG
48 }
49 
50 void RegisterLsanFlags(FlagParser *parser, Flags *f) {
51 #  define LSAN_FLAG(Type, Name, DefaultValue, Description) \
52     RegisterFlag(parser, #Name, Description, &f->Name);
53 #  include "lsan_flags.inc"
54 #  undef LSAN_FLAG
55 }
56 
57 #  define LOG_POINTERS(...)      \
58     do {                         \
59       if (flags()->log_pointers) \
60         Report(__VA_ARGS__);     \
61     } while (0)
62 
63 #  define LOG_THREADS(...)      \
64     do {                        \
65       if (flags()->log_threads) \
66         Report(__VA_ARGS__);    \
67     } while (0)
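
// Both macros are cheap no-ops unless the corresponding run-time flag is set.
// An illustrative way to turn the logging on from the environment (flag names
// as registered from lsan_flags.inc):
//   LSAN_OPTIONS=log_pointers=1:log_threads=1 ./a.out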
68 
69 class LeakSuppressionContext {
70   bool parsed = false;
71   SuppressionContext context;
72   bool suppressed_stacks_sorted = true;
73   InternalMmapVector<u32> suppressed_stacks;
74   const LoadedModule *suppress_module = nullptr;
75 
76   void LazyInit();
77   Suppression *GetSuppressionForAddr(uptr addr);
78   bool SuppressInvalid(const StackTrace &stack);
79   bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);
80 
81  public:
  LeakSuppressionContext(const char *suppression_types[],
                         int suppression_types_num)
      : context(suppression_types, suppression_types_num) {}
85 
86   bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);
87 
88   const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
89     if (!suppressed_stacks_sorted) {
90       suppressed_stacks_sorted = true;
91       SortAndDedup(suppressed_stacks);
92     }
93     return suppressed_stacks;
94   }
95   void PrintMatchedSuppressions();
96 };
97 
98 ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
99 static LeakSuppressionContext *suppression_ctx = nullptr;
100 static const char kSuppressionLeak[] = "leak";
101 static const char *kSuppressionTypes[] = {kSuppressionLeak};
102 static const char kStdSuppressions[] =
103 #  if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
104     // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
105     // definition.
106     "leak:*pthread_exit*\n"
107 #  endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
108 #  if SANITIZER_MAC
109     // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
110     "leak:*_os_trace*\n"
111 #  endif
112     // TLS leak in some glibc versions, described in
113     // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
114     "leak:*tls_get_addr*\n";
115 
116 void InitializeSuppressions() {
117   CHECK_EQ(nullptr, suppression_ctx);
118   suppression_ctx = new (suppression_placeholder)
119       LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
120 }
121 
122 void LeakSuppressionContext::LazyInit() {
123   if (!parsed) {
124     parsed = true;
125     context.ParseFromFile(flags()->suppressions);
126     if (&__lsan_default_suppressions)
127       context.Parse(__lsan_default_suppressions());
128     context.Parse(kStdSuppressions);
129     if (flags()->use_tls && flags()->use_ld_allocations)
130       suppress_module = GetLinker();
131   }
132 }
133 
134 Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
135   Suppression *s = nullptr;
136 
137   // Suppress by module name.
138   if (const char *module_name =
139           Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
140     if (context.Match(module_name, kSuppressionLeak, &s))
141       return s;
142 
143   // Suppress by file or function name.
144   SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
145   for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
146     if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
147         context.Match(cur->info.file, kSuppressionLeak, &s)) {
148       break;
149     }
150   }
151   frames->ClearAll();
152   return s;
153 }
154 
155 static uptr GetCallerPC(const StackTrace &stack) {
156   // The top frame is our malloc/calloc/etc. The next frame is the caller.
157   if (stack.size >= 2)
158     return stack.trace[1];
159   return 0;
160 }
161 
// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, the dynamic loader's internal
// bookkeeping for loaded modules, etc.
165 // Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
166 // They are allocated with a __libc_memalign() call in allocate_and_init()
167 // (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
168 // blocks, but we can make sure they come from our own allocator by intercepting
169 // __libc_memalign(). On top of that, there is no easy way to reach them. Their
170 // addresses are stored in a dynamically allocated array (the DTV) which is
171 // referenced from the static TLS. Unfortunately, we can't just rely on the DTV
172 // being reachable from the static TLS, and the dynamic TLS being reachable from
173 // the DTV. This is because the initial DTV is allocated before our interception
174 // mechanism kicks in, and thus we don't recognize it as allocated memory. We
175 // can't special-case it either, since we don't know its size.
176 // Our solution is to include in the root set all allocations made from
177 // ld-linux.so (which is where allocate_and_init() is implemented). This is
178 // guaranteed to include all dynamic TLS blocks (and possibly other allocations
179 // which we don't care about).
// On all other platforms, this simply checks that the caller PC is valid
// before reporting chunks as leaked.
182 bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
183   uptr caller_pc = GetCallerPC(stack);
  // If caller_pc is unknown, this chunk may have been allocated in a
  // coroutine. Mark it as reachable, as we can't properly report its
  // allocation stack anyway.
186   return !caller_pc ||
187          (suppress_module && suppress_module->containsAddress(caller_pc));
188 }
189 
190 bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
191                                             uptr hit_count, uptr total_size) {
192   for (uptr i = 0; i < stack.size; i++) {
193     Suppression *s = GetSuppressionForAddr(
194         StackTrace::GetPreviousInstructionPc(stack.trace[i]));
195     if (s) {
196       s->weight += total_size;
197       atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
198       return true;
199     }
200   }
201   return false;
202 }
203 
204 bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
205                                       uptr total_size) {
206   LazyInit();
207   StackTrace stack = StackDepotGet(stack_trace_id);
208   if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
209     return false;
210   suppressed_stacks_sorted = false;
211   suppressed_stacks.push_back(stack_trace_id);
212   return true;
213 }
214 
215 static LeakSuppressionContext *GetSuppressionContext() {
216   CHECK(suppression_ctx);
217   return suppression_ctx;
218 }
219 
220 static InternalMmapVectorNoCtor<RootRegion> root_regions;
221 
222 InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions() {
223   return &root_regions;
224 }
225 
226 void InitCommonLsan() {
227   if (common_flags()->detect_leaks) {
228     // Initialization which can fail or print warnings should only be done if
229     // LSan is actually enabled.
230     InitializeSuppressions();
231     InitializePlatformSpecificModules();
232   }
233 }
234 
235 class Decorator : public __sanitizer::SanitizerCommonDecorator {
236  public:
237   Decorator() : SanitizerCommonDecorator() {}
238   const char *Error() { return Red(); }
239   const char *Leak() { return Blue(); }
240 };
241 
242 static inline bool CanBeAHeapPointer(uptr p) {
243   // Since our heap is located in mmap-ed memory, we can assume a sensible lower
244   // bound on heap addresses.
245   const uptr kMinAddress = 4 * 4096;
246   if (p < kMinAddress)
247     return false;
248 #  if defined(__x86_64__)
  // Accept only canonical-form user-space addresses (bits 47 and above clear).
250   return ((p >> 47) == 0);
251 #  elif defined(__mips64)
252   return ((p >> 40) == 0);
253 #  elif defined(__aarch64__)
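  // AArch64 kernels can be configured with different virtual address sizes
  // (e.g. 39, 42 or 48 bits), so derive the live VA width from a known-good
  // stack address instead of hard-coding a shift.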
254   unsigned runtimeVMA = (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
255   return ((p >> runtimeVMA) == 0);
256 #  else
257   return true;
258 #  endif
259 }
260 
261 // Scans the memory range, looking for byte patterns that point into allocator
262 // chunks. Marks those chunks with |tag| and adds them to |frontier|.
263 // There are two usage modes for this function: finding reachable chunks
264 // (|tag| = kReachable) and finding indirectly leaked chunks
265 // (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
266 // so |frontier| = 0.
267 void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
268                           const char *region_type, ChunkTag tag) {
269   CHECK(tag == kReachable || tag == kIndirectlyLeaked);
270   const uptr alignment = flags()->pointer_alignment();
271   LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
272                (void *)end);
273   uptr pp = begin;
274   if (pp % alignment)
275     pp = pp + alignment - pp % alignment;
276   for (; pp + sizeof(void *) <= end; pp += alignment) {
277     void *p = *reinterpret_cast<void **>(pp);
278     if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p)))
279       continue;
280     uptr chunk = PointsIntoChunk(p);
281     if (!chunk)
282       continue;
283     // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
284     if (chunk == begin)
285       continue;
286     LsanMetadata m(chunk);
287     if (m.tag() == kReachable || m.tag() == kIgnored)
288       continue;
289 
290     // Do this check relatively late so we can log only the interesting cases.
291     if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
292       LOG_POINTERS(
293           "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
294           "%zu.\n",
295           (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
296           m.requested_size());
297       continue;
298     }
299 
300     m.set_tag(tag);
301     LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
302                  (void *)pp, p, (void *)chunk,
303                  (void *)(chunk + m.requested_size()), m.requested_size());
304     if (frontier)
305       frontier->push_back(chunk);
306   }
307 }
308 
// Scans a global range for pointers.
310 void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
311   uptr allocator_begin = 0, allocator_end = 0;
312   GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
313   if (begin <= allocator_begin && allocator_begin < end) {
314     CHECK_LE(allocator_begin, allocator_end);
315     CHECK_LE(allocator_end, end);
316     if (begin < allocator_begin)
317       ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
318                            kReachable);
319     if (allocator_end < end)
320       ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
321   } else {
322     ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
323   }
324 }
325 
326 void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) {
327   Frontier *frontier = reinterpret_cast<Frontier *>(arg);
328   ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
329 }
330 
331 #  if SANITIZER_FUCHSIA
332 
333 // Fuchsia handles all threads together with its own callback.
334 static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {}
335 
336 #  else
337 
338 #    if SANITIZER_ANDROID
339 // FIXME: Move this out into *libcdep.cpp
340 extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
341     pid_t, void (*cb)(void *, void *, uptr, void *), void *);
342 #    endif
343 
344 static void ProcessThreadRegistry(Frontier *frontier) {
345   InternalMmapVector<uptr> ptrs;
346   GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
347       GetAdditionalThreadContextPtrs, &ptrs);
348 
349   for (uptr i = 0; i < ptrs.size(); ++i) {
350     void *ptr = reinterpret_cast<void *>(ptrs[i]);
351     uptr chunk = PointsIntoChunk(ptr);
352     if (!chunk)
353       continue;
354     LsanMetadata m(chunk);
355     if (!m.allocated())
356       continue;
357 
358     // Mark as reachable and add to frontier.
359     LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
360     m.set_tag(kReachable);
361     frontier->push_back(chunk);
362   }
363 }
364 
365 // Scans thread data (stacks and TLS) for heap pointers.
366 static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
367                            Frontier *frontier) {
368   InternalMmapVector<uptr> registers;
369   for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
370     tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
371     LOG_THREADS("Processing thread %llu.\n", os_id);
372     uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
373     DTLS *dtls;
374     bool thread_found =
375         GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
376                               &tls_end, &cache_begin, &cache_end, &dtls);
377     if (!thread_found) {
378       // If a thread can't be found in the thread registry, it's probably in the
379       // process of destruction. Log this event and move on.
380       LOG_THREADS("Thread %llu not found in registry.\n", os_id);
381       continue;
382     }
383     uptr sp;
384     PtraceRegistersStatus have_registers =
385         suspended_threads.GetRegistersAndSP(i, &registers, &sp);
386     if (have_registers != REGISTERS_AVAILABLE) {
387       Report("Unable to get registers from thread %llu.\n", os_id);
388       // If unable to get SP, consider the entire stack to be reachable unless
389       // GetRegistersAndSP failed with ESRCH.
390       if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
391         continue;
392       sp = stack_begin;
393     }
394 
395     if (flags()->use_registers && have_registers) {
396       uptr registers_begin = reinterpret_cast<uptr>(registers.data());
397       uptr registers_end =
398           reinterpret_cast<uptr>(registers.data() + registers.size());
399       ScanRangeForPointers(registers_begin, registers_end, frontier,
400                            "REGISTERS", kReachable);
401     }
402 
403     if (flags()->use_stacks) {
404       LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
405                   (void *)stack_end, (void *)sp);
406       if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on an alternate stack, or swapcontext was used).
409         // Again, consider the entire stack range to be reachable.
410         LOG_THREADS("WARNING: stack pointer not in stack range.\n");
411         uptr page_size = GetPageSizeCached();
412         int skipped = 0;
413         while (stack_begin < stack_end &&
414                !IsAccessibleMemoryRange(stack_begin, 1)) {
415           skipped++;
416           stack_begin += page_size;
417         }
418         LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
419                     skipped, (void *)stack_begin, (void *)stack_end);
420       } else {
421         // Shrink the stack range to ignore out-of-scope values.
422         stack_begin = sp;
423       }
424       ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
425                            kReachable);
426       ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
427     }
428 
429     if (flags()->use_tls) {
430       if (tls_begin) {
431         LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
        // If the TLS and cache ranges don't overlap, scan the full TLS range;
        // otherwise, scan only the non-overlapping portions.
434         if (cache_begin == cache_end || tls_end < cache_begin ||
435             tls_begin > cache_end) {
436           ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
437         } else {
438           if (tls_begin < cache_begin)
439             ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
440                                  kReachable);
441           if (tls_end > cache_end)
442             ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
443                                  kReachable);
444         }
445       }
446 #    if SANITIZER_ANDROID
      auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_id*/,
448                      void *arg) -> void {
449         ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
450                              reinterpret_cast<uptr>(dtls_end),
451                              reinterpret_cast<Frontier *>(arg), "DTLS",
452                              kReachable);
453       };
454 
      // FIXME: There might be a race condition here (and in Bionic) if the
      // thread is suspended in the middle of updating its DTLS. In other
      // words, we could scan already-freed memory. (Probably fine for now.)
458       __libc_iterate_dynamic_tls(os_id, cb, frontier);
459 #    else
460       if (dtls && !DTLSInDestruction(dtls)) {
461         ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
462           uptr dtls_beg = dtv.beg;
463           uptr dtls_end = dtls_beg + dtv.size;
464           if (dtls_beg < dtls_end) {
465             LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
466                         (void *)dtls_end);
467             ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
468                                  kReachable);
469           }
470         });
471       } else {
        // We are handling a thread whose DTLS is under destruction. Log this
        // and continue.
474         LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
475       }
476 #    endif
477     }
478   }
479 
  // Add pointers reachable from ThreadContexts.
481   ProcessThreadRegistry(frontier);
482 }
483 
484 #  endif  // SANITIZER_FUCHSIA
485 
486 void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
487                     uptr region_begin, uptr region_end, bool is_readable) {
488   uptr intersection_begin = Max(root_region.begin, region_begin);
489   uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
490   if (intersection_begin >= intersection_end)
491     return;
492   LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
493                (void *)root_region.begin,
494                (void *)(root_region.begin + root_region.size),
495                (void *)region_begin, (void *)region_end,
496                is_readable ? "readable" : "unreadable");
497   if (is_readable)
498     ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
499                          kReachable);
500 }
501 
502 static void ProcessRootRegion(Frontier *frontier,
503                               const RootRegion &root_region) {
504   MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
505   MemoryMappedSegment segment;
506   while (proc_maps.Next(&segment)) {
507     ScanRootRegion(frontier, root_region, segment.start, segment.end,
508                    segment.IsReadable());
509   }
510 }
511 
512 // Scans root regions for heap pointers.
513 static void ProcessRootRegions(Frontier *frontier) {
514   if (!flags()->use_root_regions)
515     return;
516   for (uptr i = 0; i < root_regions.size(); i++)
517     ProcessRootRegion(frontier, root_regions[i]);
518 }
519 
520 static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
521   while (frontier->size()) {
522     uptr next_chunk = frontier->back();
523     frontier->pop_back();
524     LsanMetadata m(next_chunk);
525     ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
526                          "HEAP", tag);
527   }
528 }
529 
530 // ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
531 // which are reachable from it as indirectly leaked.
532 static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
533   chunk = GetUserBegin(chunk);
534   LsanMetadata m(chunk);
535   if (m.allocated() && m.tag() != kReachable) {
536     ScanRangeForPointers(chunk, chunk + m.requested_size(),
537                          /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
538   }
539 }
540 
541 static void IgnoredSuppressedCb(uptr chunk, void *arg) {
542   CHECK(arg);
543   chunk = GetUserBegin(chunk);
544   LsanMetadata m(chunk);
545   if (!m.allocated() || m.tag() == kIgnored)
546     return;
547 
548   const InternalMmapVector<u32> &suppressed =
549       *static_cast<const InternalMmapVector<u32> *>(arg);
550   uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
551   if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
552     return;
553 
554   LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
555                (void *)(chunk + m.requested_size()), m.requested_size());
556   m.set_tag(kIgnored);
557 }
558 
559 // ForEachChunk callback. If chunk is marked as ignored, adds its address to
560 // frontier.
561 static void CollectIgnoredCb(uptr chunk, void *arg) {
562   CHECK(arg);
563   chunk = GetUserBegin(chunk);
564   LsanMetadata m(chunk);
565   if (m.allocated() && m.tag() == kIgnored) {
566     LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
567                  (void *)(chunk + m.requested_size()), m.requested_size());
568     reinterpret_cast<Frontier *>(arg)->push_back(chunk);
569   }
570 }
571 
572 // Sets the appropriate tag on each chunk.
573 static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
574                               Frontier *frontier) {
575   const InternalMmapVector<u32> &suppressed_stacks =
576       GetSuppressionContext()->GetSortedSuppressedStacks();
577   if (!suppressed_stacks.empty()) {
578     ForEachChunk(IgnoredSuppressedCb,
579                  const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
580   }
581   ForEachChunk(CollectIgnoredCb, frontier);
582   ProcessGlobalRegions(frontier);
583   ProcessThreads(suspended_threads, frontier);
584   ProcessRootRegions(frontier);
585   FloodFillTag(frontier, kReachable);
586 
587   // The check here is relatively expensive, so we do this in a separate flood
588   // fill. That way we can skip the check for chunks that are reachable
589   // otherwise.
590   LOG_POINTERS("Processing platform-specific allocations.\n");
591   ProcessPlatformSpecificAllocations(frontier);
592   FloodFillTag(frontier, kReachable);
593 
594   // Iterate over leaked chunks and mark those that are reachable from other
595   // leaked chunks.
596   LOG_POINTERS("Scanning leaked chunks.\n");
597   ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
598 }
599 
600 // ForEachChunk callback. Resets the tags to pre-leak-check state.
601 static void ResetTagsCb(uptr chunk, void *arg) {
602   (void)arg;
603   chunk = GetUserBegin(chunk);
604   LsanMetadata m(chunk);
605   if (m.allocated() && m.tag() != kIgnored)
606     m.set_tag(kDirectlyLeaked);
607 }
608 
609 // ForEachChunk callback. Aggregates information about unreachable chunks into
610 // a LeakReport.
611 static void CollectLeaksCb(uptr chunk, void *arg) {
612   CHECK(arg);
613   LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
614   chunk = GetUserBegin(chunk);
615   LsanMetadata m(chunk);
616   if (!m.allocated())
617     return;
618   if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
619     leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
620 }
621 
622 void LeakSuppressionContext::PrintMatchedSuppressions() {
623   InternalMmapVector<Suppression *> matched;
624   context.GetMatched(&matched);
625   if (!matched.size())
626     return;
627   const char *line = "-----------------------------------------------------";
628   Printf("%s\n", line);
629   Printf("Suppressions used:\n");
630   Printf("  count      bytes template\n");
631   for (uptr i = 0; i < matched.size(); i++) {
632     Printf("%7zu %10zu %s\n",
633            static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
634            matched[i]->weight, matched[i]->templ);
635   }
636   Printf("%s\n\n", line);
637 }
638 
639 static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
640   const InternalMmapVector<tid_t> &suspended_threads =
641       *(const InternalMmapVector<tid_t> *)arg;
642   if (tctx->status == ThreadStatusRunning) {
643     uptr i = InternalLowerBound(suspended_threads, tctx->os_id);
644     if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
645       Report(
646           "Running thread %llu was not suspended. False leaks are possible.\n",
647           tctx->os_id);
648   }
649 }
650 
651 #  if SANITIZER_FUCHSIA
652 
// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadsList is never really used.
655 static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}
656 
657 #  else  // !SANITIZER_FUCHSIA
658 
659 static void ReportUnsuspendedThreads(
660     const SuspendedThreadsList &suspended_threads) {
661   InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
662   for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
663     threads[i] = suspended_threads.GetThreadID(i);
664 
665   Sort(threads.data(), threads.size());
666 
667   GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
668       &ReportIfNotSuspended, &threads);
669 }
670 
671 #  endif  // !SANITIZER_FUCHSIA
672 
673 static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
674                                   void *arg) {
675   CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
676   CHECK(param);
677   CHECK(!param->success);
678   ReportUnsuspendedThreads(suspended_threads);
679   ClassifyAllChunks(suspended_threads, &param->frontier);
680   ForEachChunk(CollectLeaksCb, &param->leaks);
681   // Clean up for subsequent leak checks. This assumes we did not overwrite any
682   // kIgnored tags.
683   ForEachChunk(ResetTagsCb, nullptr);
684   param->success = true;
685 }
686 
687 static bool PrintResults(LeakReport &report) {
688   uptr unsuppressed_count = report.UnsuppressedLeakCount();
689   if (unsuppressed_count) {
690     Decorator d;
691     Printf(
692         "\n"
693         "================================================================="
694         "\n");
695     Printf("%s", d.Error());
696     Report("ERROR: LeakSanitizer: detected memory leaks\n");
697     Printf("%s", d.Default());
698     report.ReportTopLeaks(flags()->max_leaks);
699   }
700   if (common_flags()->print_suppressions)
701     GetSuppressionContext()->PrintMatchedSuppressions();
702   if (unsuppressed_count > 0) {
703     report.PrintSummary();
704     return true;
705   }
706   return false;
707 }
708 
709 static bool CheckForLeaks() {
710   if (&__lsan_is_turned_off && __lsan_is_turned_off())
711     return false;
  // Inside LockStuffAndStopTheWorld we can't run the symbolizer, so we can't
  // match suppressions. However, if a stack id was previously suppressed, it
  // should be suppressed in future checks as well.
715   for (int i = 0;; ++i) {
716     EnsureMainThreadIDIsCorrect();
717     CheckForLeaksParam param;
718     LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
719     if (!param.success) {
720       Report("LeakSanitizer has encountered a fatal error.\n");
721       Report(
722           "HINT: For debugging, try setting environment variable "
723           "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
724       Report(
725           "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
726           "etc)\n");
727       Die();
728     }
729     LeakReport leak_report;
730     leak_report.AddLeakedChunks(param.leaks);
731 
    // No new suppressed stacks, so a rerun will not help and we can report.
733     if (!leak_report.ApplySuppressions())
734       return PrintResults(leak_report);
735 
736     // No indirect leaks to report, so we are done here.
737     if (!leak_report.IndirectUnsuppressedLeakCount())
738       return PrintResults(leak_report);
739 
740     if (i >= 8) {
741       Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
742       return PrintResults(leak_report);
743     }
744 
745     // We found a new previously unseen suppressed call stack. Rerun to make
746     // sure it does not hold indirect leaks.
747     VReport(1, "Rerun with %zu suppressed stacks.",
748             GetSuppressionContext()->GetSortedSuppressedStacks().size());
749   }
750 }
751 
752 static bool has_reported_leaks = false;
753 bool HasReportedLeaks() { return has_reported_leaks; }
754 
755 void DoLeakCheck() {
756   Lock l(&global_mutex);
757   static bool already_done;
758   if (already_done)
759     return;
760   already_done = true;
761   has_reported_leaks = CheckForLeaks();
762   if (has_reported_leaks)
763     HandleLeaks();
764 }
765 
766 static int DoRecoverableLeakCheck() {
767   Lock l(&global_mutex);
768   bool have_leaks = CheckForLeaks();
769   return have_leaks ? 1 : 0;
770 }
771 
772 void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
773 
774 ///// LeakReport implementation. /////
775 
776 // A hard limit on the number of distinct leaks, to avoid quadratic complexity
777 // in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
778 // in real-world applications.
779 // FIXME: Get rid of this limit by moving logic into DedupLeaks.
780 const uptr kMaxLeaksConsidered = 5000;
781 
782 void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
783   for (const LeakedChunk &leak : chunks) {
784     uptr chunk = leak.chunk;
785     u32 stack_trace_id = leak.stack_trace_id;
786     uptr leaked_size = leak.leaked_size;
787     ChunkTag tag = leak.tag;
788     CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
789 
790     if (u32 resolution = flags()->resolution) {
791       StackTrace stack = StackDepotGet(stack_trace_id);
792       stack.size = Min(stack.size, resolution);
793       stack_trace_id = StackDepotPut(stack);
794     }
795 
796     bool is_directly_leaked = (tag == kDirectlyLeaked);
797     uptr i;
798     for (i = 0; i < leaks_.size(); i++) {
799       if (leaks_[i].stack_trace_id == stack_trace_id &&
800           leaks_[i].is_directly_leaked == is_directly_leaked) {
801         leaks_[i].hit_count++;
802         leaks_[i].total_size += leaked_size;
803         break;
804       }
805     }
806     if (i == leaks_.size()) {
807       if (leaks_.size() == kMaxLeaksConsidered)
808         return;
809       Leak leak = {next_id_++,         /* hit_count */ 1,
810                    leaked_size,        stack_trace_id,
811                    is_directly_leaked, /* is_suppressed */ false};
812       leaks_.push_back(leak);
813     }
814     if (flags()->report_objects) {
815       LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
816       leaked_objects_.push_back(obj);
817     }
818   }
819 }
820 
821 static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
822   if (leak1.is_directly_leaked == leak2.is_directly_leaked)
823     return leak1.total_size > leak2.total_size;
824   else
825     return leak1.is_directly_leaked;
826 }
827 
828 void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
829   CHECK(leaks_.size() <= kMaxLeaksConsidered);
830   Printf("\n");
831   if (leaks_.size() == kMaxLeaksConsidered)
832     Printf(
833         "Too many leaks! Only the first %zu leaks encountered will be "
834         "reported.\n",
835         kMaxLeaksConsidered);
836 
837   uptr unsuppressed_count = UnsuppressedLeakCount();
838   if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
839     Printf("The %zu top leak(s):\n", num_leaks_to_report);
840   Sort(leaks_.data(), leaks_.size(), &LeakComparator);
841   uptr leaks_reported = 0;
842   for (uptr i = 0; i < leaks_.size(); i++) {
843     if (leaks_[i].is_suppressed)
844       continue;
845     PrintReportForLeak(i);
846     leaks_reported++;
847     if (leaks_reported == num_leaks_to_report)
848       break;
849   }
850   if (leaks_reported < unsuppressed_count) {
851     uptr remaining = unsuppressed_count - leaks_reported;
852     Printf("Omitting %zu more leak(s).\n", remaining);
853   }
854 }
855 
856 void LeakReport::PrintReportForLeak(uptr index) {
857   Decorator d;
858   Printf("%s", d.Leak());
859   Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
860          leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
861          leaks_[index].total_size, leaks_[index].hit_count);
862   Printf("%s", d.Default());
863 
864   CHECK(leaks_[index].stack_trace_id);
865   StackDepotGet(leaks_[index].stack_trace_id).Print();
866 
867   if (flags()->report_objects) {
868     Printf("Objects leaked above:\n");
869     PrintLeakedObjectsForLeak(index);
870     Printf("\n");
871   }
872 }
873 
874 void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
875   u32 leak_id = leaks_[index].id;
876   for (uptr j = 0; j < leaked_objects_.size(); j++) {
877     if (leaked_objects_[j].leak_id == leak_id)
878       Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
879              leaked_objects_[j].size);
880   }
881 }
882 
883 void LeakReport::PrintSummary() {
884   CHECK(leaks_.size() <= kMaxLeaksConsidered);
885   uptr bytes = 0, allocations = 0;
886   for (uptr i = 0; i < leaks_.size(); i++) {
887     if (leaks_[i].is_suppressed)
888       continue;
889     bytes += leaks_[i].total_size;
890     allocations += leaks_[i].hit_count;
891   }
892   InternalScopedString summary;
893   summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
894                  allocations);
895   ReportErrorSummary(summary.data());
896 }
897 
898 uptr LeakReport::ApplySuppressions() {
899   LeakSuppressionContext *suppressions = GetSuppressionContext();
  uptr new_suppressions = 0;
901   for (uptr i = 0; i < leaks_.size(); i++) {
902     if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
903                                leaks_[i].total_size)) {
904       leaks_[i].is_suppressed = true;
905       ++new_suppressions;
906     }
907   }
908   return new_suppressions;
909 }
910 
911 uptr LeakReport::UnsuppressedLeakCount() {
912   uptr result = 0;
913   for (uptr i = 0; i < leaks_.size(); i++)
914     if (!leaks_[i].is_suppressed)
915       result++;
916   return result;
917 }
918 
919 uptr LeakReport::IndirectUnsuppressedLeakCount() {
920   uptr result = 0;
921   for (uptr i = 0; i < leaks_.size(); i++)
922     if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
923       result++;
924   return result;
925 }
926 
927 }  // namespace __lsan
928 #else   // CAN_SANITIZE_LEAKS
929 namespace __lsan {
930 void InitCommonLsan() {}
931 void DoLeakCheck() {}
932 void DoRecoverableLeakCheckVoid() {}
933 void DisableInThisThread() {}
934 void EnableInThisThread() {}
935 }  // namespace __lsan
936 #endif  // CAN_SANITIZE_LEAKS
937 
938 using namespace __lsan;
939 
940 extern "C" {
941 SANITIZER_INTERFACE_ATTRIBUTE
942 void __lsan_ignore_object(const void *p) {
943 #if CAN_SANITIZE_LEAKS
944   if (!common_flags()->detect_leaks)
945     return;
946   // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
947   // locked.
948   Lock l(&global_mutex);
949   IgnoreObjectResult res = IgnoreObjectLocked(p);
950   if (res == kIgnoreObjectInvalid)
951     VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
952   if (res == kIgnoreObjectAlreadyIgnored)
953     VReport(1,
954             "__lsan_ignore_object(): "
955             "heap object at %p is already being ignored\n",
956             p);
957   if (res == kIgnoreObjectSuccess)
958     VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
959 #endif  // CAN_SANITIZE_LEAKS
960 }
961 
962 SANITIZER_INTERFACE_ATTRIBUTE
963 void __lsan_register_root_region(const void *begin, uptr size) {
964 #if CAN_SANITIZE_LEAKS
965   Lock l(&global_mutex);
966   RootRegion region = {reinterpret_cast<uptr>(begin), size};
967   root_regions.push_back(region);
968   VReport(1, "Registered root region at %p of size %zu\n", begin, size);
969 #endif  // CAN_SANITIZE_LEAKS
970 }
971 
972 SANITIZER_INTERFACE_ATTRIBUTE
973 void __lsan_unregister_root_region(const void *begin, uptr size) {
974 #if CAN_SANITIZE_LEAKS
975   Lock l(&global_mutex);
976   bool removed = false;
977   for (uptr i = 0; i < root_regions.size(); i++) {
978     RootRegion region = root_regions[i];
979     if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
980       removed = true;
981       uptr last_index = root_regions.size() - 1;
982       root_regions[i] = root_regions[last_index];
983       root_regions.pop_back();
984       VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
985       break;
986     }
987   }
988   if (!removed) {
989     Report(
990         "__lsan_unregister_root_region(): region at %p of size %zu has not "
991         "been registered.\n",
992         begin, size);
993     Die();
994   }
995 #endif  // CAN_SANITIZE_LEAKS
996 }
997 
998 SANITIZER_INTERFACE_ATTRIBUTE
999 void __lsan_disable() {
1000 #if CAN_SANITIZE_LEAKS
1001   __lsan::DisableInThisThread();
1002 #endif
1003 }
1004 
1005 SANITIZER_INTERFACE_ATTRIBUTE
1006 void __lsan_enable() {
1007 #if CAN_SANITIZE_LEAKS
1008   __lsan::EnableInThisThread();
1009 #endif
1010 }
1011 
1012 SANITIZER_INTERFACE_ATTRIBUTE
1013 void __lsan_do_leak_check() {
1014 #if CAN_SANITIZE_LEAKS
1015   if (common_flags()->detect_leaks)
1016     __lsan::DoLeakCheck();
1017 #endif  // CAN_SANITIZE_LEAKS
1018 }
1019 
1020 SANITIZER_INTERFACE_ATTRIBUTE
1021 int __lsan_do_recoverable_leak_check() {
1022 #if CAN_SANITIZE_LEAKS
1023   if (common_flags()->detect_leaks)
1024     return __lsan::DoRecoverableLeakCheck();
1025 #endif  // CAN_SANITIZE_LEAKS
1026   return 0;
1027 }
1028 
1029 SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
1030   return "";
1031 }
1032 
1033 #if !SANITIZER_SUPPORTS_WEAK_HOOKS
1034 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int
1035 __lsan_is_turned_off() {
1036   return 0;
1037 }
1038 
1039 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *
1040 __lsan_default_suppressions() {
1041   return "";
1042 }
1043 #endif
1044 }  // extern "C"
1045