//=-- lsan_common.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS

#  if SANITIZER_APPLE
// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127
#    if SANITIZER_IOS && !SANITIZER_IOSSIM
#      define OBJC_DATA_MASK 0x0000007ffffffff8UL
#    else
#      define OBJC_DATA_MASK 0x00007ffffffffff8UL
#    endif
#  endif

namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
static Mutex global_mutex;

Flags lsan_flags;

void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
}

void Flags::SetDefaults() {
#  define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#  include "lsan_flags.inc"
#  undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#  define LSAN_FLAG(Type, Name, DefaultValue, Description) \
    RegisterFlag(parser, #Name, Description, &f->Name);
#  include "lsan_flags.inc"
#  undef LSAN_FLAG
}

#  define LOG_POINTERS(...)      \
    do {                         \
      if (flags()->log_pointers) \
        Report(__VA_ARGS__);     \
    } while (0)

#  define LOG_THREADS(...)      \
    do {                        \
      if (flags()->log_threads) \
        Report(__VA_ARGS__);    \
    } while (0)
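
// Both logs are disabled by default; they can be enabled at run time via
// LSAN_OPTIONS, e.g. LSAN_OPTIONS=log_pointers=1:log_threads=1 (the flags are
// declared in lsan_flags.inc).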

class LeakSuppressionContext {
  bool parsed = false;
  SuppressionContext context;
  bool suppressed_stacks_sorted = true;
  InternalMmapVector<u32> suppressed_stacks;
  const LoadedModule *suppress_module = nullptr;

  void LazyInit();
  Suppression *GetSuppressionForAddr(uptr addr);
  bool SuppressInvalid(const StackTrace &stack);
  bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);

 public:
  LeakSuppressionContext(const char *suppression_types[],
                         int suppression_types_num)
      : context(suppression_types, suppression_types_num) {}

  bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);

  const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
    if (!suppressed_stacks_sorted) {
      suppressed_stacks_sorted = true;
      SortAndDedup(suppressed_stacks);
    }
    return suppressed_stacks;
  }
  void PrintMatchedSuppressions();
};

ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = {kSuppressionLeak};
static const char kStdSuppressions[] =
#  if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // definition.
    "leak:*pthread_exit*\n"
#  endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#  if SANITIZER_APPLE
    // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
    "leak:*_os_trace*\n"
#  endif
    // TLS leak in some glibc versions, described in
    // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
    "leak:*tls_get_addr*\n";
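
// User-supplied suppressions use the same format: one "leak:<pattern>" glob
// per line, matched against module, function, and source-file names (see
// GetSuppressionForAddr below). A hypothetical example, loaded via
// LSAN_OPTIONS=suppressions=/path/to/supp.txt:
//   leak:libfoo.so
//   leak:MyLeakyFunction
//   leak:leaky_file.cpp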

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder)
      LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
}

void LeakSuppressionContext::LazyInit() {
  if (!parsed) {
    parsed = true;
    context.ParseFromFile(flags()->suppressions);
    if (&__lsan_default_suppressions)
      context.Parse(__lsan_default_suppressions());
    context.Parse(kStdSuppressions);
    if (flags()->use_tls && flags()->use_ld_allocations)
      suppress_module = GetLinker();
  }
}

Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr);
  if (!module_name)
    module_name = "<unknown module>";
  if (context.Match(module_name, kSuppressionLeak, &s))
    return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
        context.Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static uptr GetCallerPC(const StackTrace &stack) {
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

#  if SANITIZER_APPLE
// Several pointers in the Objective-C runtime (method cache and class_rw_t,
// for example) are tagged with additional bits we need to strip.
static inline void *TransformPointer(void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return reinterpret_cast<void *>(ptr & OBJC_DATA_MASK);
}
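
// Illustrative example (hypothetical values): with the non-iOS mask, a tagged
// pointer 0xa0007f0012345678 transforms to 0x00007f0012345678. The low three
// bits are also cleared, which is harmless for interior pointers into
// 8-byte-aligned chunks.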
#  endif

// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, the dynamic loader's internal
// bookkeeping for loaded modules, etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by
// intercepting __libc_memalign(). On top of that, there is no easy way to
// reach them. Their addresses are stored in a dynamically allocated array (the
// DTV) which is referenced from the static TLS. Unfortunately, we can't just
// rely on the DTV being reachable from the static TLS, and the dynamic TLS
// being reachable from the DTV. This is because the initial DTV is allocated
// before our interception mechanism kicks in, and thus we don't recognize it
// as allocated memory. We can't special-case it either, since we don't know
// its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks that the caller PC is valid
// before reporting chunks as leaked.
bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
  uptr caller_pc = GetCallerPC(stack);
  // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
  // it as reachable, as we can't properly report its allocation stack anyway.
  return !caller_pc ||
         (suppress_module && suppress_module->containsAddress(caller_pc));
}

bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
                                            uptr hit_count, uptr total_size) {
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) {
      s->weight += total_size;
      atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
                                      uptr total_size) {
  LazyInit();
  StackTrace stack = StackDepotGet(stack_trace_id);
  if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
    return false;
  suppressed_stacks_sorted = false;
  suppressed_stacks.push_back(stack_trace_id);
  return true;
}

static LeakSuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

void InitCommonLsan() {
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
};

static inline bool MaybeUserPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress)
    return false;
#  if defined(__x86_64__)
  // TODO: support LAM48 and 5 level page tables.
  // LAM_U57 mask format
  //  * top byte: 0x81 because the format is: [0] [6-bit tag] [0]
  //  * top-1 byte: 0xff because it should be 0
  //  * top-2 byte: 0x80 because Linux uses 128 TB VMA ending at 0x7fffffffffff
  constexpr uptr kLAM_U57Mask = 0x81ff80;
  constexpr uptr kPointerMask = kLAM_U57Mask << 40;
  return ((p & kPointerMask) == 0);
#  elif defined(__mips64)
  return ((p >> 40) == 0);
#  elif defined(__aarch64__)
  // TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in
  // address translation and can be used to store a tag.
  constexpr uptr kPointerMask = 255ULL << 48;
  // Accept up to 48 bit VMA.
  return ((p & kPointerMask) == 0);
#  elif defined(__loongarch_lp64)
  // Allow a 47-bit user-space VMA for now.
  return ((p >> 47) == 0);
#  else
  return true;
#  endif
}
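
// Illustrative example (hypothetical values): on x86_64 the LAM_U57 mask
// above accepts a typical userspace pointer such as 0x00007f1234567890 and
// rejects kernel-half addresses such as 0xffff800000000000.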

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
               (void *)end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void **>(pp);
#  if SANITIZER_APPLE
    p = TransformPointer(p);
#  endif
    if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
      continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk)
      continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin)
      continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored)
      continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
          m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
                 (void *)pp, p, (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

// Scans a global range for heap pointers, excluding the range occupied by the
// allocator's own globals.
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}

void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
                          Frontier *frontier) {
  for (uptr i = 0; i < ranges.size(); i++) {
    ScanRangeForPointers(ranges[i].begin, ranges[i].end, frontier, "FAKE STACK",
                         kReachable);
  }
}

#  if SANITIZER_FUCHSIA

// Fuchsia handles all threads together with its own callback.
static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t,
                           uptr) {}

#  else

#    if SANITIZER_ANDROID
// FIXME: Move this out into *libcdep.cpp
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
    pid_t, void (*cb)(void *, void *, uptr, void *), void *);
#    endif

static void ProcessThreadRegistry(Frontier *frontier) {
  InternalMmapVector<uptr> ptrs;
  GetAdditionalThreadContextPtrsLocked(&ptrs);

  for (uptr i = 0; i < ptrs.size(); ++i) {
    void *ptr = reinterpret_cast<void *>(ptrs[i]);
    uptr chunk = PointsIntoChunk(ptr);
    if (!chunk)
      continue;
    LsanMetadata m(chunk);
    if (!m.allocated())
      continue;

    // Mark as reachable and add to frontier.
    LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
    m.set_tag(kReachable);
    frontier->push_back(chunk);
  }
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier, tid_t caller_tid,
                           uptr caller_sp) {
  InternalMmapVector<uptr> registers;
  InternalMmapVector<Range> extra_ranges;
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %llu.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found =
        GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
                              &tls_end, &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %llu not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, &registers, &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %llu.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
        continue;
      sp = stack_begin;
    }
    if (suspended_threads.GetThreadID(i) == caller_tid) {
      sp = caller_sp;
    }

    if (flags()->use_registers && have_registers) {
      uptr registers_begin = reinterpret_cast<uptr>(registers.data());
      uptr registers_end =
          reinterpret_cast<uptr>(registers.data() + registers.size());
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);
    }

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
                  (void *)stack_end, (void *)sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on an alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, (void *)stack_begin, (void *)stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      extra_ranges.clear();
      GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
      ScanExtraStackRanges(extra_ranges, frontier);
    }

    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
        // If the TLS and cache ranges don't overlap, scan the full TLS range;
        // otherwise, scan only the non-overlapping portions.
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
#    if SANITIZER_ANDROID
      auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_id*/,
                     void *arg) -> void {
        ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
                             reinterpret_cast<uptr>(dtls_end),
                             reinterpret_cast<Frontier *>(arg), "DTLS",
                             kReachable);
      };

      // FIXME: There might be a race condition here (and in Bionic) if the
      // thread is suspended in the middle of updating its DTLS. In other
      // words, we could scan already-freed memory. (Probably fine for now.)
      __libc_iterate_dynamic_tls(os_id, cb, frontier);
#    else
      if (dtls && !DTLSInDestruction(dtls)) {
        ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
          uptr dtls_beg = dtv.beg;
          uptr dtls_end = dtls_beg + dtv.size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
                        (void *)dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        });
      } else {
        // We are handling a thread with its DTLS under destruction. Log this
        // and continue.
        LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
      }
#    endif
    }
  }

  // Add pointers reachable from ThreadContexts.
  ProcessThreadRegistry(frontier);
}

#  endif  // SANITIZER_FUCHSIA

// A map from [region_begin, region_end) pairs to the number of times each
// region was registered; a region may be registered more than once and must
// be unregistered a matching number of times.
using RootRegions = DenseMap<detail::DenseMapPair<uptr, uptr>, uptr>;

static RootRegions &GetRootRegionsLocked() {
  global_mutex.CheckLocked();
  static RootRegions *regions = nullptr;
  alignas(RootRegions) static char placeholder[sizeof(RootRegions)];
  if (!regions)
    regions = new (placeholder) RootRegions();
  return *regions;
}

bool HasRootRegions() { return !GetRootRegionsLocked().empty(); }

void ScanRootRegions(Frontier *frontier,
                     const InternalMmapVectorNoCtor<Region> &mapped_regions) {
  if (!flags()->use_root_regions)
    return;

  InternalMmapVector<Region> regions;
  GetRootRegionsLocked().forEach([&](const auto &kv) {
    regions.push_back({kv.first.first, kv.first.second});
    return true;
  });

  InternalMmapVector<Region> intersection;
  Intersect(mapped_regions, regions, intersection);

  for (const Region &r : intersection) {
    LOG_POINTERS("Root region intersects with mapped region at %p-%p\n",
                 (void *)r.begin, (void *)r.end);
    ScanRangeForPointers(r.begin, r.end, frontier, "ROOT", kReachable);
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions || !HasRootRegions())
    return;
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  InternalMmapVector<Region> mapped_regions;
  while (proc_maps.Next(&segment))
    if (segment.IsReadable())
      mapped_regions.push_back({segment.start, segment.end});
  ScanRootRegions(frontier, mapped_regions);
}

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}
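
// Note: FloodFillTag (above) is the propagation step of the reachability
// analysis. It pops chunks off the worklist and scans each one with
// ScanRangeForPointers, which may in turn push newly discovered chunks, until
// the frontier is empty.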

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

static void IgnoredSuppressedCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated() || m.tag() == kIgnored)
    return;

  const InternalMmapVector<u32> &suppressed =
      *static_cast<const InternalMmapVector<u32> *>(arg);
  uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
  if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
    return;

  LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
               (void *)(chunk + m.requested_size()), m.requested_size());
  m.set_tag(kIgnored);
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
                              Frontier *frontier, tid_t caller_tid,
                              uptr caller_sp) {
  const InternalMmapVector<u32> &suppressed_stacks =
      GetSuppressionContext()->GetSortedSuppressedStacks();
  if (!suppressed_stacks.empty()) {
    ForEachChunk(IgnoredSuppressedCb,
                 const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
  }
  ForEachChunk(CollectIgnoredCb, frontier);
  ProcessGlobalRegions(frontier);
  ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp);
  ProcessRootRegions(frontier);
  FloodFillTag(frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(frontier);
  FloodFillTag(frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated())
    return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
    leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
}

void LeakSuppressionContext::PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched;
  context.GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++) {
    Printf("%7zu %10zu %s\n",
           static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
           matched[i]->weight, matched[i]->templ);
  }
  Printf("%s\n\n", line);
}

#  if SANITIZER_FUCHSIA

// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadList is never really used.
static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}

#  else  // !SANITIZER_FUCHSIA

static void ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
    threads[i] = suspended_threads.GetThreadID(i);

  Sort(threads.data(), threads.size());

  InternalMmapVector<tid_t> unsuspended;
  GetRunningThreadsLocked(&unsuspended);

  for (auto os_id : unsuspended) {
    uptr i = InternalLowerBound(threads, os_id);
    if (i >= threads.size() || threads[i] != os_id)
      Report(
          "Running thread %llu was not suspended. False leaks are possible.\n",
          os_id);
  }
}

#  endif  // !SANITIZER_FUCHSIA

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ReportUnsuspendedThreads(suspended_threads);
  ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
                    param->caller_sp);
  ForEachChunk(CollectLeaksCb, &param->leaks);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

static bool PrintResults(LeakReport &report) {
  uptr unsuppressed_count = report.UnsuppressedLeakCount();
  if (unsuppressed_count) {
    Decorator d;
    Printf(
        "\n"
        "================================================================="
        "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.Default());
    report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    GetSuppressionContext()->PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    report.PrintSummary();
    return true;
  }
  return false;
}

static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off()) {
    VReport(1, "LeakSanitizer is disabled");
    return false;
  }
  VReport(1, "LeakSanitizer: checking for leaks");
  // Inside LockStuffAndStopTheWorld we can't run the symbolizer, so we can't
  // match suppressions. However, if a stack id was previously suppressed, it
  // should be suppressed in future checks as well.
  for (int i = 0;; ++i) {
    EnsureMainThreadIDIsCorrect();
    CheckForLeaksParam param;
    // Capture the calling thread's stack pointer early, to avoid false
    // negatives: an old frame full of dead pointers may be overlapped by a new
    // frame inside CheckForLeaks, and the bytes holding those pointers may not
    // have been overwritten by the time the threads are suspended and their
    // stack pointers are captured.
    param.caller_tid = GetTid();
    param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0));
    LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
    if (!param.success) {
      Report("LeakSanitizer has encountered a fatal error.\n");
      Report(
          "HINT: For debugging, try setting environment variable "
          "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
      Report(
          "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
          "etc)\n");
      Die();
    }
    LeakReport leak_report;
    leak_report.AddLeakedChunks(param.leaks);

    // No new suppressed stacks, so a rerun will not help and we can report.
    if (!leak_report.ApplySuppressions())
      return PrintResults(leak_report);

    // No indirect leaks to report, so we are done here.
    if (!leak_report.IndirectUnsuppressedLeakCount())
      return PrintResults(leak_report);

    if (i >= 8) {
      Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
      return PrintResults(leak_report);
    }

    // We found a new previously unseen suppressed call stack. Rerun to make
    // sure it does not hold indirect leaks.
    VReport(1, "Rerun with %zu suppressed stacks.",
            GetSuppressionContext()->GetSortedSuppressedStacks().size());
  }
}

static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }

void DoLeakCheck() {
  Lock l(&global_mutex);
  static bool already_done;
  if (already_done)
    return;
  already_done = true;
  has_reported_leaks = CheckForLeaks();
  if (has_reported_leaks)
    HandleLeaks();
}

static int DoRecoverableLeakCheck() {
  Lock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunks(). We don't expect to ever see this many
// leaks in real-world applications.
// FIXME: Get rid of this limit by moving logic into DedupLeaks.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
  for (const LeakedChunk &leak : chunks) {
    uptr chunk = leak.chunk;
    u32 stack_trace_id = leak.stack_trace_id;
    uptr leaked_size = leak.leaked_size;
    ChunkTag tag = leak.tag;
    CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);

    if (u32 resolution = flags()->resolution) {
      StackTrace stack = StackDepotGet(stack_trace_id);
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    }

    bool is_directly_leaked = (tag == kDirectlyLeaked);
    uptr i;
    for (i = 0; i < leaks_.size(); i++) {
      if (leaks_[i].stack_trace_id == stack_trace_id &&
          leaks_[i].is_directly_leaked == is_directly_leaked) {
        leaks_[i].hit_count++;
        leaks_[i].total_size += leaked_size;
        break;
      }
    }
    if (i == leaks_.size()) {
      if (leaks_.size() == kMaxLeaksConsidered)
        return;
      Leak leak = {next_id_++,         /* hit_count */ 1,
                   leaked_size,        stack_trace_id,
                   is_directly_leaked, /* is_suppressed */ false};
      leaks_.push_back(leak);
    }
    if (flags()->report_objects) {
      LeakedObject obj = {leaks_[i].id, GetUserAddr(chunk), leaked_size};
      leaked_objects_.push_back(obj);
    }
  }
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf(
        "Too many leaks! Only the first %zu leaks encountered will be "
        "reported.\n",
        kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report)
      break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.Default());

  CHECK(leaks_[index].stack_trace_id);
  StackDepotGet(leaks_[index].stack_trace_id).Print();

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary;
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

uptr LeakReport::ApplySuppressions() {
  LeakSuppressionContext *suppressions = GetSuppressionContext();
  uptr new_suppressions = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
                               leaks_[i].total_size)) {
      leaks_[i].is_suppressed = true;
      ++new_suppressions;
    }
  }
  return new_suppressions;
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed)
      result++;
  return result;
}

uptr LeakReport::IndirectUnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
      result++;
  return result;
}

}  // namespace __lsan
#else   // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() {}
void DoLeakCheck() {}
void DoRecoverableLeakCheckVoid() {}
void DisableInThisThread() {}
void EnableInThisThread() {}
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  Lock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObject(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1,
            "__lsan_ignore_object(): "
            "heap object at %p is already being ignored\n",
            p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}
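
// Hypothetical usage: exclude an intentionally unfreed object from reports.
//   void *p = malloc(16);  // deliberately never freed
//   __lsan_ignore_object(p);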

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
  uptr b = reinterpret_cast<uptr>(begin);
  uptr e = b + size;
  CHECK_LT(b, e);

  Lock l(&global_mutex);
  ++GetRootRegionsLocked()[{b, e}];
#endif  // CAN_SANITIZE_LEAKS
}
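
// Hypothetical usage: make a custom arena a root so that pointers stored in
// it keep heap chunks reachable. Registrations are counted, so a region must
// be unregistered as many times as it was registered.
//   static char arena[1 << 20];
//   __lsan_register_root_region(arena, sizeof(arena));
//   ...
//   __lsan_unregister_root_region(arena, sizeof(arena));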

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  uptr b = reinterpret_cast<uptr>(begin);
  uptr e = b + size;
  CHECK_LT(b, e);
  VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);

  {
    Lock l(&global_mutex);
    if (auto *f = GetRootRegionsLocked().find({b, e})) {
      if (--(f->second) == 0)
        GetRootRegionsLocked().erase(f);
      return;
    }
  }
  Report(
      "__lsan_unregister_root_region(): region at %p of size %zu has not "
      "been registered.\n",
      begin, size);
  Die();
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
  return 0;
}
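
// Unlike __lsan_do_leak_check(), which runs at most once, the recoverable
// variant may be called repeatedly; a hypothetical periodic check in a
// long-running service:
//   if (__lsan_do_recoverable_leak_check())
//     fprintf(stderr, "leaks detected at checkpoint\n");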

SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
  return "";
}
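
// This is a weak definition; a binary may override it to set defaults without
// LSAN_OPTIONS, e.g. (hypothetical override, flag names from lsan_flags.inc):
//   extern "C" const char *__lsan_default_options() {
//     return "max_leaks=10:use_registers=0";
//   }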

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_WEAK_DEF(int, __lsan_is_turned_off, void) {
  return 0;
}

SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_suppressions, void) {
  return "";
}
#endif
}  // extern "C"