//===-- lsan_common.cpp ---------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of LeakSanitizer.
10 // Implementation of common leak checking functionality.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "lsan_common.h"
15 
16 #include "sanitizer_common/sanitizer_common.h"
17 #include "sanitizer_common/sanitizer_flag_parser.h"
18 #include "sanitizer_common/sanitizer_flags.h"
19 #include "sanitizer_common/sanitizer_placement_new.h"
20 #include "sanitizer_common/sanitizer_procmaps.h"
21 #include "sanitizer_common/sanitizer_report_decorator.h"
22 #include "sanitizer_common/sanitizer_stackdepot.h"
23 #include "sanitizer_common/sanitizer_stacktrace.h"
24 #include "sanitizer_common/sanitizer_suppressions.h"
25 #include "sanitizer_common/sanitizer_thread_registry.h"
26 #include "sanitizer_common/sanitizer_tls_get_addr.h"
27 
28 #if CAN_SANITIZE_LEAKS
29 
30 #  if SANITIZER_APPLE
31 // https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127
32 #    if SANITIZER_IOS && !SANITIZER_IOSSIM
33 #      define OBJC_DATA_MASK 0x0000007ffffffff8UL
34 #    else
35 #      define OBJC_DATA_MASK 0x00007ffffffffff8UL
36 #    endif
37 // https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L139
38 #    define OBJC_FAST_IS_RW 0x8000000000000000UL
39 #  endif
40 
41 namespace __lsan {
42 
43 // This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
44 // also to protect the global list of root regions.
45 Mutex global_mutex;
46 
47 Flags lsan_flags;
48 
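// Called when the per-thread disable counter would underflow, i.e. when
// __lsan_enable() is called more times than __lsan_disable(). Fatal if leak
// detection is enabled.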
49 void DisableCounterUnderflow() {
50   if (common_flags()->detect_leaks) {
51     Report("Unmatched call to __lsan_enable().\n");
52     Die();
53   }
54 }
55 
56 void Flags::SetDefaults() {
57 #  define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
58 #  include "lsan_flags.inc"
59 #  undef LSAN_FLAG
60 }
61 
62 void RegisterLsanFlags(FlagParser *parser, Flags *f) {
63 #  define LSAN_FLAG(Type, Name, DefaultValue, Description) \
64     RegisterFlag(parser, #Name, Description, &f->Name);
65 #  include "lsan_flags.inc"
66 #  undef LSAN_FLAG
67 }
68 
69 #  define LOG_POINTERS(...)      \
70     do {                         \
71       if (flags()->log_pointers) \
72         Report(__VA_ARGS__);     \
73     } while (0)
74 
75 #  define LOG_THREADS(...)      \
76     do {                        \
77       if (flags()->log_threads) \
78         Report(__VA_ARGS__);    \
79     } while (0)
80 
81 class LeakSuppressionContext {
82   bool parsed = false;
83   SuppressionContext context;
84   bool suppressed_stacks_sorted = true;
85   InternalMmapVector<u32> suppressed_stacks;
86   const LoadedModule *suppress_module = nullptr;
87 
88   void LazyInit();
89   Suppression *GetSuppressionForAddr(uptr addr);
90   bool SuppressInvalid(const StackTrace &stack);
91   bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);
92 
93  public:
  LeakSuppressionContext(const char *suppression_types[],
                         int suppression_types_num)
      : context(suppression_types, suppression_types_num) {}
97 
98   bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);
99 
100   const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
101     if (!suppressed_stacks_sorted) {
102       suppressed_stacks_sorted = true;
103       SortAndDedup(suppressed_stacks);
104     }
105     return suppressed_stacks;
106   }
107   void PrintMatchedSuppressions();
108 };
109 
110 ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
111 static LeakSuppressionContext *suppression_ctx = nullptr;
112 static const char kSuppressionLeak[] = "leak";
113 static const char *kSuppressionTypes[] = {kSuppressionLeak};
114 static const char kStdSuppressions[] =
115 #  if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
116     // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
117     // definition.
118     "leak:*pthread_exit*\n"
119 #  endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
120 #  if SANITIZER_APPLE
121     // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
122     "leak:*_os_trace*\n"
123 #  endif
124     // TLS leak in some glibc versions, described in
125     // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
126     "leak:*tls_get_addr*\n";
127 
128 void InitializeSuppressions() {
129   CHECK_EQ(nullptr, suppression_ctx);
130   suppression_ctx = new (suppression_placeholder)
131       LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
132 }
133 
134 void LeakSuppressionContext::LazyInit() {
135   if (!parsed) {
136     parsed = true;
137     context.ParseFromFile(flags()->suppressions);
138     if (&__lsan_default_suppressions)
139       context.Parse(__lsan_default_suppressions());
140     context.Parse(kStdSuppressions);
141     if (flags()->use_tls && flags()->use_ld_allocations)
142       suppress_module = GetLinker();
143   }
144 }
145 
146 Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
147   Suppression *s = nullptr;
148 
149   // Suppress by module name.
150   const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr);
151   if (!module_name)
152     module_name = "<unknown module>";
153   if (context.Match(module_name, kSuppressionLeak, &s))
154     return s;
155 
156   // Suppress by file or function name.
157   SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
158   for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
159     if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
160         context.Match(cur->info.file, kSuppressionLeak, &s)) {
161       break;
162     }
163   }
164   frames->ClearAll();
165   return s;
166 }
167 
168 static uptr GetCallerPC(const StackTrace &stack) {
169   // The top frame is our malloc/calloc/etc. The next frame is the caller.
170   if (stack.size >= 2)
171     return stack.trace[1];
172   return 0;
173 }
174 
175 #  if SANITIZER_APPLE
176 // Objective-C class data pointers are stored with flags in the low bits, so
177 // they need to be transformed back into something that looks like a pointer.
178 static inline void *MaybeTransformPointer(void *p) {
179   uptr ptr = reinterpret_cast<uptr>(p);
180   if ((ptr & OBJC_FAST_IS_RW) == OBJC_FAST_IS_RW)
181     ptr &= OBJC_DATA_MASK;
182   return reinterpret_cast<void *>(ptr);
183 }
184 #  endif
185 
// On Linux, treats all chunks allocated from ld-linux.so as reachable. This
// covers dynamically allocated TLS blocks, the dynamic loader's internal
// bookkeeping for loaded modules, etc.
189 // Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
190 // They are allocated with a __libc_memalign() call in allocate_and_init()
191 // (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
192 // blocks, but we can make sure they come from our own allocator by intercepting
193 // __libc_memalign(). On top of that, there is no easy way to reach them. Their
194 // addresses are stored in a dynamically allocated array (the DTV) which is
195 // referenced from the static TLS. Unfortunately, we can't just rely on the DTV
196 // being reachable from the static TLS, and the dynamic TLS being reachable from
197 // the DTV. This is because the initial DTV is allocated before our interception
198 // mechanism kicks in, and thus we don't recognize it as allocated memory. We
199 // can't special-case it either, since we don't know its size.
200 // Our solution is to include in the root set all allocations made from
201 // ld-linux.so (which is where allocate_and_init() is implemented). This is
202 // guaranteed to include all dynamic TLS blocks (and possibly other allocations
203 // which we don't care about).
204 // On all other platforms, this simply checks to ensure that the caller pc is
205 // valid before reporting chunks as leaked.
206 bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
207   uptr caller_pc = GetCallerPC(stack);
208   // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
209   // it as reachable, as we can't properly report its allocation stack anyway.
210   return !caller_pc ||
211          (suppress_module && suppress_module->containsAddress(caller_pc));
212 }
213 
214 bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
215                                             uptr hit_count, uptr total_size) {
216   for (uptr i = 0; i < stack.size; i++) {
217     Suppression *s = GetSuppressionForAddr(
218         StackTrace::GetPreviousInstructionPc(stack.trace[i]));
219     if (s) {
220       s->weight += total_size;
221       atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
222       return true;
223     }
224   }
225   return false;
226 }
227 
228 bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
229                                       uptr total_size) {
230   LazyInit();
231   StackTrace stack = StackDepotGet(stack_trace_id);
232   if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
233     return false;
234   suppressed_stacks_sorted = false;
235   suppressed_stacks.push_back(stack_trace_id);
236   return true;
237 }
238 
239 static LeakSuppressionContext *GetSuppressionContext() {
240   CHECK(suppression_ctx);
241   return suppression_ctx;
242 }
243 
244 static InternalMmapVectorNoCtor<RootRegion> root_regions;
245 
246 InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions() {
247   return &root_regions;
248 }
249 
250 void InitCommonLsan() {
251   if (common_flags()->detect_leaks) {
252     // Initialization which can fail or print warnings should only be done if
253     // LSan is actually enabled.
254     InitializeSuppressions();
255     InitializePlatformSpecificModules();
256   }
257 }
258 
259 class Decorator : public __sanitizer::SanitizerCommonDecorator {
260  public:
261   Decorator() : SanitizerCommonDecorator() {}
262   const char *Error() { return Red(); }
263   const char *Leak() { return Blue(); }
264 };
265 
266 static inline bool MaybeUserPointer(uptr p) {
267   // Since our heap is located in mmap-ed memory, we can assume a sensible lower
268   // bound on heap addresses.
269   const uptr kMinAddress = 4 * 4096;
270   if (p < kMinAddress)
271     return false;
272 #  if defined(__x86_64__)
273   // Accept only canonical form user-space addresses.
274   return ((p >> 47) == 0);
275 #  elif defined(__mips64)
276   return ((p >> 40) == 0);
277 #  elif defined(__aarch64__)
278   // Accept up to 48 bit VMA.
279   return ((p >> 48) == 0);
280 #  elif defined(__loongarch_lp64)
  // Currently, allow a 47-bit user-space VMA.
282   return ((p >> 47) == 0);
283 #  else
284   return true;
285 #  endif
286 }
287 
288 // Scans the memory range, looking for byte patterns that point into allocator
289 // chunks. Marks those chunks with |tag| and adds them to |frontier|.
290 // There are two usage modes for this function: finding reachable chunks
291 // (|tag| = kReachable) and finding indirectly leaked chunks
292 // (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
293 // so |frontier| = 0.
294 void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
295                           const char *region_type, ChunkTag tag) {
296   CHECK(tag == kReachable || tag == kIndirectlyLeaked);
297   const uptr alignment = flags()->pointer_alignment();
298   LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
299                (void *)end);
300   uptr pp = begin;
301   if (pp % alignment)
302     pp = pp + alignment - pp % alignment;
303   for (; pp + sizeof(void *) <= end; pp += alignment) {
304     void *p = *reinterpret_cast<void **>(pp);
305 #  if SANITIZER_APPLE
306     p = MaybeTransformPointer(p);
307 #  endif
308     if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
309       continue;
310     uptr chunk = PointsIntoChunk(p);
311     if (!chunk)
312       continue;
313     // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
314     if (chunk == begin)
315       continue;
316     LsanMetadata m(chunk);
317     if (m.tag() == kReachable || m.tag() == kIgnored)
318       continue;
319 
320     // Do this check relatively late so we can log only the interesting cases.
321     if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
322       LOG_POINTERS(
323           "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
324           "%zu.\n",
325           (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
326           m.requested_size());
327       continue;
328     }
329 
330     m.set_tag(tag);
331     LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
332                  (void *)pp, p, (void *)chunk,
333                  (void *)(chunk + m.requested_size()), m.requested_size());
334     if (frontier)
335       frontier->push_back(chunk);
336   }
337 }
338 
// Scans a global range for pointers.
340 void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
341   uptr allocator_begin = 0, allocator_end = 0;
342   GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
343   if (begin <= allocator_begin && allocator_begin < end) {
344     CHECK_LE(allocator_begin, allocator_end);
345     CHECK_LE(allocator_end, end);
346     if (begin < allocator_begin)
347       ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
348                            kReachable);
349     if (allocator_end < end)
350       ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
351   } else {
352     ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
353   }
354 }
355 
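// Scans the given extra stack ranges (e.g. fake stack frames collected by
// GetThreadExtraStackRangesLocked) for heap pointers.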
356 void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
357                           Frontier *frontier) {
358   for (uptr i = 0; i < ranges.size(); i++) {
359     ScanRangeForPointers(ranges[i].begin, ranges[i].end, frontier, "FAKE STACK",
360                          kReachable);
361   }
362 }
363 
364 #  if SANITIZER_FUCHSIA
365 
366 // Fuchsia handles all threads together with its own callback.
367 static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t,
368                            uptr) {}
369 
370 #  else
371 
372 #    if SANITIZER_ANDROID
373 // FIXME: Move this out into *libcdep.cpp
374 extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
375     pid_t, void (*cb)(void *, void *, uptr, void *), void *);
376 #    endif
377 
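// Marks heap chunks pointed to from ThreadContexts (as collected by
// GetAdditionalThreadContextPtrsLocked) as reachable and adds them to the
// frontier.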
378 static void ProcessThreadRegistry(Frontier *frontier) {
379   InternalMmapVector<uptr> ptrs;
380   GetAdditionalThreadContextPtrsLocked(&ptrs);
381 
382   for (uptr i = 0; i < ptrs.size(); ++i) {
383     void *ptr = reinterpret_cast<void *>(ptrs[i]);
384     uptr chunk = PointsIntoChunk(ptr);
385     if (!chunk)
386       continue;
387     LsanMetadata m(chunk);
388     if (!m.allocated())
389       continue;
390 
391     // Mark as reachable and add to frontier.
392     LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
393     m.set_tag(kReachable);
394     frontier->push_back(chunk);
395   }
396 }
397 
398 // Scans thread data (stacks and TLS) for heap pointers.
399 static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
400                            Frontier *frontier, tid_t caller_tid,
401                            uptr caller_sp) {
402   InternalMmapVector<uptr> registers;
403   InternalMmapVector<Range> extra_ranges;
404   for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
405     tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
406     LOG_THREADS("Processing thread %llu.\n", os_id);
407     uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
408     DTLS *dtls;
409     bool thread_found =
410         GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
411                               &tls_end, &cache_begin, &cache_end, &dtls);
412     if (!thread_found) {
413       // If a thread can't be found in the thread registry, it's probably in the
414       // process of destruction. Log this event and move on.
415       LOG_THREADS("Thread %llu not found in registry.\n", os_id);
416       continue;
417     }
418     uptr sp;
419     PtraceRegistersStatus have_registers =
420         suspended_threads.GetRegistersAndSP(i, &registers, &sp);
421     if (have_registers != REGISTERS_AVAILABLE) {
422       Report("Unable to get registers from thread %llu.\n", os_id);
423       // If unable to get SP, consider the entire stack to be reachable unless
424       // GetRegistersAndSP failed with ESRCH.
425       if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
426         continue;
427       sp = stack_begin;
428     }
429     if (suspended_threads.GetThreadID(i) == caller_tid) {
430       sp = caller_sp;
431     }
432 
433     if (flags()->use_registers && have_registers) {
434       uptr registers_begin = reinterpret_cast<uptr>(registers.data());
435       uptr registers_end =
436           reinterpret_cast<uptr>(registers.data() + registers.size());
437       ScanRangeForPointers(registers_begin, registers_end, frontier,
438                            "REGISTERS", kReachable);
439     }
440 
441     if (flags()->use_stacks) {
442       LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
443                   (void *)stack_end, (void *)sp);
444       if (sp < stack_begin || sp >= stack_end) {
445         // SP is outside the recorded stack range (e.g. the thread is running a
446         // signal handler on alternate stack, or swapcontext was used).
447         // Again, consider the entire stack range to be reachable.
448         LOG_THREADS("WARNING: stack pointer not in stack range.\n");
449         uptr page_size = GetPageSizeCached();
450         int skipped = 0;
451         while (stack_begin < stack_end &&
452                !IsAccessibleMemoryRange(stack_begin, 1)) {
453           skipped++;
454           stack_begin += page_size;
455         }
456         LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
457                     skipped, (void *)stack_begin, (void *)stack_end);
458       } else {
459         // Shrink the stack range to ignore out-of-scope values.
460         stack_begin = sp;
461       }
462       ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
463                            kReachable);
464       extra_ranges.clear();
465       GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
466       ScanExtraStackRanges(extra_ranges, frontier);
467     }
468 
469     if (flags()->use_tls) {
470       if (tls_begin) {
471         LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
        // If the TLS and cache ranges don't overlap, scan the full TLS range.
        // Otherwise, only scan the non-overlapping portions.
474         if (cache_begin == cache_end || tls_end < cache_begin ||
475             tls_begin > cache_end) {
476           ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
477         } else {
478           if (tls_begin < cache_begin)
479             ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
480                                  kReachable);
481           if (tls_end > cache_end)
482             ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
483                                  kReachable);
484         }
485       }
486 #    if SANITIZER_ANDROID
      auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_id*/,
488                      void *arg) -> void {
489         ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
490                              reinterpret_cast<uptr>(dtls_end),
491                              reinterpret_cast<Frontier *>(arg), "DTLS",
492                              kReachable);
493       };
494 
      // FIXME: There might be a race condition here (and in Bionic) if the
      // thread is suspended in the middle of updating its DTLS. In other
      // words, we could scan already-freed memory. (Probably fine for now.)
498       __libc_iterate_dynamic_tls(os_id, cb, frontier);
499 #    else
500       if (dtls && !DTLSInDestruction(dtls)) {
501         ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
502           uptr dtls_beg = dtv.beg;
503           uptr dtls_end = dtls_beg + dtv.size;
504           if (dtls_beg < dtls_end) {
505             LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
506                         (void *)dtls_end);
507             ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
508                                  kReachable);
509           }
510         });
511       } else {
        // We are handling a thread whose DTLS is being destroyed. Log this
        // and continue.
514         LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
515       }
516 #    endif
517     }
518   }
519 
520   // Add pointers reachable from ThreadContexts
521   ProcessThreadRegistry(frontier);
522 }
523 
524 #  endif  // SANITIZER_FUCHSIA
525 
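// Scans the part of |root_region| that intersects the mapped region
// [region_begin, region_end) for heap pointers, provided the mapping is
// readable.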
526 void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
527                     uptr region_begin, uptr region_end, bool is_readable) {
528   uptr intersection_begin = Max(root_region.begin, region_begin);
529   uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
530   if (intersection_begin >= intersection_end)
531     return;
532   LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
533                (void *)root_region.begin,
534                (void *)(root_region.begin + root_region.size),
535                (void *)region_begin, (void *)region_end,
536                is_readable ? "readable" : "unreadable");
537   if (is_readable)
538     ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
539                          kReachable);
540 }
541 
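// Intersects |root_region| with every segment of the current memory map and
// scans the readable intersections.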
542 static void ProcessRootRegion(Frontier *frontier,
543                               const RootRegion &root_region) {
544   MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
545   MemoryMappedSegment segment;
546   while (proc_maps.Next(&segment)) {
547     ScanRootRegion(frontier, root_region, segment.start, segment.end,
548                    segment.IsReadable());
549   }
550 }
551 
552 // Scans root regions for heap pointers.
553 static void ProcessRootRegions(Frontier *frontier) {
554   if (!flags()->use_root_regions)
555     return;
556   for (uptr i = 0; i < root_regions.size(); i++)
557     ProcessRootRegion(frontier, root_regions[i]);
558 }
559 
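// Flood fill: repeatedly pops chunks off |frontier|, scans them for pointers
// into other chunks, and tags newly discovered chunks with |tag|.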
560 static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
561   while (frontier->size()) {
562     uptr next_chunk = frontier->back();
563     frontier->pop_back();
564     LsanMetadata m(next_chunk);
565     ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
566                          "HEAP", tag);
567   }
568 }
569 
570 // ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
571 // which are reachable from it as indirectly leaked.
572 static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
573   chunk = GetUserBegin(chunk);
574   LsanMetadata m(chunk);
575   if (m.allocated() && m.tag() != kReachable) {
576     ScanRangeForPointers(chunk, chunk + m.requested_size(),
577                          /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
578   }
579 }
580 
581 static void IgnoredSuppressedCb(uptr chunk, void *arg) {
582   CHECK(arg);
583   chunk = GetUserBegin(chunk);
584   LsanMetadata m(chunk);
585   if (!m.allocated() || m.tag() == kIgnored)
586     return;
587 
588   const InternalMmapVector<u32> &suppressed =
589       *static_cast<const InternalMmapVector<u32> *>(arg);
590   uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
591   if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
592     return;
593 
594   LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
595                (void *)(chunk + m.requested_size()), m.requested_size());
596   m.set_tag(kIgnored);
597 }
598 
// ForEachChunk callback. If the chunk is marked as ignored, adds its address
// to the frontier.
601 static void CollectIgnoredCb(uptr chunk, void *arg) {
602   CHECK(arg);
603   chunk = GetUserBegin(chunk);
604   LsanMetadata m(chunk);
605   if (m.allocated() && m.tag() == kIgnored) {
606     LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
607                  (void *)(chunk + m.requested_size()), m.requested_size());
608     reinterpret_cast<Frontier *>(arg)->push_back(chunk);
609   }
610 }
611 
612 // Sets the appropriate tag on each chunk.
613 static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
614                               Frontier *frontier, tid_t caller_tid,
615                               uptr caller_sp) {
616   const InternalMmapVector<u32> &suppressed_stacks =
617       GetSuppressionContext()->GetSortedSuppressedStacks();
618   if (!suppressed_stacks.empty()) {
619     ForEachChunk(IgnoredSuppressedCb,
620                  const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
621   }
622   ForEachChunk(CollectIgnoredCb, frontier);
623   ProcessGlobalRegions(frontier);
624   ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp);
625   ProcessRootRegions(frontier);
626   FloodFillTag(frontier, kReachable);
627 
628   // The check here is relatively expensive, so we do this in a separate flood
629   // fill. That way we can skip the check for chunks that are reachable
630   // otherwise.
631   LOG_POINTERS("Processing platform-specific allocations.\n");
632   ProcessPlatformSpecificAllocations(frontier);
633   FloodFillTag(frontier, kReachable);
634 
635   // Iterate over leaked chunks and mark those that are reachable from other
636   // leaked chunks.
637   LOG_POINTERS("Scanning leaked chunks.\n");
638   ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
639 }
640 
641 // ForEachChunk callback. Resets the tags to pre-leak-check state.
642 static void ResetTagsCb(uptr chunk, void *arg) {
643   (void)arg;
644   chunk = GetUserBegin(chunk);
645   LsanMetadata m(chunk);
646   if (m.allocated() && m.tag() != kIgnored)
647     m.set_tag(kDirectlyLeaked);
648 }
649 
650 // ForEachChunk callback. Aggregates information about unreachable chunks into
651 // a LeakReport.
652 static void CollectLeaksCb(uptr chunk, void *arg) {
653   CHECK(arg);
654   LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
655   chunk = GetUserBegin(chunk);
656   LsanMetadata m(chunk);
657   if (!m.allocated())
658     return;
659   if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
660     leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
661 }
662 
663 void LeakSuppressionContext::PrintMatchedSuppressions() {
664   InternalMmapVector<Suppression *> matched;
665   context.GetMatched(&matched);
666   if (!matched.size())
667     return;
668   const char *line = "-----------------------------------------------------";
669   Printf("%s\n", line);
670   Printf("Suppressions used:\n");
671   Printf("  count      bytes template\n");
672   for (uptr i = 0; i < matched.size(); i++) {
673     Printf("%7zu %10zu %s\n",
674            static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
675            matched[i]->weight, matched[i]->templ);
676   }
677   Printf("%s\n\n", line);
678 }
679 
680 #  if SANITIZER_FUCHSIA
681 
// Fuchsia provides a libc interface that guarantees all threads are covered,
// so SuspendedThreadsList is never really used.
684 static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}
685 
686 #  else  // !SANITIZER_FUCHSIA
687 
688 static void ReportUnsuspendedThreads(
689     const SuspendedThreadsList &suspended_threads) {
690   InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
691   for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
692     threads[i] = suspended_threads.GetThreadID(i);
693 
694   Sort(threads.data(), threads.size());
695 
696   InternalMmapVector<tid_t> unsuspended;
697   GetRunningThreadsLocked(&unsuspended);
698 
699   for (auto os_id : unsuspended) {
700     uptr i = InternalLowerBound(threads, os_id);
701     if (i >= threads.size() || threads[i] != os_id)
702       Report(
703           "Running thread %zu was not suspended. False leaks are possible.\n",
704           os_id);
705   }
706 }
707 
708 #  endif  // !SANITIZER_FUCHSIA
709 
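// Stop-the-world callback: reports unsuspended threads, classifies all chunks,
// collects the leaked ones into |param->leaks|, and resets tags for subsequent
// leak checks.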
710 static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
711                                   void *arg) {
712   CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
713   CHECK(param);
714   CHECK(!param->success);
715   ReportUnsuspendedThreads(suspended_threads);
716   ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
717                     param->caller_sp);
718   ForEachChunk(CollectLeaksCb, &param->leaks);
719   // Clean up for subsequent leak checks. This assumes we did not overwrite any
720   // kIgnored tags.
721   ForEachChunk(ResetTagsCb, nullptr);
722   param->success = true;
723 }
724 
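// Prints the leak report and any matched suppressions. Returns true if there
// are unsuppressed leaks.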
725 static bool PrintResults(LeakReport &report) {
726   uptr unsuppressed_count = report.UnsuppressedLeakCount();
727   if (unsuppressed_count) {
728     Decorator d;
729     Printf(
730         "\n"
731         "================================================================="
732         "\n");
733     Printf("%s", d.Error());
734     Report("ERROR: LeakSanitizer: detected memory leaks\n");
735     Printf("%s", d.Default());
736     report.ReportTopLeaks(flags()->max_leaks);
737   }
738   if (common_flags()->print_suppressions)
739     GetSuppressionContext()->PrintMatchedSuppressions();
740   if (unsuppressed_count > 0) {
741     report.PrintSummary();
742     return true;
743   }
744   return false;
745 }
746 
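// Runs a full leak check under StopTheWorld, rerunning it (up to a small
// limit) whenever newly suppressed stacks might still hold indirect leaks.
// Returns true if unsuppressed leaks were reported.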
747 static bool CheckForLeaks() {
748   if (&__lsan_is_turned_off && __lsan_is_turned_off()) {
749     VReport(1, "LeakSanitizer is disabled");
750     return false;
751   }
752   VReport(1, "LeakSanitizer: checking for leaks");
  // Inside LockStuffAndStopTheWorld we can't run the symbolizer, so we can't
  // match suppressions. However, if a stack id was previously suppressed, it
  // should be suppressed in future checks as well.
756   for (int i = 0;; ++i) {
757     EnsureMainThreadIDIsCorrect();
758     CheckForLeaksParam param;
    // Capture the calling thread's stack pointer early to avoid false
    // negatives: memory below the caller's frame may still hold dead pointers
    // from earlier frames, and the new frames created inside CheckForLeaks
    // may not overwrite those bytes before the threads are suspended and the
    // stack pointers are captured.
763     param.caller_tid = GetTid();
764     param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0));
765     LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
766     if (!param.success) {
767       Report("LeakSanitizer has encountered a fatal error.\n");
768       Report(
769           "HINT: For debugging, try setting environment variable "
770           "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
771       Report(
772           "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
773           "etc)\n");
774       Die();
775     }
776     LeakReport leak_report;
777     leak_report.AddLeakedChunks(param.leaks);
778 
    // No new suppressed stacks, so a rerun will not help and we can report.
780     if (!leak_report.ApplySuppressions())
781       return PrintResults(leak_report);
782 
783     // No indirect leaks to report, so we are done here.
784     if (!leak_report.IndirectUnsuppressedLeakCount())
785       return PrintResults(leak_report);
786 
787     if (i >= 8) {
788       Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
789       return PrintResults(leak_report);
790     }
791 
    // We found previously unseen suppressed call stacks. Rerun to make sure
    // they do not hold indirect leaks.
794     VReport(1, "Rerun with %zu suppressed stacks.",
795             GetSuppressionContext()->GetSortedSuppressedStacks().size());
796   }
797 }
798 
799 static bool has_reported_leaks = false;
800 bool HasReportedLeaks() { return has_reported_leaks; }
801 
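// Performs the leak check (at most once per process) and invokes HandleLeaks()
// if leaks were reported.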
802 void DoLeakCheck() {
803   Lock l(&global_mutex);
804   static bool already_done;
805   if (already_done)
806     return;
807   already_done = true;
808   has_reported_leaks = CheckForLeaks();
809   if (has_reported_leaks)
810     HandleLeaks();
811 }
812 
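// Like DoLeakCheck(), but may be called repeatedly. Returns 1 if leaks were
// found, 0 otherwise.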
813 static int DoRecoverableLeakCheck() {
814   Lock l(&global_mutex);
815   bool have_leaks = CheckForLeaks();
816   return have_leaks ? 1 : 0;
817 }
818 
819 void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
820 
821 ///// LeakReport implementation. /////
822 
823 // A hard limit on the number of distinct leaks, to avoid quadratic complexity
824 // in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
825 // in real-world applications.
826 // FIXME: Get rid of this limit by moving logic into DedupLeaks.
827 const uptr kMaxLeaksConsidered = 5000;
828 
829 void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
830   for (const LeakedChunk &leak : chunks) {
831     uptr chunk = leak.chunk;
832     u32 stack_trace_id = leak.stack_trace_id;
833     uptr leaked_size = leak.leaked_size;
834     ChunkTag tag = leak.tag;
835     CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
836 
837     if (u32 resolution = flags()->resolution) {
838       StackTrace stack = StackDepotGet(stack_trace_id);
839       stack.size = Min(stack.size, resolution);
840       stack_trace_id = StackDepotPut(stack);
841     }
842 
843     bool is_directly_leaked = (tag == kDirectlyLeaked);
844     uptr i;
845     for (i = 0; i < leaks_.size(); i++) {
846       if (leaks_[i].stack_trace_id == stack_trace_id &&
847           leaks_[i].is_directly_leaked == is_directly_leaked) {
848         leaks_[i].hit_count++;
849         leaks_[i].total_size += leaked_size;
850         break;
851       }
852     }
853     if (i == leaks_.size()) {
854       if (leaks_.size() == kMaxLeaksConsidered)
855         return;
856       Leak leak = {next_id_++,         /* hit_count */ 1,
857                    leaked_size,        stack_trace_id,
858                    is_directly_leaked, /* is_suppressed */ false};
859       leaks_.push_back(leak);
860     }
861     if (flags()->report_objects) {
862       LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
863       leaked_objects_.push_back(obj);
864     }
865   }
866 }
867 
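// Orders leaks so that direct leaks come before indirect ones, and larger
// total sizes come first within each group.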
868 static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
869   if (leak1.is_directly_leaked == leak2.is_directly_leaked)
870     return leak1.total_size > leak2.total_size;
871   else
872     return leak1.is_directly_leaked;
873 }
874 
875 void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
876   CHECK(leaks_.size() <= kMaxLeaksConsidered);
877   Printf("\n");
878   if (leaks_.size() == kMaxLeaksConsidered)
879     Printf(
880         "Too many leaks! Only the first %zu leaks encountered will be "
881         "reported.\n",
882         kMaxLeaksConsidered);
883 
884   uptr unsuppressed_count = UnsuppressedLeakCount();
885   if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
886     Printf("The %zu top leak(s):\n", num_leaks_to_report);
887   Sort(leaks_.data(), leaks_.size(), &LeakComparator);
888   uptr leaks_reported = 0;
889   for (uptr i = 0; i < leaks_.size(); i++) {
890     if (leaks_[i].is_suppressed)
891       continue;
892     PrintReportForLeak(i);
893     leaks_reported++;
894     if (leaks_reported == num_leaks_to_report)
895       break;
896   }
897   if (leaks_reported < unsuppressed_count) {
898     uptr remaining = unsuppressed_count - leaks_reported;
899     Printf("Omitting %zu more leak(s).\n", remaining);
900   }
901 }
902 
903 void LeakReport::PrintReportForLeak(uptr index) {
904   Decorator d;
905   Printf("%s", d.Leak());
906   Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
907          leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
908          leaks_[index].total_size, leaks_[index].hit_count);
909   Printf("%s", d.Default());
910 
911   CHECK(leaks_[index].stack_trace_id);
912   StackDepotGet(leaks_[index].stack_trace_id).Print();
913 
914   if (flags()->report_objects) {
915     Printf("Objects leaked above:\n");
916     PrintLeakedObjectsForLeak(index);
917     Printf("\n");
918   }
919 }
920 
921 void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
922   u32 leak_id = leaks_[index].id;
923   for (uptr j = 0; j < leaked_objects_.size(); j++) {
924     if (leaked_objects_[j].leak_id == leak_id)
925       Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
926              leaked_objects_[j].size);
927   }
928 }
929 
930 void LeakReport::PrintSummary() {
931   CHECK(leaks_.size() <= kMaxLeaksConsidered);
932   uptr bytes = 0, allocations = 0;
933   for (uptr i = 0; i < leaks_.size(); i++) {
934     if (leaks_[i].is_suppressed)
935       continue;
936     bytes += leaks_[i].total_size;
937     allocations += leaks_[i].hit_count;
938   }
939   InternalScopedString summary;
940   summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
941                  allocations);
942   ReportErrorSummary(summary.data());
943 }
944 
945 uptr LeakReport::ApplySuppressions() {
946   LeakSuppressionContext *suppressions = GetSuppressionContext();
  uptr new_suppressions = 0;
948   for (uptr i = 0; i < leaks_.size(); i++) {
949     if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
950                                leaks_[i].total_size)) {
951       leaks_[i].is_suppressed = true;
952       ++new_suppressions;
953     }
954   }
955   return new_suppressions;
956 }
957 
958 uptr LeakReport::UnsuppressedLeakCount() {
959   uptr result = 0;
960   for (uptr i = 0; i < leaks_.size(); i++)
961     if (!leaks_[i].is_suppressed)
962       result++;
963   return result;
964 }
965 
966 uptr LeakReport::IndirectUnsuppressedLeakCount() {
967   uptr result = 0;
968   for (uptr i = 0; i < leaks_.size(); i++)
969     if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
970       result++;
971   return result;
972 }
973 
974 }  // namespace __lsan
975 #else   // CAN_SANITIZE_LEAKS
976 namespace __lsan {
977 void InitCommonLsan() {}
978 void DoLeakCheck() {}
979 void DoRecoverableLeakCheckVoid() {}
980 void DisableInThisThread() {}
981 void EnableInThisThread() {}
982 }  // namespace __lsan
983 #endif  // CAN_SANITIZE_LEAKS
984 
985 using namespace __lsan;
986 
987 extern "C" {
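// Public interface: marks the heap object at |p| as ignored. Ignored objects
// are never reported as leaks, and anything reachable from them is treated as
// reachable.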
988 SANITIZER_INTERFACE_ATTRIBUTE
989 void __lsan_ignore_object(const void *p) {
990 #if CAN_SANITIZE_LEAKS
991   if (!common_flags()->detect_leaks)
992     return;
993   // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
994   // locked.
995   Lock l(&global_mutex);
996   IgnoreObjectResult res = IgnoreObjectLocked(p);
997   if (res == kIgnoreObjectInvalid)
998     VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
999   if (res == kIgnoreObjectAlreadyIgnored)
1000     VReport(1,
1001             "__lsan_ignore_object(): "
1002             "heap object at %p is already being ignored\n",
1003             p);
1004   if (res == kIgnoreObjectSuccess)
1005     VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
1006 #endif  // CAN_SANITIZE_LEAKS
1007 }
1008 
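// Public interface: registers [begin, begin + size) as a root region. Root
// regions are scanned for heap pointers in addition to globals, stacks and
// TLS, e.g.:
//   static char arena[1 << 20];
//   __lsan_register_root_region(arena, sizeof(arena));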
1009 SANITIZER_INTERFACE_ATTRIBUTE
1010 void __lsan_register_root_region(const void *begin, uptr size) {
1011 #if CAN_SANITIZE_LEAKS
1012   Lock l(&global_mutex);
1013   RootRegion region = {reinterpret_cast<uptr>(begin), size};
1014   root_regions.push_back(region);
1015   VReport(1, "Registered root region at %p of size %zu\n", begin, size);
1016 #endif  // CAN_SANITIZE_LEAKS
1017 }
1018 
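// Public interface: removes a root region previously registered with
// __lsan_register_root_region(). The (begin, size) pair must match exactly;
// otherwise the process aborts.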
1019 SANITIZER_INTERFACE_ATTRIBUTE
1020 void __lsan_unregister_root_region(const void *begin, uptr size) {
1021 #if CAN_SANITIZE_LEAKS
1022   Lock l(&global_mutex);
1023   bool removed = false;
1024   for (uptr i = 0; i < root_regions.size(); i++) {
1025     RootRegion region = root_regions[i];
1026     if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
1027       removed = true;
1028       uptr last_index = root_regions.size() - 1;
1029       root_regions[i] = root_regions[last_index];
1030       root_regions.pop_back();
1031       VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
1032       break;
1033     }
1034   }
1035   if (!removed) {
1036     Report(
1037         "__lsan_unregister_root_region(): region at %p of size %zu has not "
1038         "been registered.\n",
1039         begin, size);
1040     Die();
1041   }
1042 #endif  // CAN_SANITIZE_LEAKS
1043 }
1044 
1045 SANITIZER_INTERFACE_ATTRIBUTE
1046 void __lsan_disable() {
1047 #if CAN_SANITIZE_LEAKS
1048   __lsan::DisableInThisThread();
1049 #endif
1050 }
1051 
1052 SANITIZER_INTERFACE_ATTRIBUTE
1053 void __lsan_enable() {
1054 #if CAN_SANITIZE_LEAKS
1055   __lsan::EnableInThisThread();
1056 #endif
1057 }
1058 
1059 SANITIZER_INTERFACE_ATTRIBUTE
1060 void __lsan_do_leak_check() {
1061 #if CAN_SANITIZE_LEAKS
1062   if (common_flags()->detect_leaks)
1063     __lsan::DoLeakCheck();
1064 #endif  // CAN_SANITIZE_LEAKS
1065 }
1066 
1067 SANITIZER_INTERFACE_ATTRIBUTE
1068 int __lsan_do_recoverable_leak_check() {
1069 #if CAN_SANITIZE_LEAKS
1070   if (common_flags()->detect_leaks)
1071     return __lsan::DoRecoverableLeakCheck();
1072 #endif  // CAN_SANITIZE_LEAKS
1073   return 0;
1074 }
1075 
1076 SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
1077   return "";
1078 }
1079 
1080 #if !SANITIZER_SUPPORTS_WEAK_HOOKS
1081 SANITIZER_INTERFACE_WEAK_DEF(int, __lsan_is_turned_off, void) {
1082   return 0;
1083 }
1084 
1085 SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_suppressions, void) {
1086   return "";
1087 }
1088 #endif
1089 }  // extern "C"
1090