//=-- lsan_common.h -------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Private LSan header.
//
//===----------------------------------------------------------------------===//

#ifndef LSAN_COMMON_H
#define LSAN_COMMON_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

// LeakSanitizer relies on some Glibc internals (e.g. the TLS machinery) on
// Linux. LSan also copes poorly with 32-bit architectures, because the small
// (4-byte) pointer size leads to a high false negative ratio on large leaks.
// We still want to support some 32-bit arches (e.g. x86), see
// https://github.com/google/sanitizers/issues/403.
// To enable LeakSanitizer on a new architecture, one needs to implement the
// internal_clone function as well as (probably) adjust the TLS machinery for
// the new architecture inside the sanitizer library.
// Leak detection is excluded on arm32 Android because `__aeabi_read_tp` is
// missing there, which caused a link error.
#if SANITIZER_ANDROID && (__ANDROID_API__ < 28 || defined(__arm__))
#define CAN_SANITIZE_LEAKS 0
#elif (SANITIZER_LINUX || SANITIZER_MAC) && (SANITIZER_WORDSIZE == 64) && \
    (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) ||  \
     defined(__powerpc64__) || defined(__s390x__))
#define CAN_SANITIZE_LEAKS 1
#elif defined(__i386__) && (SANITIZER_LINUX || SANITIZER_MAC)
#define CAN_SANITIZE_LEAKS 1
#elif defined(__arm__) && SANITIZER_LINUX
#define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_RISCV64 && SANITIZER_LINUX
#define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_NETBSD || SANITIZER_FUCHSIA
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
#endif

namespace __sanitizer {
class FlagParser;
class ThreadRegistry;
class ThreadContextBase;
struct DTLS;
}  // namespace __sanitizer

namespace __lsan {

// Chunk tags.
enum ChunkTag {
  kDirectlyLeaked = 0,  // default
  kIndirectlyLeaked = 1,
  kReachable = 2,
  kIgnored = 3
};

struct Flags {
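  // X-macro expansion: each LSAN_FLAG(Type, Name, DefaultValue, Description)
  // entry in lsan_flags.inc becomes a "Type Name;" member of this struct.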
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
#undef LSAN_FLAG

  void SetDefaults();
  uptr pointer_alignment() const {
    return use_unaligned ? 1 : sizeof(uptr);
  }
};

extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
void RegisterLsanFlags(FlagParser *parser, Flags *f);

struct Leak {
  u32 id;
  uptr hit_count;
  uptr total_size;
  u32 stack_trace_id;
  bool is_directly_leaked;
  bool is_suppressed;
};

struct LeakedObject {
  u32 leak_id;
  uptr addr;
  uptr size;
};

// Aggregates leaks by stack trace prefix.
class LeakReport {
 public:
  LeakReport() {}
  void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
                      ChunkTag tag);
  void ReportTopLeaks(uptr max_leaks);
  void PrintSummary();
  uptr ApplySuppressions();
  uptr UnsuppressedLeakCount();
  uptr IndirectUnsuppressedLeakCount();

 private:
  void PrintReportForLeak(uptr index);
  void PrintLeakedObjectsForLeak(uptr index);

  u32 next_id_ = 0;
  InternalMmapVector<Leak> leaks_;
  InternalMmapVector<LeakedObject> leaked_objects_;
};
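
// Rough usage sketch (the real call sites live elsewhere in LSan; the names
// `chunk`, `stack_trace_id`, `leaked_size`, `tag` and `max_leaks` below are
// illustrative, not authoritative):
//
//   LeakReport report;
//   // For every leaked chunk discovered during the scan:
//   report.AddLeakedChunk(chunk, stack_trace_id, leaked_size, tag);
//   // Once all chunks have been recorded:
//   report.ApplySuppressions();
//   if (report.UnsuppressedLeakCount())
//     report.ReportTopLeaks(max_leaks);  // max_leaks: a caller-chosen cap
//   report.PrintSummary();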

typedef InternalMmapVector<uptr> Frontier;

// Platform-specific functions.
void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier);

struct RootRegion {
  uptr begin;
  uptr size;
};

// LockStuffAndStopTheWorld can start to use Scan* calls to collect into
// this Frontier vector before the StopTheWorldCallback actually runs.
// This is used when the OS has a unified callback API for suspending
// threads and enumerating roots.
struct CheckForLeaksParam {
  Frontier frontier;
  LeakReport leak_report;
  bool success = false;
};

InternalMmapVector<RootRegion> const *GetRootRegions();
void ScanRootRegion(Frontier *frontier, RootRegion const &region,
                    uptr region_begin, uptr region_end, bool is_readable);
void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg);
void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs);
// Run stoptheworld while holding any platform-specific locks, as well as the
// allocator and thread registry locks.
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
                              CheckForLeaksParam *argument);

void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
                          const char *region_type, ChunkTag tag);
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);

enum IgnoreObjectResult {
  kIgnoreObjectSuccess,
  kIgnoreObjectAlreadyIgnored,
  kIgnoreObjectInvalid
};

// Functions called from the parent tool.
const char *MaybeCallLsanDefaultOptions();
void InitCommonLsan();
void DoLeakCheck();
void DoRecoverableLeakCheckVoid();
void DisableCounterUnderflow();
bool DisabledInThisThread();

// Used to implement __lsan::ScopedDisabler.
void DisableInThisThread();
void EnableInThisThread();
// Can be used to ignore memory allocated by an intercepted function.
struct ScopedInterceptorDisabler {
  ScopedInterceptorDisabler() { DisableInThisThread(); }
  ~ScopedInterceptorDisabler() { EnableInThisThread(); }
};
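
// Sketch of the intended use inside an interceptor (hypothetical function;
// the real interceptors live in lsan_interceptors.cpp). Allocations made
// while the disabler is alive are excluded from leak reports:
//
//   INTERCEPTOR(void *, some_allocating_fn, uptr size) {
//     ScopedInterceptorDisabler disabler;
//     // Memory allocated by the real function is not reported as leaked.
//     return REAL(some_allocating_fn)(size);
//   }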

// According to the Itanium C++ ABI, an array cookie is a single word
// containing the size of the allocated array.
static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
                                           uptr addr) {
  return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
         *reinterpret_cast<uptr *>(chunk_beg) == 0;
}

// According to the ARM C++ ABI, an array cookie consists of two words:
// struct array_cookie {
//   std::size_t element_size; // element_size != 0
//   std::size_t element_count;
// };
static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
                                       uptr addr) {
  return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
         *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
}

// Special case for "new T[0]" where T is a type with a destructor.
// "new T[0]" allocates just a cookie (one or two words) holding the array
// size (0), and the returned pointer points to the end of the allocated
// chunk. The actual cookie layout varies between platforms according to
// their C++ ABI implementation.
inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
                                        uptr addr) {
#if defined(__arm__)
  return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
#else
  return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
#endif
}
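
// Illustrative example: on an Itanium-ABI target, "new T[0]" (T with a
// destructor) allocates a single word storing the element count 0, and the
// pointer handed back to the program is one word past chunk_beg, i.e. exactly
// the end of the chunk:
//
//   chunk_beg                     addr == chunk_beg + sizeof(uptr)
//   |----- cookie word == 0 -----|
//
// This is precisely the pattern IsItaniumABIArrayCookie() above recognizes.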

// The following must be implemented in the parent tool.

void ForEachChunk(ForEachChunkCallback callback, void *arg);
// Returns the address range occupied by the global allocator object.
void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();
// Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
void UnlockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
ThreadRegistry *GetThreadRegistryLocked();
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls);
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches);
void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg);
// If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent
// exec(), which invalidates the recorded TID. To update it, we must call
// gettid() from the main thread. Our solution is to call this function before
// leak checking and also before every call to pthread_create() (to handle
// cases where leak checking is initiated from a non-main thread).
void EnsureMainThreadIDIsCorrect();
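
// For example (hypothetical placement, mirroring the comment above), a
// pthread_create interceptor would call this before creating the thread:
//
//   INTERCEPTOR(int, pthread_create, void *th, void *attr,
//               void *(*callback)(void *), void *param) {
//     EnsureMainThreadIDIsCorrect();
//     // ... set up thread bookkeeping, then call REAL(pthread_create)(...).
//   }
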
// If p points into a chunk that has been allocated to the user, returns its
// user-visible address. Otherwise, returns 0.
uptr PointsIntoChunk(void *p);
// Returns address of user-visible chunk contained in this allocator chunk.
uptr GetUserBegin(uptr chunk);
// Helper for __lsan_ignore_object().
IgnoreObjectResult IgnoreObjectLocked(const void *p);

// Returns the linker module, if valid for the platform.
LoadedModule *GetLinker();

// Returns true if LSan has finished leak checking and reported leaks.
bool HasReportedLeaks();

// Run platform-specific leak handlers.
void HandleLeaks();

// Wrapper for chunk metadata operations.
class LsanMetadata {
 public:
  // Constructor accepts address of user-visible chunk.
  explicit LsanMetadata(uptr chunk);
  bool allocated() const;
  ChunkTag tag() const;
  void set_tag(ChunkTag value);
  uptr requested_size() const;
  u32 stack_trace_id() const;
 private:
  void *metadata_;
};
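
// Sketch of how the parent tool's chunk enumeration composes with this
// wrapper (illustrative only; `VisitChunkCb` is a hypothetical callback):
//
//   static void VisitChunkCb(uptr chunk, void *arg) {
//     chunk = GetUserBegin(chunk);  // translate to the user-visible address
//     LsanMetadata m(chunk);
//     if (m.allocated() && m.tag() != kReachable) {
//       // Inspect m.requested_size() / m.stack_trace_id(), or m.set_tag(...).
//     }
//   }
//   // ... later: ForEachChunk(VisitChunkCb, &some_state);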

}  // namespace __lsan

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_options();

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off();

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions();
}  // extern "C"

#endif  // LSAN_COMMON_H