//=-- lsan_allocator.cc ---------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

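// memset is forward-declared here so freshly allocated memory can be zeroed
// without including libc headers.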
extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
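// Hard cap on the size of a single allocation; requests above it are rejected
// (see ReportAllocationSizeTooBig below). The cap is smaller on 32-bit targets
// and on mips64/aarch64.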
#if defined(__i386__) || defined(__arm__)
static const uptr kMaxAllowedMallocSize = 1UL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
#endif
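// LSan uses the common primary/secondary combined allocator. PrimaryAllocator
// and AllocatorCache are defined per-platform in lsan_allocator.h.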
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;

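// Initializes the global allocator, honoring allocator_may_return_null and
// the release-to-OS interval from common flags.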
void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
}

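// Called on thread exit: returns the thread-local allocator cache to the
// global allocator.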
void AllocatorThreadFinish() {
  allocator.SwallowCache(GetAllocatorCache());
}

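// Returns the ChunkMetadata that the allocator keeps for the chunk at p.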
static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

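// Fills in the chunk metadata for a new allocation (leak tag, stack trace id,
// requested size). The final atomic store sets the 'allocated' flag, the first
// byte of ChunkMetadata, which makes the chunk visible to the leak scanner.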
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

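// Clears the 'allocated' flag so the leak scanner no longer treats the chunk
// as live.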
static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

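// Handles requests larger than kMaxAllowedMallocSize: returns null when
// allocator_may_return_null is set, otherwise reports a fatal error.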
static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
  if (AllocatorMayReturnNull()) {
    Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
    return nullptr;
  }
  ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, &stack);
}

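// Common allocation path shared by all malloc-family entry points below.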
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize)
    return ReportAllocationSizeTooBig(size, stack);
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, &stack);
  }
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  RunMallocHooks(p, size);
  return p;
}

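// calloc() backend: checks nmemb * size for overflow before allocating
// zero-initialized memory.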
static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, &stack);
  }
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}

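// Common deallocation path: runs free hooks, marks the chunk as freed and
// returns it to the allocator.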
void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}

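// realloc() backend: the old chunk is unregistered before the allocator may
// move or free it, and the resulting chunk is registered again.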
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    allocator.Deallocate(GetAllocatorCache(), p);
    return ReportAllocationSizeTooBig(new_size, stack);
  }
  p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)GetAllocatorCache();
  *end = *begin + sizeof(AllocatorCache);
}

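// Returns the originally requested size of an allocation, or 0 if p is not a
// chunk the allocator knows about.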
uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

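// Entry points backing the intercepted libc/POSIX allocation functions. Each
// one performs the argument checks its counterpart requires and forwards to
// the common paths above, setting errno where appropriate.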
int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, &stack);
  }
  void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_malloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}

void lsan_free(void *p) {
  Deallocate(p);
}

void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}

void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Calloc(nmemb, size, stack));
}

void *lsan_valloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}

void *lsan_pvalloc(uptr size, const StackTrace &stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, &stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
}

uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

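// Returns the start of the allocated chunk that p points into, or 0 if p does
// not point into a live chunk. Used when scanning memory for pointers.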
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

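// Tags the chunk containing p as kIgnored so it is excluded from leak reports.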
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
} // namespace __lsan

using namespace __lsan;

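// Public __sanitizer_* allocator introspection interface.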
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
#endif
} // extern "C"