//===-- asan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "lsan/lsan_common.h"

namespace __asan {

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations, larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
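// For example, RZSize2Log(32) == 1 and RZLog2Size(1) == 32: the two helpers
// are inverses of each other over the eight valid redzone sizes (16..2048).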

static AsanAllocator &get_allocator();

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size, we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
// M -- magic value kAllocBegMagic
// B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;

struct ChunkHeader {
  // First 8 bytes.
  u32 chunk_state : 8;  // Must be first.
  u32 alloc_tid : 24;

  u32 free_tid : 24;
  u32 from_memalign : 1;
  u32 alloc_type : 2;
  u32 rz_log : 3;
  u32 lsan_tag : 2;
  // Second 8 bytes.
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size : 29;
  // align < 8 -> 0
  // else      -> log2(min(align, 512)) - 2
  u32 user_requested_alignment_log : 3;
  u32 alloc_context_id;
};
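// Note: alloc_tid and free_tid are 24-bit fields, so only thread ids below
// 1 << 24 round-trip through the header; Allocate() below CHECK()s this.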

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const usize kChunkHeaderSize = sizeof(ChunkHeader);
static const usize kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED = 2,
  CHUNK_QUARANTINE = 3
};

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  usize UsedSize(bool locked_version = false) {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(
        get_allocator().GetMetaData(AllocBeg(locked_version)));
  }
  void *AllocBeg(bool locked_version = false) {
    if (from_memalign) {
      if (locked_version)
        return get_allocator().GetBlockBeginFastLocked(
            reinterpret_cast<void *>(this));
      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    }
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  bool AddrIsInside(uptr addr, bool locked_version = false) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
  }
};
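// For chunks served by the secondary (large) allocator, user_requested_size
// is saturated to SizeClassMap::kMaxSize, so UsedSize() reads the real size
// from the allocator metadata (meta[0]) that Allocate() fills in below.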

struct QuarantineCallback {
  QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
      : cache_(cache),
        stack_(stack) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of a deallocated chunk, confusing the GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  void *Allocate(usize size) {
    void *res = get_allocator().Allocate(cache_, size, 1);
    // TODO(alekseys): Consider making quarantine OOM-friendly.
    if (UNLIKELY(!res))
      ReportOutOfMemory(size, stack_);
    return res;
  }

  void Deallocate(void *p) {
    get_allocator().Deallocate(cache_, p);
  }

 private:
  AllocatorCache* const cache_;
  BufferedStackTrace* const stack_;
};

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;

void AsanMapUnmapCallback::OnMap(uptr p, usize size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, usize size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}

void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}

struct Allocator {
  static const usize kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  uptr max_user_defined_malloc_size;
  atomic_uint8_t rss_limit_exceeded;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}

  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }

  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    (uptr)options.thread_local_quarantine_size_kb << 10);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }

  void InitLinkerInitialized(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
    SharedInitCode(options);
    max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                       ? common_flags()->max_allocation_size_mb
                                             << 20
                                       : kMaxAllowedMallocSize;
  }

  bool RssLimitExceeded() {
    return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
  }

  void SetRssLimitExceeded(bool limit_exceeded) {
    atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
  }

  void RePoisonChunk(uptr chunk) {
    // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
    AsanChunk *ac = GetAsanChunk((void *)chunk);
    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
    uptr beg = ac->Beg();
    uptr end = ac->Beg() + ac->UsedSize(true);
    uptr chunk_end = chunk + allocated_size;
    if (chunk < beg && beg < end && end <= chunk_end &&
        ac->chunk_state == CHUNK_ALLOCATED) {
      // Looks like a valid AsanChunk in use, poison redzones only.
      PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
      uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
      FastPoisonShadowPartialRightRedzone(
          end_aligned_down, end - end_aligned_down,
          chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
    } else {
      // This is either not an AsanChunk, or a freed or quarantined AsanChunk.
      // In either case, poison everything.
      PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
    }
  }

  void ReInitialize(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);

    // Poison all existing allocations' redzones.
    if (CanPoisonMemory()) {
      allocator.ForceLock();
      allocator.ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
      allocator.ForceUnlock();
    }
  }

  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetSize() >> 20;
    options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = AllocatorMayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  }

  // -------------------- Helper methods. -------------------------
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log =
      user_requested_size <= 64        - 16   ? 0 :
      user_requested_size <= 128       - 32   ? 1 :
      user_requested_size <= 512       - 64   ? 2 :
      user_requested_size <= 4096      - 128  ? 3 :
      user_requested_size <= (1 << 14) - 256  ? 4 :
      user_requested_size <= (1 << 15) - 512  ? 5 :
      user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
    u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
    u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
    return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
  }
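  // For example, a 100-byte request falls into the "<= 512 - 64" bucket above,
  // so rz_log is 2 and the redzone is RZLog2Size(2) == 64 bytes, unless the
  // redzone/max_redzone flags clamp it to a different power of two.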

  static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
    if (user_requested_alignment < 8)
      return 0;
    if (user_requested_alignment > 512)
      user_requested_alignment = 512;
    return Log2(user_requested_alignment) - 2;
  }

  static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
    if (user_requested_alignment_log == 0)
      return 0;
    return 1LL << (user_requested_alignment_log + 2);
  }
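  // Worked example for the two encodings above: a requested alignment of 16
  // is stored as Log2(16) - 2 == 2, and ComputeUserAlignment(2) == 1 << 4 == 16
  // recovers it; alignments above 512 are clamped to the code for 512, and 0
  // means no special alignment was requested.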

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    // Prefer an allocated chunk over a freed chunk, and a freed chunk over an
    // available chunk.
    if (left_chunk->chunk_state != right_chunk->chunk_state) {
      if (left_chunk->chunk_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_chunk->chunk_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
    AsanChunk *m = GetAsanChunkByAddr(addr);
    if (!m) return false;
    if (m->chunk_state != CHUNK_ALLOCATED) return false;
    if (m->Beg() != addr) return false;
    atomic_store((atomic_uint32_t *)&m->alloc_context_id, StackDepotPut(*stack),
                 memory_order_relaxed);
    return true;
  }
  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(usize size, usize alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!asan_inited))
      AsanInitFromRtl();
    if (RssLimitExceeded()) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = SHADOW_GRANULARITY;
    const uptr user_requested_alignment_log =
        ComputeUserRequestedAlignmentLog(alignment);
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    usize rz_log = ComputeRZLog(size);
    usize rz_size = RZLog2Size(rz_log);
    usize rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    usize needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
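    // Example, assuming the common 8-byte shadow granularity and default
    // redzone flags: malloc(100) gets rz_log == 2 (64-byte redzones), so
    // rounded_size == RoundUpTo(100, 8) == 104 and needed_size == 168, which
    // is served by the primary allocator.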
    bool using_primary_allocator = true;
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
      needed_size += rz_size;
      using_primary_allocator = false;
    }
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
        size > max_user_defined_malloc_size) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
               size);
        return nullptr;
      }
      uptr malloc_limit =
          Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
      ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack);
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
      // Heap poisoning is enabled, but the allocator provides an unpoisoned
      // chunk. This is possible if CanPoisonMemory() was false for some
      // time, for example, due to flags()->start_disabled.
      // Anyway, poison the block before using it for anything else.
      uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr beg_plus_redzone = alloc_beg + rz_size;
    uptr user_beg = beg_plus_redzone;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    m->rz_log = rz_log;
    u32 alloc_tid = t ? t->tid() : 0;
    m->alloc_tid = alloc_tid;
    CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
    m->free_tid = kInvalidTid;
    m->from_memalign = user_beg != beg_plus_redzone;
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
      reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
    }
    if (using_primary_allocator) {
      CHECK(size);
      m->user_requested_size = size;
      CHECK(allocator.FromPrimary(allocated));
    } else {
      CHECK(!allocator.FromPrimary(allocated));
      m->user_requested_size = SizeClassMap::kMaxSize;
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
      meta[0] = size;
      meta[1] = chunk_beg;
    }
    m->user_requested_alignment_log = user_requested_alignment_log;

    m->alloc_context_id = StackDepotPut(*stack);

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      usize fill_size = Min(size, (usize)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
    ASAN_MALLOC_HOOK(res, size);
    return res;
  }

  // Set quarantine flag if chunk is allocated, issue ASan error report on
  // available and quarantined chunks. Return true on success, false otherwise.
  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                              BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid race on double-free.
    if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
                                        CHUNK_QUARANTINE,
                                        memory_order_acquire)) {
      ReportInvalidFree(ptr, old_chunk_state, stack);
      // It's not safe to push a chunk in quarantine on invalid free.
      return false;
    }
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    return true;
  }
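  // If two threads race to free the same chunk, only one compare-exchange
  // above succeeds; the loser sees CHUNK_QUARANTINE in old_chunk_state and
  // reports a double free instead of quarantining the chunk twice.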

  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlagIfAllocated.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    CHECK_GE(m->alloc_tid, 0);
    if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
      CHECK_EQ(m->free_tid, kInvalidTid);
    AsanThread *t = GetCurrentThread();
    m->free_tid = t ? t->tid() : 0;
    m->free_context_id = StackDepotPut(*stack);

    Flags &fl = *flags();
    if (fl.max_free_fill_size > 0) {
      // We have to skip the chunk header; it contains free_context_id.
      uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
      if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
        uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
        size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
        REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
      }
    }

    // Poison the region.
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
                     m, m->UsedSize());
    }
  }

  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
    // malloc. Don't report an invalid free in this case.
    if (SANITIZER_WINDOWS &&
        !get_allocator().PointerIsMine(ptr)) {
      if (!IsSystemHeapAddress(p))
        ReportFreeNotMalloced(p, stack);
      return;
    }

    ASAN_FREE_HOOK(ptr);

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine the chunk if we failed to set the CHUNK_QUARANTINE
    // flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    } else {
      if (flags()->new_delete_type_mismatch &&
          (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
          ((delete_size && delete_size != m->UsedSize()) ||
           ComputeUserRequestedAlignmentLog(delete_alignment) !=
               m->user_requested_alignment_log)) {
        ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
      }
    }

    QuarantineChunk(m, ptr, stack);
  }

  void *Reallocate(void *old_ptr, usize new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = m->chunk_state;
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report the racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, usize size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator, there is no need to
    // clear it, as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }

  void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
    allocator.SwallowCache(ac);
  }

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg) return nullptr;
    if (!allocator.FromPrimary(alloc_beg)) {
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
      AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
      return m;
    }
    uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
    if (alloc_magic[0] == kAllocBegMagic)
      return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
    return reinterpret_cast<AsanChunk *>(alloc_beg);
  }

  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (m->chunk_state != CHUNK_ALLOCATED) return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }

  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    if (!m1) return AsanChunkView(m1);
    sptr offset = 0;
    if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the other chunk to the left.
      // Search a bit to the left to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }

  void Purge(BufferedStackTrace *stack) {
    AsanThread *t = GetCurrentThread();
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      quarantine.DrainAndRecycle(GetQuarantineCache(ms),
                                 QuarantineCallback(GetAllocatorCache(ms),
                                                    stack));
    }
    {
      SpinMutexLock l(&fallback_mutex);
      quarantine.DrainAndRecycle(&fallback_quarantine_cache,
                                 QuarantineCallback(&fallback_allocator_cache,
                                                    stack));
    }

    allocator.ForceReleaseToOS();
  }

  void PrintStats() {
    allocator.PrintStats();
    quarantine.PrintStats();
  }

  void ForceLock() {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}

bool AsanChunkView::IsValid() const {
  return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
}
bool AsanChunkView::IsAllocated() const {
  return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
  return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
  return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}
uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
AllocType AsanChunkView::GetAllocType() const {
  return (AllocType)chunk_->alloc_type;
}

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }

StackTrace AsanChunkView::GetAllocStack() const {
  return GetStackTraceFromId(GetAllocStackId());
}

StackTrace AsanChunkView::GetFreeStack() const {
  return GetStackTraceFromId(GetFreeStackId());
}

void InitializeAllocator(const AllocatorOptions &options) {
  instance.InitLinkerInitialized(options);
}

void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
}

void AsanThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}

void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void asan_delete(void *ptr, usize size, usize alignment,
                 BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *asan_malloc(usize size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}

void *asan_calloc(uptr nmemb, usize size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}

void *asan_reallocarray(void *p, usize nmemb, usize size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return asan_realloc(p, nmemb * size, stack);
}

void *asan_realloc(void *p, usize size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on realloc to 0.
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}

void *asan_valloc(usize size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}

void *asan_pvalloc(usize size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(
      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}

void *asan_memalign(uptr alignment, usize size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, alloc_type, true));
}

void *asan_aligned_alloc(uptr alignment, usize size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
}

int asan_posix_memalign(void **memptr, usize alignment, usize size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  instance.ForceLock();
}

void asan_mz_force_unlock() {
  instance.ForceUnlock();
}

void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
  instance.SetRssLimitExceeded(limit_exceeded);
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
    return 0;
  if (m->AddrIsInside(addr, /*locked_version=*/true))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version=*/true),
                                  addr))
    return chunk;
  return 0;
}

// Debug code. Delete once issue #1193 is chased down.
extern "C" SANITIZER_WEAK_ATTRIBUTE const char *__lsan_current_stage;

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  if (!m)
    Printf(
        "ASAN is about to crash with a CHECK failure.\n"
        "The ASAN developers are trying to chase down this bug,\n"
        "so if you've encountered this bug please let us know.\n"
        "See also: https://github.com/google/sanitizers/issues/1193\n"
        "chunk: %p caller %p __lsan_current_stage %s\n",
        chunk, GET_CALLER_PC(), __lsan_current_stage);
  CHECK(m);
  return m->Beg();
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}

bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

usize LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize(/*locked_version=*/true);
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc. here.
usize __sanitizer_get_estimated_allocated_size(usize size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}

usize __sanitizer_get_allocated_size(const void *p) {
  if (!p) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  usize allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

void __sanitizer_purge_allocator() {
  GET_STACK_TRACE_MALLOC;
  instance.Purge(&stack);
}

int __asan_update_allocation_context(void* addr) {
  GET_STACK_TRACE_MALLOC;
  return instance.UpdateAllocationStack((uptr)addr, &stack);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
                             void *ptr, usize size) {
  (void)ptr;
  (void)size;
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
  (void)ptr;
}
#endif