//===-- asan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"

#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __asan {

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations, larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
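
// Worked example of the 3-bit encoding above (illustrative only):
//   rz_log 0 -> 16 bytes, rz_log 3 -> 128 bytes, rz_log 7 -> 2048 bytes,
// and RZSize2Log is the exact inverse, e.g. RZSize2Log(128) == 3.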

static AsanAllocator &get_allocator();

static void AtomicContextStore(volatile atomic_uint64_t *atomic_context,
                               u32 tid, u32 stack) {
  u64 context = tid;
  context <<= 32;
  context += stack;
  atomic_store(atomic_context, context, memory_order_relaxed);
}

static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context,
                              u32 &tid, u32 &stack) {
  u64 context = atomic_load(atomic_context, memory_order_relaxed);
  stack = context;
  context >>= 32;
  tid = context;
}
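
// For illustration: the tid/stack pair is packed into a single u64 so it can
// be read and written atomically, e.g. tid = 5 and stack id = 0x1234 are
// stored as 0x0000000500001234 (tid in the high 32 bits, stack id in the low
// 32 bits).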

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'

class ChunkHeader {
 public:
  atomic_uint8_t chunk_state;
  u8 alloc_type : 2;
  u8 lsan_tag : 2;

  // align < 8 -> 0
  // else      -> log2(min(align, 512)) - 2
  u8 user_requested_alignment_log : 3;

 private:
  u16 user_requested_size_hi;
  u32 user_requested_size_lo;
  atomic_uint64_t alloc_context_id;

 public:
  uptr UsedSize() const {
    static_assert(sizeof(user_requested_size_lo) == 4,
                  "Expression below requires this");
    return FIRST_32_SECOND_64(0, ((uptr)user_requested_size_hi << 32)) +
           user_requested_size_lo;
  }

  void SetUsedSize(uptr size) {
    user_requested_size_lo = size;
    static_assert(sizeof(user_requested_size_lo) == 4,
                  "Expression below requires this");
    user_requested_size_hi = FIRST_32_SECOND_64(0, size >> 32);
    CHECK_EQ(UsedSize(), size);
  }
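
  // The size is split across the two fields above to keep the header compact.
  // As a sketch of how they combine: on 64-bit targets a request of
  // 0x100000010 bytes is stored as hi = 0x1, lo = 0x10 and reassembled by
  // UsedSize(); on 32-bit targets only the low 32 bits are used.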

  void SetAllocContext(u32 tid, u32 stack) {
    AtomicContextStore(&alloc_context_id, tid, stack);
  }

  void GetAllocContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&alloc_context_id, tid, stack);
  }
};

class ChunkBase : public ChunkHeader {
  atomic_uint64_t free_context_id;

 public:
  void SetFreeContext(u32 tid, u32 stack) {
    AtomicContextStore(&free_context_id, tid, stack);
  }

  void GetFreeContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&free_context_id, tid, stack);
  }
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

enum {
  // Either just allocated by the underlying allocator, but the AsanChunk is
  // not yet ready, or almost returned to the underlying allocator and the
  // AsanChunk is already meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 2,
  // The chunk was freed and put into the quarantine zone.
  CHUNK_QUARANTINE = 3,
};
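
// Lifetime of a chunk, as a sketch of the transitions implemented below:
// memory handed out by get_allocator() starts out as CHUNK_INVALID;
// Allocate() fills in the header and publishes CHUNK_ALLOCATED; Deallocate()
// flips the state to CHUNK_QUARANTINE; Recycle() moves it back to
// CHUNK_INVALID just before the memory is returned to the underlying
// allocator.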

class AsanChunk : public ChunkBase {
 public:
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  bool AddrIsInside(uptr addr) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize());
  }
};

class LargeChunkHeader {
  static constexpr uptr kAllocBegMagic =
      FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
  atomic_uintptr_t magic;
  AsanChunk *chunk_header;

 public:
  AsanChunk *Get() const {
    return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
               ? chunk_header
               : nullptr;
  }

  void Set(AsanChunk *p) {
    if (p) {
      chunk_header = p;
      atomic_store(&magic, kAllocBegMagic, memory_order_release);
      return;
    }

    uptr old = kAllocBegMagic;
    if (!atomic_compare_exchange_strong(&magic, &old, 0,
                                        memory_order_release)) {
      CHECK_EQ(old, kAllocBegMagic);
    }
  }
};

static void FillChunk(AsanChunk *m) {
  // FIXME: Use ReleaseMemoryPagesToOS.
  Flags &fl = *flags();

  if (fl.max_free_fill_size > 0) {
    // We have to skip the chunk header, it contains free_context_id.
    uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
    if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
      uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
      size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
      REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
    }
  }
}
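
// For example (illustrative only): with max_free_fill_size=64 and
// free_fill_byte=0x55, FillChunk() overwrites up to the first 64 bytes of the
// freed user region (past the part that overlaps ChunkBase) with 0x55, which
// makes stale data from a use-after-free easier to spot in a debugger.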

struct QuarantineCallback {
  QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
      : cache_(cache),
        stack_(stack) {
  }

  void PreQuarantine(AsanChunk *m) const {
    FillChunk(m);
    // Poison the region.
    PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);
  }

  void Recycle(AsanChunk *m) const {
    void *p = get_allocator().GetBlockBegin(m);

    // The secondary will immediately unpoison and unmap the memory, so this
    // branch is unnecessary.
    if (get_allocator().FromPrimary(p)) {
      if (p != m) {
        // Clear the magic value, as allocator internals may overwrite the
        // contents of deallocated chunk, confusing GetAsanChunk lookup.
        reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
      }

      u8 old_chunk_state = CHUNK_QUARANTINE;
      if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                          CHUNK_INVALID,
                                          memory_order_acquire)) {
        CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
      }

      PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                   kAsanHeapLeftRedzoneMagic);
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  void RecyclePassThrough(AsanChunk *m) const {
    // Recycle for the secondary will immediately unpoison and unmap the
    // memory, so quarantine preparation is unnecessary.
    if (get_allocator().FromPrimary(m)) {
      // The primary allocation may need pattern fill if enabled.
      FillChunk(m);
    }
    Recycle(m);
  }

  void *Allocate(uptr size) const {
    void *res = get_allocator().Allocate(cache_, size, 1);
    // TODO(alekseys): Consider making quarantine OOM-friendly.
    if (UNLIKELY(!res))
      ReportOutOfMemory(size, stack_);
    return res;
  }

  void Deallocate(void *p) const { get_allocator().Deallocate(cache_, p); }

 private:
  AllocatorCache* const cache_;
  BufferedStackTrace* const stack_;
};

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;

void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}

void AsanMapUnmapCallback::OnMapSecondary(uptr p, uptr size, uptr user_begin,
                                          uptr user_size) const {
  uptr user_end = RoundDownTo(user_begin + user_size, ASAN_SHADOW_GRANULARITY);
  user_begin = RoundUpTo(user_begin, ASAN_SHADOW_GRANULARITY);
  // The secondary mapping will be returned to the user immediately, so there
  // is no value in poisoning it with non-zero just before Allocate() unpoisons
  // it. Just poison the head and tail that Allocate() never sees.
  PoisonShadow(p, user_begin - p, kAsanHeapLeftRedzoneMagic);
  PoisonShadow(user_end, size - (user_end - p), kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}

void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}

void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}

struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  uptr max_user_defined_malloc_size;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}

  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }

  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    (uptr)options.thread_local_quarantine_size_kb << 10);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }

  void InitLinkerInitialized(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
    SharedInitCode(options);
    max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                       ? common_flags()->max_allocation_size_mb
                                             << 20
                                       : kMaxAllowedMallocSize;
  }

  void RePoisonChunk(uptr chunk) {
    // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
    AsanChunk *ac = GetAsanChunk((void *)chunk);
    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk);
    if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
                  CHUNK_ALLOCATED) {
      uptr beg = ac->Beg();
      uptr end = ac->Beg() + ac->UsedSize();
      uptr chunk_end = chunk + allocated_size;
      if (chunk < beg && beg < end && end <= chunk_end) {
        // Looks like a valid AsanChunk in use, poison redzones only.
        PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
        uptr end_aligned_down = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
        FastPoisonShadowPartialRightRedzone(
            end_aligned_down, end - end_aligned_down,
            chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
        return;
      }
    }

    // This is either not an AsanChunk, or a freed or quarantined AsanChunk.
    // In either case, poison everything.
    PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
  }

  void ReInitialize(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);

    // Poison the redzones of all existing allocations.
    if (CanPoisonMemory()) {
      allocator.ForceLock();
      allocator.ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
      allocator.ForceUnlock();
    }
  }

  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetMaxSize() >> 20;
    options->thread_local_quarantine_size_kb =
        quarantine.GetMaxCacheSize() >> 10;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = AllocatorMayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  }

  // -------------------- Helper methods. -------------------------
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log = user_requested_size <= 64 - 16            ? 0
                 : user_requested_size <= 128 - 32         ? 1
                 : user_requested_size <= 512 - 64         ? 2
                 : user_requested_size <= 4096 - 128       ? 3
                 : user_requested_size <= (1 << 14) - 256  ? 4
                 : user_requested_size <= (1 << 15) - 512  ? 5
                 : user_requested_size <= (1 << 16) - 1024 ? 6
                                                           : 7;
    u32 hdr_log = RZSize2Log(RoundUpToPowerOfTwo(sizeof(ChunkHeader)));
    u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire));
    u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire));
    return Min(Max(rz_log, Max(min_log, hdr_log)), Max(max_log, hdr_log));
  }
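
  // A worked example of the adaptive sizing above (illustrative, assuming the
  // default redzone=16 and max_redzone=2048 flags): a 100-byte request falls
  // into the "<= 512 - 64" bucket, so rz_log = 2 and the redzone is
  // RZLog2Size(2) == 64 bytes; min_log, max_log and hdr_log leave that value
  // unchanged.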

  static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
    if (user_requested_alignment < 8)
      return 0;
    if (user_requested_alignment > 512)
      user_requested_alignment = 512;
    return Log2(user_requested_alignment) - 2;
  }

  static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
    if (user_requested_alignment_log == 0)
      return 0;
    return 1LL << (user_requested_alignment_log + 2);
  }
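
  // These two functions form the 3-bit alignment encoding used in
  // ChunkHeader::user_requested_alignment_log. For example, alignment 8 is
  // stored as log 1 and decoded back as 1 << (1 + 2) == 8; anything above 512
  // is clamped to 512 (log 7). A log of 0 means no special alignment was
  // requested.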

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    if (!left_chunk)
      return right_chunk;
    if (!right_chunk)
      return left_chunk;
    // Prefer an allocated chunk over a freed chunk, and a freed chunk over an
    // available chunk.
    u8 left_state = atomic_load(&left_chunk->chunk_state, memory_order_relaxed);
    u8 right_state =
        atomic_load(&right_chunk->chunk_state, memory_order_relaxed);
    if (left_state != right_state) {
      if (left_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }
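
  // For example (illustrative): if addr sits in the gap between two allocated
  // chunks, 4 bytes past the end of left_chunk and 12 bytes before the start
  // of right_chunk, both states match and the smaller offset wins, so
  // left_chunk is reported.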

  bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
    AsanChunk *m = GetAsanChunkByAddr(addr);
    if (!m) return false;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return false;
    if (m->Beg() != addr) return false;
    AsanThread *t = GetCurrentThread();
    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
    return true;
  }

  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!AsanInited()))
      AsanInitFromRtl();
    if (UNLIKELY(IsRssLimitExceeded())) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = ASAN_SHADOW_GRANULARITY;
    const uptr user_requested_alignment_log =
        ComputeUserRequestedAlignmentLog(alignment);
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    bool from_primary = PrimaryAllocator::CanAllocate(needed_size, alignment);
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!from_primary)
      needed_size += rz_size;
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
        size > max_user_defined_malloc_size) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
               size);
        return nullptr;
      }
      uptr malloc_limit =
          Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
      ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack);
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr user_beg = alloc_beg + rz_size;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    CHECK(size);
    m->SetUsedSize(size);
    m->user_requested_alignment_log = user_requested_alignment_log;

    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));

    if (!from_primary || *(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0) {
      // The allocator provides an unpoisoned chunk. This is possible for the
      // secondary allocator, or if CanPoisonMemory() was false for some time,
      // for example, due to flags()->start_disabled. Anyway, poison left and
      // right of the block before using it for anything else.
      uptr tail_beg = RoundUpTo(user_end, ASAN_SHADOW_GRANULARITY);
      uptr tail_end = alloc_beg + allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow(alloc_beg, user_beg - alloc_beg, kAsanHeapLeftRedzoneMagic);
      PoisonShadow(tail_beg, tail_end - tail_beg, kAsanHeapLeftRedzoneMagic);
    }

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, ASAN_SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (ASAN_SHADOW_GRANULARITY - 1)) : 0;
    }
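
    // A small example of the partial-granule handling above (illustrative):
    // with ASAN_SHADOW_GRANULARITY == 8, a 13-byte allocation leaves the last
    // granule partially addressable, and its shadow byte is set to
    // 13 & 7 == 5, i.e. only the first 5 bytes of that granule are accessible.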

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
    }
    RunMallocHooks(res, size);
    return res;
  }

  // Set quarantine flag if chunk is allocated, issue ASan error report on
  // available and quarantined chunks. Return true on success, false otherwise.
  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                              BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid race on double-free.
    if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                        CHUNK_QUARANTINE,
                                        memory_order_acquire)) {
      ReportInvalidFree(ptr, old_chunk_state, stack);
      // It's not safe to push a chunk in quarantine on invalid free.
      return false;
    }
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    // It was user data.
    m->SetFreeContext(kInvalidTid, 0);
    return true;
  }

  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlagIfAllocated.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
    CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
             CHUNK_QUARANTINE);
    AsanThread *t = GetCurrentThread();
    m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
                     m, m->UsedSize());
    }
  }

  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
    // malloc. Don't report an invalid free in this case.
    if (SANITIZER_WINDOWS &&
        !get_allocator().PointerIsMine(ptr)) {
      if (!IsSystemHeapAddress(p))
        ReportFreeNotMalloced(p, stack);
      return;
    }

    RunFreeHooks(ptr);

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    } else {
      if (flags()->new_delete_type_mismatch &&
          (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
          ((delete_size && delete_size != m->UsedSize()) ||
           ComputeUserRequestedAlignmentLog(delete_alignment) !=
               m->user_requested_alignment_log)) {
        ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
      }
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    QuarantineChunk(m, ptr, stack);
  }

  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire);
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator, there is no need to
    // clear it, as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }

  void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
    allocator.SwallowCache(ac);
  }

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  // Returns nullptr if AsanChunk is not yet initialized just after
  // get_allocator().Allocate(), or is being destroyed just before
  // get_allocator().Deallocate().
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg)
      return nullptr;
    AsanChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
    if (!p) {
      if (!allocator.FromPrimary(alloc_beg))
        return nullptr;
      p = reinterpret_cast<AsanChunk *>(alloc_beg);
    }
    u8 state = atomic_load(&p->chunk_state, memory_order_relaxed);
    // This does not guarantee that the chunk is initialized, but any other
    // value definitely means it is not a valid chunk.
    if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE)
      return p;
    return nullptr;
  }

  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }

  uptr AllocationSizeFast(uptr p) {
    return reinterpret_cast<AsanChunk *>(p - kChunkHeaderSize)->UsedSize();
  }

  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    sptr offset = 0;
    if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so it may actually be a
      // right buffer overflow from the chunk just before it.
      // Search a bit backwards to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }

  void Purge(BufferedStackTrace *stack) {
    AsanThread *t = GetCurrentThread();
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      quarantine.DrainAndRecycle(GetQuarantineCache(ms),
                                 QuarantineCallback(GetAllocatorCache(ms),
                                                    stack));
    }
    {
      SpinMutexLock l(&fallback_mutex);
      quarantine.DrainAndRecycle(&fallback_quarantine_cache,
                                 QuarantineCallback(&fallback_allocator_cache,
                                                    stack));
    }

    allocator.ForceReleaseToOS();
  }

  void PrintStats() {
    allocator.PrintStats();
    quarantine.PrintStats();
  }

  void ForceLock() SANITIZER_ACQUIRE(fallback_mutex) {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() SANITIZER_RELEASE(fallback_mutex) {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}

bool AsanChunkView::IsValid() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) !=
                       CHUNK_INVALID;
}
bool AsanChunkView::IsAllocated() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
  return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}

uptr AsanChunkView::AllocTid() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return tid;
}

uptr AsanChunkView::FreeTid() const {
  if (!IsQuarantined())
    return kInvalidTid;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return tid;
}

AllocType AsanChunkView::GetAllocType() const {
  return (AllocType)chunk_->alloc_type;
}

u32 AsanChunkView::GetAllocStackId() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return stack;
}

u32 AsanChunkView::GetFreeStackId() const {
  if (!IsQuarantined())
    return 0;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return stack;
}

void InitializeAllocator(const AllocatorOptions &options) {
  instance.InitLinkerInitialized(options);
}

void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
}

void AsanThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}

void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}

void *asan_reallocarray(void *p, uptr nmemb, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return asan_realloc(p, nmemb * size, stack);
}

void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on Realloc to 0
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}

void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}

void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(
      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}

void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, alloc_type, true));
}

void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  instance.ForceLock();
}

void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  instance.ForceUnlock();
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}

uptr PointsIntoChunk(void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
                __asan::CHUNK_ALLOCATED)
    return 0;
  uptr chunk = m->Beg();
  if (m->AddrIsInside(addr))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  // FIXME: All use cases provide the chunk address, so
  // GetAsanChunkByAddrFastLocked is not needed.
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  return m ? m->Beg() : 0;
}

uptr GetUserAddr(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
                    : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return atomic_load(&m->chunk_state, memory_order_relaxed) ==
         __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  u32 tid = 0;
  u32 stack = 0;
  m->GetAllocContext(tid, stack);
  return stack;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObject(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m ||
      (atomic_load(&m->chunk_state, memory_order_acquire) !=
       __asan::CHUNK_ALLOCATED) ||
      !m->AddrIsInside(addr)) {
    return kIgnoreObjectInvalid;
  }
  if (m->lsan_tag == kIgnored)
    return kIgnoreObjectAlreadyIgnored;
  m->lsan_tag = __lsan::kIgnored;
  return kIgnoreObjectSuccess;
}

}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

static const void *AllocationBegin(const void *p) {
  AsanChunk *m = __asan::instance.GetAsanChunkByAddr((uptr)p);
  if (!m)
    return nullptr;
  if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
    return nullptr;
  if (m->UsedSize() == 0)
    return nullptr;
  return (const void *)(m->Beg());
}

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  if (!p) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}
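
// Illustrative usage from instrumented user code (not part of this file),
// assuming <sanitizer/allocator_interface.h> is included:
//   void *p = malloc(32);
//   __sanitizer_get_ownership(p);                    // returns 1
//   __sanitizer_get_allocated_size(p);               // returns 32
//   __sanitizer_get_allocated_begin((char *)p + 5);  // returns p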

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = instance.AllocationSizeFast(reinterpret_cast<uptr>(p));
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

void __sanitizer_purge_allocator() {
  GET_STACK_TRACE_MALLOC;
  instance.Purge(&stack);
}

int __asan_update_allocation_context(void* addr) {
  GET_STACK_TRACE_MALLOC;
  return instance.UpdateAllocationStack((uptr)addr, &stack);
}