//===-- asan_allocator2.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#if ASAN_ALLOCATOR_VERSION == 2

#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"

namespace __asan {

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    // Since ASan's mapping is compacting, the shadow chunk may not be
    // page-aligned, so we only flush the page-aligned portion.
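    // Illustrative example (assuming the default 8-byte shadow granularity
    // and 4096-byte pages): unmapping a 256 KB user region corresponds to a
    // 32 KB shadow region; only the whole pages inside that shadow region
    // are returned to the OS below, while partial pages at its edges are
    // left mapped.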
    uptr page_size = GetPageSizeCached();
    uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
    uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
    FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};

#if SANITIZER_WORDSIZE == 64
#if defined(__powerpc64__)
const uptr kAllocatorSpace =  0xa0000000000ULL;
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
  SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
  FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
  FIRST_32_SECOND_64(1 << 18, 1 << 20);
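// (That is, 256 KB of thread-local quarantine on 32-bit targets and 1 MB on
// 64-bit targets; kMaxAllowedMallocSize above is 3 GB and 8 GB respectively.)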

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into the quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: larger allocations get larger redzones.
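// For reference, the encoding implemented by RZLog2Size/RZSize2Log below is:
//   rz_log  : 0   1   2   3    4    5    6     7
//   rz_size : 16  32  64  128  256  512  1024  2048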
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}

static uptr ComputeRZLog(uptr user_requested_size) {
  u32 rz_log =
    user_requested_size <= 64        - 16   ? 0 :
    user_requested_size <= 128       - 32   ? 1 :
    user_requested_size <= 512       - 64   ? 2 :
    user_requested_size <= 4096      - 128  ? 3 :
    user_requested_size <= (1 << 14) - 256  ? 4 :
    user_requested_size <= (1 << 15) - 512  ? 5 :
    user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
  return Max(rz_log, RZSize2Log(flags()->redzone));
}
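// Example: a 100-byte request falls into the third bucket above
// (100 <= 512 - 64), so it gets rz_log == 2, i.e. a 64-byte redzone, unless
// flags()->redzone asks for an even larger minimum.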

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.
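// Example (illustrative, for a non-memalign allocation): with the minimum
// 16-byte redzone (rz_log == 0) the ChunkHeader occupies the entire left
// redzone, so the chunk starts at the beginning of the allocator block and
// user memory begins 16 bytes in; whatever remains of the size-class block
// past the user memory serves as the right redzone.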

// If a memory chunk is allocated by memalign and we had to increase the
// allocation size to achieve the proper alignment, then we store this magic
// value in the first uptr word of the memory block and store the address of
// the ChunkHeader in the next uptr.
// M B ? ? ? L L L L L L  H H U U U U U U
//   M -- magic value kMemalignMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kMemalignMagic = 0xCC6E96B9;

struct ChunkHeader {
  // First 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  // Second 8 bytes.
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  AsanChunk *next;
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
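// (For reference: the bitfields above pack into the first 8 bytes and the two
// u32 counters into the second 8 bytes, giving the 16-byte ChunkHeader checked
// here. Header2 is a pointer plus a u32, so kChunkHeader2Size is 16 bytes on
// typical 64-bit targets and 8 bytes on 32-bit targets.)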

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(allocator.GetMetaData(AllocBeg()));
  }
  void *AllocBeg() {
    if (from_memalign)
      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  // We store the alloc/free stack traces in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - RZLog2Size(rz_log));
  }
  uptr AllocStackSize() {
    CHECK_GE(RZLog2Size(rz_log), kChunkHeaderSize);
    return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    if (user_requested_size < kChunkHeader2Size) return 0;
    uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
};

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

static void GetStackTraceFromId(u32 id, StackTrace *stack) {
  CHECK(id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(id, &size);
  CHECK_LT(size, kStackTraceMax);
  internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
  stack->size = size;
}

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->alloc_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                                chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->free_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                                chunk_->FreeStackSize());
}

struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (m->from_memalign) {
      uptr *memalign_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(memalign_magic[0], kMemalignMagic);
      CHECK_EQ(memalign_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return allocator.Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    allocator.Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};

void InitializeAllocator() {
  allocator.Init();
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type) {
  if (!asan_inited)
    __asan_init();
  CHECK(stack);
  const uptr min_alignment = SHADOW_GRANULARITY;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc would
    // not return NULL even for zero-size allocations. Moreover, it looks like
    // operator new should never return NULL, and results of consecutive "new"
    // calls must be different even if the allocated size is zero.
    size = 1;
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_log = ComputeRZLog(size);
  uptr rz_size = RZLog2Size(rz_log);
  uptr rounded_size = RoundUpTo(size, alignment);
  if (rounded_size < kChunkHeader2Size)
    rounded_size = kChunkHeader2Size;
  uptr needed_size = rounded_size + rz_size;
  if (alignment > min_alignment)
    needed_size += alignment;
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
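  // Worked example (assuming the default 8-byte shadow granularity and a
  // 16-byte flags()->redzone): malloc(100) gives rz_log == 2 (64-byte
  // redzone), rounded_size == 104 and needed_size == 168, which fits in the
  // primary allocator, so no extra right redzone is added here.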
  CHECK(IsAligned(needed_size, min_alignment));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  }
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  // Clear the first allocated word (an old kMemalignMagic may still be there).
  reinterpret_cast<uptr *>(alloc_beg)[0] = 0;
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->chunk_state = CHUNK_ALLOCATED;
  m->alloc_type = alloc_type;
  m->rz_log = rz_log;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (m->from_memalign) {
    CHECK_LE(beg_plus_redzone + 2 * sizeof(uptr), user_beg);
    uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
    memalign_magic[0] = kMemalignMagic;
    memalign_magic[1] = chunk_beg;
  }
  if (using_primary_allocator) {
    CHECK(size);
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    meta[0] = size;
    meta[1] = chunk_beg;
  }

  if (flags()->use_stack_depot) {
    m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->alloc_context_id = 0;
    StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
  }

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && flags()->poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }
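  // E.g. (with 8-byte granularity and poison_heap enabled) for size == 13 the
  // first 8-byte granule is fully unpoisoned and the shadow byte of the next
  // granule is set to 13 & 7 == 5, so only its first 5 bytes are addressable.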

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0) return;
  ASAN_FREE_HOOK(ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  // Flip the chunk_state atomically to avoid a race on double-free.
  u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
                                       memory_order_relaxed);

  if (old_chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else if (old_chunk_state != CHUNK_ALLOCATED)
    ReportFreeNotMalloced((uptr)ptr, stack);
  CHECK(old_chunk_state == CHUNK_ALLOCATED);
  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bit this resides in the user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  if (flags()->use_stack_depot) {
    m->free_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->free_context_id = 0;
    StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  }
  CHECK(m->chunk_state == CHUNK_QUARANTINE);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
                   m, m->UsedSize());
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
                   m, m->UsedSize());
  }
}

static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  uptr old_size = m->UsedSize();
  uptr memcpy_size = Min(new_size, old_size);
  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
  if (new_ptr) {
    CHECK(REAL(memcpy) != 0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}

static AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *ptr = reinterpret_cast<void *>(p);
  uptr alloc_beg = reinterpret_cast<uptr>(allocator.GetBlockBegin(ptr));
  if (!alloc_beg) return 0;
  uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (memalign_magic[0] == kMemalignMagic) {
    AsanChunk *m = reinterpret_cast<AsanChunk *>(memalign_magic[1]);
    CHECK(m->from_memalign);
    return m;
  }
  if (!allocator.FromPrimary(ptr)) {
    uptr *meta = reinterpret_cast<uptr *>(
        allocator.GetMetaData(reinterpret_cast<void *>(alloc_beg)));
    AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    return m;
  }
  uptr actual_size = allocator.GetActuallyAllocatedSize(ptr);
  CHECK_LE(actual_size, SizeClassMap::kMaxSize);
  // We know the actually allocated size, but we don't know the redzone size.
  // Just try all possible redzone sizes.
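  // E.g. (with the default 16-byte minimum redzone) a 256-byte block fails
  // the round-trip check for rz_log 0 and 1, but ComputeRZLog(256 - 64) == 2,
  // so the header is recovered at alloc_beg + 64 - kChunkHeaderSize.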
  for (u32 rz_log = 0; rz_log < 8; rz_log++) {
    u32 rz_size = RZLog2Size(rz_log);
    uptr max_possible_size = actual_size - rz_size;
    if (ComputeRZLog(max_possible_size) != rz_log)
      continue;
    return reinterpret_cast<AsanChunk *>(
        alloc_beg + rz_size - kChunkHeaderSize);
  }
  return 0;
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk over a freed chunk, and a freed chunk over an
  // available chunk.
  if (left_chunk->chunk_state != right_chunk->chunk_state) {
    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
      return right_chunk;
    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
      return right_chunk;
  }
  // Same chunk_state: choose based on offset.
  sptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // an overflow past the right end of the chunk located just to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  AllocatorCache *ac = GetAllocatorCache(this);
  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
  allocator.SwallowCache(GetAllocatorCache(this));
}

void PrintInternalAllocatorStats() {
  allocator.PrintStats();
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
  if (ptr)
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}


uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0))
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  allocator.ForceLock();
  fallback_mutex.Lock();
}

void asan_mz_force_unlock() {
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// The ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc. here.
uptr __asan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __asan_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return (AllocationSize(ptr) > 0);
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementations of the malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif


#endif  // ASAN_ALLOCATOR_VERSION