//===-- hwasan_allocator.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
#include "hwasan_mapping.h"
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"
#include "lsan/lsan_common.h"

namespace __hwasan {

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;

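// Fixed tags used on the fallback path, when no Thread object is available to
// generate a random tag (e.g. during early thread startup or teardown).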
static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
static constexpr tag_t kFallbackFreeTag = 0xBC;

enum {
  // Either just allocated by the underlying allocator, but the Metadata is
  // not yet ready, or almost returned to the underlying allocator and the
  // Metadata is already meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 1,
};

// Initialized in HwasanAllocatorInit, and never changed.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];
static uptr max_malloc_size;

bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->IsAllocated();
}

uptr HwasanChunkView::Beg() const {
  return block_;
}
uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
  return metadata_->GetRequestedSize();
}
u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->GetAllocStackId();
}

u32 HwasanChunkView::GetAllocThreadId() const {
  return metadata_->GetAllocThreadId();
}

uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::AddrIsInside(uptr addr) const {
  return (addr >= Beg()) && (addr < Beg() + UsedSize());
}

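// Packs the allocating thread id (upper 32 bits) and the stack depot id
// (lower 32 bits) into a single atomic word, then publishes the chunk with a
// release store on chunk_state, so a reader that observes CHUNK_ALLOCATED
// also sees the allocation context.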
inline void Metadata::SetAllocated(u32 stack, u64 size) {
  Thread *t = GetCurrentThread();
  u64 context = t ? t->unique_id() : kMainTid;
  context <<= 32;
  context += stack;
  requested_size_low = size & ((1ul << 32) - 1);
  requested_size_high = size >> 32;
  atomic_store(&alloc_context_id, context, memory_order_relaxed);
  atomic_store(&chunk_state, CHUNK_ALLOCATED, memory_order_release);
}

inline void Metadata::SetUnallocated() {
  atomic_store(&chunk_state, CHUNK_INVALID, memory_order_release);
  requested_size_low = 0;
  requested_size_high = 0;
  atomic_store(&alloc_context_id, 0, memory_order_relaxed);
}

inline bool Metadata::IsAllocated() const {
  return atomic_load(&chunk_state, memory_order_relaxed) == CHUNK_ALLOCATED;
}

inline u64 Metadata::GetRequestedSize() const {
  return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
}

inline u32 Metadata::GetAllocStackId() const {
  return atomic_load(&alloc_context_id, memory_order_relaxed);
}

inline u32 Metadata::GetAllocThreadId() const {
  u64 context = atomic_load(&alloc_context_id, memory_order_relaxed);
  u32 tid = context >> 32;
  return tid;
}

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}

inline void Metadata::SetLsanTag(__lsan::ChunkTag tag) {
  lsan_tag = tag;
}

inline __lsan::ChunkTag Metadata::GetLsanTag() const {
  return static_cast<__lsan::ChunkTag>(lsan_tag);
}

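// In aliasing mode, heap allocations are placed in a dedicated region so that
// tagged aliases of a heap address stay within taggable memory. The checks
// below verify that the entire alias region shares the high bits (above
// kTaggableRegionCheckShift) with the shadow base.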
uptr GetAliasRegionStart() {
#if defined(HWASAN_ALIASING_MODE)
  constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
  uptr AliasRegionStart =
      __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;

  CHECK_EQ(AliasRegionStart >> kTaggableRegionCheckShift,
           __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  CHECK_EQ(
      (AliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
      __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  return AliasRegionStart;
#else
  return 0;
#endif
}

void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms,
      GetAliasRegionStart());
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
  if (common_flags()->max_allocation_size_mb) {
    max_malloc_size = common_flags()->max_allocation_size_mb << 20;
    max_malloc_size = Min(max_malloc_size, kMaxAllowedMallocSize);
  } else {
    max_malloc_size = kMaxAllowedMallocSize;
  }
}

void HwasanAllocatorLock() { allocator.ForceLock(); }

void HwasanAllocatorUnlock() { allocator.ForceUnlock(); }

void AllocatorThreadStart(AllocatorCache *cache) { allocator.InitCache(cache); }

void AllocatorThreadFinish(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
  allocator.DestroyCache(cache);
}

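// Rounds size up to a multiple of the shadow granule (and to at least one
// byte) so that the allocation covers whole shadow cells.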
static uptr TaggedSize(uptr size) {
  if (!size) size = 1;
  uptr new_size = RoundUpTo(size, kShadowAlignment);
  CHECK_GE(new_size, size);
  return new_size;
}

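// Common allocation path: checks size and RSS limits, allocates from the
// per-thread cache (or the global fallback cache when no Thread exists),
// optionally fills or zeroes the memory, writes the tail magic into the slack
// of the last granule, tags the memory, and records the metadata.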
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  // Keep this consistent with LSan and ASan behavior.
  if (UNLIKELY(orig_size == 0))
    orig_size = 1;
  if (UNLIKELY(orig_size > max_malloc_size)) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  if (zeroise) {
    // The secondary allocator mmaps memory, which should be zero-initialized,
    // so we don't need to clear it explicitly.
    if (allocator.FromPrimary(allocated))
      internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  if (size != orig_size) {
    u8 *tail = reinterpret_cast<u8 *>(allocated) + orig_size;
    uptr tail_length = size - orig_size;
    internal_memcpy(tail, tail_magic, tail_length - 1);
    // The short granule byte is excluded from the magic tail, so we
    // explicitly untag it.
    tail[tail_length - 1] = 0;
  }

  void *user_ptr = allocated;
  // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
  // false. When tag_in_malloc == false and tag_in_free == true, malloc needs
  // to retag the memory with 0.
  if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
      (flags()->tag_in_malloc || flags()->tag_in_free) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
      tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
      uptr tag_size = orig_size ? orig_size : 1;
      uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
      user_ptr =
          (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
      if (full_granule_size != tag_size) {
        u8 *short_granule =
            reinterpret_cast<u8 *>(allocated) + full_granule_size;
        TagMemoryAligned((uptr)short_granule, kShadowAlignment,
                         tag_size % kShadowAlignment);
        short_granule[kShadowAlignment - 1] = tag;
      }
    } else {
      user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
    }
  }

  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
#if CAN_SANITIZE_LEAKS
  meta->SetLsanTag(__lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                  : __lsan::kDirectlyLeaked);
#endif
  meta->SetAllocated(StackDepotPut(*stack), orig_size);
  RunMallocHooks(user_ptr, orig_size);
  return user_ptr;
}

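// Returns true if the pointer tag matches the memory tag of the first granule
// (including the short granule encoding). Pointers outside the taggable
// region trivially match.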
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
  if (!InTaggableRegion(tagged_uptr))
    return true;
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}

static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,
                             void *tagged_ptr) {
  // This function can return true if halt_on_error is false.
  if (!MemIsApp(reinterpret_cast<uptr>(untagged_ptr)) ||
      !PointerAndMemoryTagsMatch(tagged_ptr)) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return true;
  }
  return false;
}

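// Common deallocation path: validates the pointer, checks the tail magic,
// clears the metadata, optionally fills the memory, retags it to catch
// use-after-free, records the free in the thread's heap allocations ring
// buffer, and returns the block to the allocator.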
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  void *untagged_ptr = UntagPtr(tagged_ptr);

  if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
    return;

  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  tag_t pointer_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  if (!meta) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return;
  }

  RunFreeHooks(tagged_ptr);

  uptr orig_size = meta->GetRequestedSize();
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->GetAllocStackId();
  u32 alloc_thread_id = meta->GetAllocThreadId();

  bool in_taggable_region =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr));

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && orig_size &&
      tagged_size != orig_size) {
    uptr tail_size = tagged_size - orig_size - 1;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    tag_t short_granule_memtag = *(reinterpret_cast<tag_t *>(
        reinterpret_cast<uptr>(tail_beg) + tail_size));
    if (tail_size &&
        (internal_memcmp(tail_beg, tail_magic, tail_size) ||
         (in_taggable_region && pointer_tag != short_granule_memtag)))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_magic);
  }

  // TODO(kstoimenov): consider meta->SetUnallocated(free_context_id).
  meta->SetUnallocated();
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (in_taggable_region && flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    // Always store full 8-bit tags on free to maximize UAF detection.
    tag_t tag;
    if (t) {
      // Make sure we are not using a short granule tag as a poison tag. This
      // would make us attempt to read the memory on a UAF.
      // The tag can be zero if tagging is disabled on this thread.
      do {
        tag = t->GenerateRandomTag(/*num_bits=*/8);
      } while (
          UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
    } else {
      static_assert(kFallbackFreeTag >= kShadowAlignment,
                    "fallback tag must not be a short granule tag.");
      tag = kFallbackFreeTag;
    }
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     tag);
  }
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_thread_id,
                alloc_context_id, free_context_id,
                static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}

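// realloc is implemented as allocate + copy + deallocate so that both the old
// and the new block get fresh tags; the copy length is limited by the old
// requested size.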
static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
  if (CheckInvalidFree(stack, untagged_ptr_old, tagged_ptr_old))
    return nullptr;
  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    void *untagged_ptr_new = UntagPtr(tagged_ptr_new);
    internal_memcpy(untagged_ptr_new, untagged_ptr_old,
                    Min(new_size, static_cast<uptr>(meta->GetRequestedSize())));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}

static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

HwasanChunkView FindHeapChunkByAddress(uptr address) {
  if (!allocator.PointerIsMine(reinterpret_cast<void *>(address)))
    return HwasanChunkView();
  void *block = allocator.GetBlockBegin(reinterpret_cast<void *>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}

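// Returns the begin of the allocation containing p, retagged with p's pointer
// tag, or null if p does not point into a live allocation.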
static const void *AllocationBegin(const void *p) {
  const void *untagged_ptr = UntagPtr(p);
  if (!untagged_ptr)
    return nullptr;

  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  if (!beg)
    return nullptr;

  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  if (b->GetRequestedSize() == 0)
    return nullptr;

  tag_t tag = GetTagFromPointer((uptr)p);
  return (const void *)AddTagToPointer((uptr)beg, tag);
}

static uptr AllocationSize(const void *p) {
  const void *untagged_ptr = UntagPtr(p);
  if (!untagged_ptr)
    return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  if (!beg)
    return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  return b->GetRequestedSize();
}

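// Fast path that assumes p is the begin of an allocation (verified by DCHECKs
// in the caller) and reads the requested size straight from the metadata.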
static uptr AllocationSizeFast(const void *p) {
  const void *untagged_ptr = UntagPtr(p);
  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  return meta->GetRequestedSize();
}

void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}

void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return hwasan_realloc(ptr, nmemb * size, stack);
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void hwasan_free(void *ptr, StackTrace *stack) {
  return HwasanDeallocate(stack, ptr);
}

}  // namespace __hwasan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

void LockAllocator() {
  __hwasan::HwasanAllocatorLock();
}

void UnlockAllocator() {
  __hwasan::HwasanAllocatorUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__hwasan::allocator;
  *end = *begin + sizeof(__hwasan::allocator);
}

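// Returns the untagged begin of the live chunk containing p, or 0. The last
// check also accepts the edge case recognized by
// IsSpecialCaseOfOperatorNew0.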
uptr PointsIntoChunk(void *p) {
  p = UntagPtr(p);
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk =
      reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
  if (!chunk)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return 0;
  if (addr < chunk + metadata->GetRequestedSize())
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, metadata->GetRequestedSize(), addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  CHECK_EQ(UntagAddr(chunk), chunk);
  void *block = __hwasan::allocator.GetBlockBeginFastLocked(
      reinterpret_cast<void *>(chunk));
  if (!block)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(block));
  if (!metadata || !metadata->IsAllocated())
    return 0;

  return reinterpret_cast<uptr>(block);
}

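// Reconstructs the tagged user pointer for a chunk by reading the memory tag
// back from the shadow.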
uptr GetUserAddr(uptr chunk) {
  if (!InTaggableRegion(chunk))
    return chunk;
  tag_t mem_tag = *(tag_t *)__hwasan::MemToShadow(chunk);
  return AddTagToPointer(chunk, mem_tag);
}

LsanMetadata::LsanMetadata(uptr chunk) {
  CHECK_EQ(UntagAddr(chunk), chunk);
  metadata_ =
      chunk ? __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk))
            : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->IsAllocated();
}

ChunkTag LsanMetadata::tag() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetLsanTag();
}

void LsanMetadata::set_tag(ChunkTag value) {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  m->SetLsanTag(value);
}

uptr LsanMetadata::requested_size() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetRequestedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetAllocStackId();
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __hwasan::allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObject(const void *p) {
  p = UntagPtr(p);
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBegin(p));
  if (!chunk)
    return kIgnoreObjectInvalid;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return kIgnoreObjectInvalid;
  if (addr >= chunk + metadata->GetRequestedSize())
    return kIgnoreObjectInvalid;
  if (metadata->GetLsanTag() == kIgnored)
    return kIgnoreObjectAlreadyIgnored;

  metadata->SetLsanTag(kIgnored);
  return kIgnoreObjectSuccess;
}

}  // namespace __lsan

using namespace __hwasan;

void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

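// The following statistics are not tracked for HWASan; the stubs return
// placeholder values.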
uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = AllocationSizeFast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }