//===-- hwasan_allocator.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
#include "hwasan_mapping.h"
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"

namespace __hwasan {

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;

static const tag_t kFallbackAllocTag = 0xBB;
static const tag_t kFallbackFreeTag = 0xBC;

enum RightAlignMode {
  kRightAlignNever,
  kRightAlignSometimes,
  kRightAlignAlways
};

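// Magic bytes placed in the tail of each allocation (between the requested
// size and the end of the last granule); checked on free to detect writes
// past the end of the buffer.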
// Initialized in HwasanAllocatorInit, and never changed.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];

bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->alloc_context_id && metadata_->requested_size;
}

// Aligns the 'addr' right to the granule boundary.
static uptr AlignRight(uptr addr, uptr requested_size) {
  uptr tail_size = requested_size % kShadowAlignment;
  if (!tail_size) return addr;
  return addr + kShadowAlignment - tail_size;
}

uptr HwasanChunkView::Beg() const {
  if (metadata_ && metadata_->right_aligned)
    return AlignRight(block_, metadata_->requested_size);
  return block_;
}
uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
  return metadata_->requested_size;
}
u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->alloc_context_id;
}

uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}

void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
}

void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
}

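// Rounds the requested size up to a whole number of shadow granules
// (kShadowAlignment bytes); tagging operates on whole granules, so this is
// the size that actually gets tagged. A zero-sized request still occupies
// one granule.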
static uptr TaggedSize(uptr size) {
  if (!size) size = 1;
  uptr new_size = RoundUpTo(size, kShadowAlignment);
  CHECK_GE(new_size, size);
  return new_size;
}

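// Allocates a block of at least |orig_size| bytes: rounds the size up to a
// whole number of granules, optionally zeroes or pattern-fills the memory,
// writes the tail magic after the last requested byte, and tags the block
// according to the tagging flags.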
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  if (orig_size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = static_cast<u32>(orig_size);
  meta->alloc_context_id = StackDepotPut(*stack);
  meta->right_aligned = false;
  if (zeroise) {
    internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  if (size != orig_size) {
    internal_memcpy(reinterpret_cast<u8 *>(allocated) + orig_size, tail_magic,
                    size - orig_size - 1);
  }

  void *user_ptr = allocated;
  // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
  // false. When tag_in_malloc = false and tag_in_free = true, malloc needs to
  // retag to 0.
  if ((flags()->tag_in_malloc || flags()->tag_in_free) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
      tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
      uptr tag_size = orig_size ? orig_size : 1;
      uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
      user_ptr =
          (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
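      // The final partial granule is "short": its shadow byte holds the
      // number of valid bytes in the granule, and the real pointer tag is
      // stored in the granule's last byte (see PossiblyShortTagMatches).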
      if (full_granule_size != tag_size) {
        u8 *short_granule =
            reinterpret_cast<u8 *>(allocated) + full_granule_size;
        TagMemoryAligned((uptr)short_granule, kShadowAlignment,
                         tag_size % kShadowAlignment);
        short_granule[kShadowAlignment - 1] = tag;
      }
    } else {
      user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
    }
  }

  HWASAN_MALLOC_HOOK(user_ptr, size);
  return user_ptr;
}

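// Returns true if the tag embedded in |tagged_ptr| matches the memory tag of
// the granule it points into, including the short-granule encoding.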
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}

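// Frees |tagged_ptr|: verifies that the pointer and memory tags still match
// (reporting an invalid free otherwise), checks the tail magic for writes
// past the requested size, optionally fills and retags the freed memory, and
// returns the block to the allocator.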
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  HWASAN_FREE_HOOK(tagged_ptr);

  if (!PointerAndMemoryTagsMatch(tagged_ptr))
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));

  void *untagged_ptr = UntagPtr(tagged_ptr);
  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  uptr orig_size = meta->requested_size;
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->alloc_context_id;

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && orig_size &&
      tagged_size != orig_size) {
    uptr tail_size = tagged_size - orig_size - 1;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    if (tail_size && internal_memcmp(tail_beg, tail_magic, tail_size))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_magic);
  }

  meta->requested_size = 0;
  meta->alloc_context_id = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     t ? t->GenerateRandomTag() : kFallbackFreeTag);
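  // Return the block through the thread-local cache when possible, and record
  // the deallocation in the thread's heap allocation ring buffer so that
  // use-after-free reports can show where the memory was freed.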
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
                free_context_id, static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}

static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  if (!PointerAndMemoryTagsMatch(tagged_ptr_old))
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr_old));

  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    internal_memcpy(UntagPtr(tagged_ptr_new), untagged_ptr_old,
                    Min(new_size, static_cast<uptr>(meta->requested_size)));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}

static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

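// Returns a view of the chunk containing |address|, or an empty view if the
// address does not belong to the heap.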
HwasanChunkView FindHeapChunkByAddress(uptr address) {
  void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}

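// Returns the requested size of the chunk that |tagged_ptr| points to, or 0
// if the pointer does not point at the beginning of an allocation.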
static uptr AllocationSize(const void *tagged_ptr) {
  const void *untagged_ptr = UntagPtr(tagged_ptr);
  if (!untagged_ptr) return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
  if (b->right_aligned) {
    if (beg != reinterpret_cast<void *>(RoundDownTo(
                   reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)))
      return 0;
  } else {
    if (beg != untagged_ptr) return 0;
  }
  return b->requested_size;
}

void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}

void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return hwasan_realloc(ptr, nmemb * size, stack);
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
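  // |memptr| itself may carry a pointer tag; strip it before storing through.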
  *(void **)UntagPtr(memptr) = ptr;
  return 0;
}

void hwasan_free(void *ptr, StackTrace *stack) {
  return HwasanDeallocate(stack, ptr);
}

} // namespace __hwasan

using namespace __hwasan;

void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

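// Free and unmapped byte counts are not tracked by this allocator; these
// return a nonzero placeholder rather than a real statistic.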
uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
