//===-- msan_allocator.cc --------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "msan.h"
#include "msan_allocator.h"
#include "msan_origin.h"
#include "msan_thread.h"
#include "msan_poisoning.h"

namespace __msan {

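// Per-chunk metadata the allocator keeps next to each user allocation (see
// kMetadataSize in the AP* parameter structs below). MSan only needs the size
// the user asked for; shadow and origin state live in the dedicated
// shadow/origin mappings instead.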
struct Metadata {
  uptr requested_size;
};

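// Invoked by the allocator when it maps or unmaps memory on behalf of the
// user program. Mapping needs no extra work, but on unmap the range is
// unpoisoned and the matching shadow (and origin, when origin tracking is on)
// pages are returned to the OS, since they no longer back addressable memory.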
struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    uptr shadow_p = MEM_TO_SHADOW(p);
    ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
    if (__msan_get_track_origins()) {
      uptr origin_p = MEM_TO_ORIGIN(p);
      ReleaseMemoryPagesToOS(origin_p, origin_p + size);
    }
  }
};

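// Per-architecture primary allocator configuration. x86-64 and PowerPC64
// reserve a fixed range of the address space and use SizeClassAllocator64;
// mips64 and aarch64, whose usable virtual address ranges are smaller or
// kernel-dependent, use the region-based SizeClassAllocator32 over the whole
// mmap range instead. kMaxAllowedMallocSize caps any single allocation.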
#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
  using AddressSpaceView = LocalAddressSpaceView;
  using ByteMap = __msan::ByteMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__)
#if SANITIZER_NETBSD || \
    (SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING))
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;

#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x300000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
  using AddressSpaceView = LocalAddressSpaceView;
  using ByteMap = __msan::ByteMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
                          SecondaryAllocator> Allocator;

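// A single global allocator instance serves the whole process. Threads
// normally allocate through a per-thread cache (see MsanAllocate below); the
// fallback cache, guarded by a spin lock, handles allocations on threads for
// which GetCurrentThread() returns null, e.g. very early in their lifetime.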
static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;

void MsanAllocatorInit() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
}

AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
}

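// Common allocation path for all entry points: reject oversized requests,
// grab memory from the per-thread cache (or the locked fallback cache),
// record the requested size in the chunk metadata, then either zero the
// memory (calloc) or poison it as uninitialized, stamping a TAG_ALLOC heap
// origin on it when origin tracking is enabled.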
static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  if (size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n", size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, stack);
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    __msan_clear_and_unpoison(allocated, size);
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  MSAN_MALLOC_HOOK(allocated, size);
  return allocated;
}

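// Frees a chunk. The freed memory is re-poisoned (optionally with a
// TAG_DEALLOC origin pointing at this free) so that later reads of it are
// reported as use of uninitialized memory, then the chunk goes back through
// the per-thread or fallback cache.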
void MsanDeallocate(StackTrace *stack, void *p) {
  CHECK(p);
  MSAN_FREE_HOOK(p);
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  if (flags()->poison_in_free) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

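// realloc() backend. If the new size still fits within the block that
// actually backs old_p, the chunk is reused in place and only the metadata
// is updated, with any newly exposed tail bytes poisoned. Otherwise a new
// chunk is allocated, min(old, new) bytes are copied (CopyMemory carries the
// shadow and origins along with the data), and the old chunk is freed.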
void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
                     uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (new_p) {
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}

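// calloc() backend: checks nmemb * size for overflow before handing the
// product to MsanAllocate with zeroise set.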
void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}

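// Returns the user-requested size if p points at the beginning of a live
// chunk owned by this allocator, and 0 otherwise. This backs the
// __sanitizer_get_ownership/__sanitizer_get_allocated_size queries below.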
static uptr AllocationSize(const void *p) {
  if (!p) return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p) return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

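// Allocator entry points for the malloc() family. Each wrapper performs the
// argument checks its libc contract requires and sets errno on failure via
// SetErrnoOnNull; sizeof(u64), i.e. 8 bytes, is the default alignment used
// for the variants that take no explicit alignment.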
void *msan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}

void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
}

void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    MsanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}

void *msan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}

void *msan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}

void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = MsanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by MsanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

} // namespace __msan

using namespace __msan;

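// Allocator statistics interface from sanitizer_common. This allocator does
// not track free or unmapped byte counts, so those two queries return a
// constant stub value instead of a real measurement.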
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }