//===-- sanitizer_allocator.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"

#include "sanitizer_allocator_checks.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_platform.h"

namespace __sanitizer {

// Default allocator names.
const char *PrimaryAllocatorName = "SizeClassAllocator";
const char *SecondaryAllocatorName = "LargeMmapAllocator";

alignas(64) static char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;

InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(kReleaseToOSIntervalNever);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}
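
// Illustrative usage (a sketch, not code from the runtime): callers need no
// explicit setup because of the double-checked locking above; the acquire
// load pairs with the release store, so a thread that sees the flag set also
// sees the effects of Init(). E.g.:
//   void *p = InternalAlloc(128, /*cache=*/nullptr, /*alignment=*/8);
//   InternalFree(p, /*cache=*/nullptr);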

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  if (alignment == 0) alignment = 8;
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size,
                                          alignment);
  }
  return internal_allocator()->Allocate(cache, size, alignment);
}
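
// Note: when no per-thread cache is supplied, allocations serialize on
// internal_allocator_cache_mu and share the global internal_allocator_cache.
// Passing a thread-local cache avoids taking that lock on the fast path.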

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  uptr alignment = 8;
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
                                            size, alignment);
  }
  return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}
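
// Note: reallocation always uses the default 8-byte alignment, so a block
// obtained via RawInternalAlloc with a stricter alignment may lose that
// alignment across a realloc.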

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
  SetAllocatorOutOfMemory();
  Report("FATAL: %s: internal allocator is out of memory trying to allocate "
         "0x%zx bytes\n", SanitizerToolName, requested_size);
  Die();
}

void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
  void *p = RawInternalAlloc(size, cache, alignment);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size);
  return p;
}
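
// InternalAlloc either returns a valid pointer or dies:
// ReportInternalAllocatorOutOfMemory is NORETURN, so callers may assume a
// non-null result.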

void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
  void *p = RawInternalRealloc(addr, size, cache);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size);
  return p;
}

void *InternalReallocArray(void *addr, uptr count, uptr size,
                           InternalAllocatorCache *cache) {
  if (UNLIKELY(CheckForCallocOverflow(count, size))) {
    Report(
        "FATAL: %s: reallocarray parameters overflow: count * size (%zd * %zd) "
        "cannot be represented in type size_t\n",
        SanitizerToolName, count, size);
    Die();
  }
  return InternalRealloc(addr, count * size, cache);
}
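
// Worked example of the overflow check (a sketch, assuming 64-bit uptr):
// count = 1ULL << 33 and size = 1ULL << 31 give count * size = 2^64, which
// wraps to 0 in uptr arithmetic; CheckForCallocOverflow catches this case,
// so the runtime dies instead of allocating a too-small block.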

void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
  if (UNLIKELY(CheckForCallocOverflow(count, size))) {
    Report("FATAL: %s: calloc parameters overflow: count * size (%zd * %zd) "
           "cannot be represented in type size_t\n", SanitizerToolName, count,
           size);
    Die();
  }
  void *p = InternalAlloc(count * size, cache);
  if (LIKELY(p))
    internal_memset(p, 0, count * size);
  return p;
}
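
// Note: the LIKELY(p) guard is defensive; InternalAlloc dies rather than
// return null, so the memset always runs in practice.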

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  RawInternalFree(addr, cache);
}

void InternalAllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  internal_allocator_cache_mu.Lock();
  internal_allocator()->ForceLock();
}

void InternalAllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  internal_allocator()->ForceUnlock();
  internal_allocator_cache_mu.Unlock();
}
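
// These are for stop-the-world situations such as the tools' fork() handlers
// (an assumption based on common sanitizer practice; the callers are not in
// this file). The lock order is visible above: the cache mutex first, then
// the allocator's internal mutexes, with Unlock releasing in reverse order.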

// LowLevelAllocator
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
constexpr uptr kMinNumPagesRounded = 16;
constexpr uptr kMinRoundedSize = 65536;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
static LowLevelAllocateCallback low_level_alloc_callback;

static LowLevelAllocator Alloc;
LowLevelAllocator &GetGlobalLowLevelAllocator() { return Alloc; }
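
// LowLevelAllocator is a simple bump allocator over mmap'ed chunks: it only
// grows, and there is deliberately no deallocation path here. It is suited
// to small, long-lived internal metadata that must be available before the
// full internal allocator is usable.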

void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, low_level_alloc_min_alignment);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = RoundUpTo(
        size, Min(GetPageSizeCached() * kMinNumPagesRounded, kMinRoundedSize));
    allocated_current_ = (char *)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_, size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}
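
// Sizing sketch: with 4 KiB pages, GetPageSizeCached() * kMinNumPagesRounded
// is 4096 * 16 = 65536, equal to kMinRoundedSize, so fresh mappings come in
// multiples of 64 KiB. On systems with larger pages (e.g. 64 KiB), the Min()
// caps the rounding granularity at 64 KiB rather than 16 pages.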

void SetLowLevelAllocateMinAlignment(uptr alignment) {
  CHECK(IsPowerOfTwo(alignment));
  low_level_alloc_min_alignment = Max(alignment, low_level_alloc_min_alignment);
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}
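
// The callback observes every new mapping as an (address, size) pair. As one
// intended use (based on how ASan employs it; the caller is not in this
// file), a tool can mark these internal regions, e.g. poison them so that
// stray accesses from user code are reported.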

// Support for allocator OOM and other error handling.

static atomic_uint8_t allocator_out_of_memory = {0};
static atomic_uint8_t allocator_may_return_null = {0};

bool IsAllocatorOutOfMemory() {
  return atomic_load_relaxed(&allocator_out_of_memory);
}

void SetAllocatorOutOfMemory() {
  atomic_store_relaxed(&allocator_out_of_memory, 1);
}

bool AllocatorMayReturnNull() {
  return atomic_load(&allocator_may_return_null, memory_order_relaxed);
}

void SetAllocatorMayReturnNull(bool may_return_null) {
  atomic_store(&allocator_may_return_null, may_return_null,
               memory_order_relaxed);
}

void PrintHintAllocatorCannotReturnNull() {
  Report("HINT: if you don't care about these errors you may set "
         "allocator_may_return_null=1\n");
}
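
// The flag named in the hint is the common sanitizer runtime option, set via
// the tool's options environment variable, e.g. (illustrative invocation):
//   ASAN_OPTIONS=allocator_may_return_null=1 ./a.out
// Flag parsing then calls SetAllocatorMayReturnNull(true) at startup.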

static atomic_uint8_t rss_limit_exceeded;

bool IsRssLimitExceeded() {
  return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
}

void SetRssLimitExceeded(bool limit_exceeded) {
  atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
}
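
// SetRssLimitExceeded is driven from outside this file (in the sanitizers, a
// background thread that compares the process RSS against the
// soft_rss_limit_mb flag); allocators consult IsRssLimitExceeded() so they
// can fail allocations while the limit is exceeded.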

}  // namespace __sanitizer