//===-- guarded_pool_allocator.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "gwp_asan/guarded_pool_allocator.h"

#include "gwp_asan/options.h"
#include "gwp_asan/utilities.h"

#include <assert.h>
#include <stddef.h>

using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;

namespace gwp_asan {
namespace {
// Pointer to the singleton version of this class. Instantiated during
// initialisation, this allows the signal handler to find this class in order
// to deduce the root cause of failures. Must not be referenced by users
// outside this translation unit, in order to avoid init-order-fiasco.
GuardedPoolAllocator *SingletonPtr = nullptr;

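// Rounds Size up to the nearest multiple of Boundary, which must be a power
// of two. For example, roundUpTo(17, 16) == (17 + 15) & ~15 == 32.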
size_t roundUpTo(size_t Size, size_t Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}

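// Returns the start address of the page containing Ptr. For example, with
// 4 KiB pages, getPageAddr(0x12345, 0x1000) == 0x12000.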
uintptr_t getPageAddr(uintptr_t Ptr, uintptr_t PageSize) {
  return Ptr & ~(PageSize - 1);
}

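// Note that this returns true for X == 0; callers that need a non-zero value
// assert it separately.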
bool isPowerOfTwo(uintptr_t X) { return (X & (X - 1)) == 0; }
} // anonymous namespace

// Gets the singleton implementation of this class. Thread-compatible until
// init() is called, thread-safe afterwards.
GuardedPoolAllocator *GuardedPoolAllocator::getSingleton() {
  return SingletonPtr;
}

void GuardedPoolAllocator::init(const options::Options &Opts) {
  // Note: We return early from init() here if GWP-ASan is disabled. This
  // avoids heap-allocation of class members, as well as the mmap() of the
  // guarded slots.
  if (!Opts.Enabled || Opts.SampleRate == 0 ||
      Opts.MaxSimultaneousAllocations == 0)
    return;

  Check(Opts.SampleRate >= 0, "GWP-ASan Error: SampleRate is < 0.");
  Check(Opts.SampleRate < (1 << 30), "GWP-ASan Error: SampleRate is >= 2^30.");
  Check(Opts.MaxSimultaneousAllocations >= 0,
        "GWP-ASan Error: MaxSimultaneousAllocations is < 0.");

  SingletonPtr = this;
  Backtrace = Opts.Backtrace;

  State.MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;

  const size_t PageSize = getPlatformPageSize();
  // getPageAddr() and roundUpTo() assume the page size to be a power of 2.
  assert((PageSize & (PageSize - 1)) == 0);
  State.PageSize = PageSize;

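  // The guarded pool is laid out as N slots of maximumAllocationSize() bytes,
  // each separated by a guard page, with one extra guard page in front:
  //   [guard][slot 0][guard][slot 1][guard]...[slot N-1][guard]
  // i.e. (N + 1) guard pages plus N slots, as computed below.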
  size_t PoolBytesRequired =
      PageSize * (1 + State.MaxSimultaneousAllocations) +
      State.MaxSimultaneousAllocations * State.maximumAllocationSize();
  assert(PoolBytesRequired % PageSize == 0);
  void *GuardedPoolMemory = reserveGuardedPool(PoolBytesRequired);

  size_t BytesRequired =
      roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata), PageSize);
  Metadata = reinterpret_cast<AllocationMetadata *>(
      map(BytesRequired, kGwpAsanMetadataName));

  // Allocate memory and set up the free pages queue.
  BytesRequired = roundUpTo(
      State.MaxSimultaneousAllocations * sizeof(*FreeSlots), PageSize);
  FreeSlots =
      reinterpret_cast<size_t *>(map(BytesRequired, kGwpAsanFreeSlotsName));

  // Multiply the sample rate by 2 to give a good, fast approximation for (1 /
  // SampleRate) chance of sampling.
  if (Opts.SampleRate != 1)
    AdjustedSampleRatePlusOne = static_cast<uint32_t>(Opts.SampleRate) * 2 + 1;
  else
    AdjustedSampleRatePlusOne = 2;
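  // For example, SampleRate == 1000 yields AdjustedSampleRatePlusOne == 2001,
  // so the thread-local counter below is seeded uniformly from [1, 2000],
  // giving a mean of ~1000 allocations between samples.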

  initPRNG();
  getThreadLocals()->NextSampleCounter =
      ((getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1) &
      ThreadLocalPackedVariables::NextSampleCounterMask;

  State.GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory);
  State.GuardedPagePoolEnd =
      reinterpret_cast<uintptr_t>(GuardedPoolMemory) + PoolBytesRequired;

  if (Opts.InstallForkHandlers)
    installAtFork();
}

void GuardedPoolAllocator::disable() {
  PoolMutex.lock();
  BacktraceMutex.lock();
}

void GuardedPoolAllocator::enable() {
  PoolMutex.unlock();
  BacktraceMutex.unlock();
}

void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
                                   void *Arg) {
  uintptr_t Start = reinterpret_cast<uintptr_t>(Base);
  for (size_t i = 0; i < State.MaxSimultaneousAllocations; ++i) {
    const AllocationMetadata &Meta = Metadata[i];
    if (Meta.Addr && !Meta.IsDeallocated && Meta.Addr >= Start &&
        Meta.Addr < Start + Size)
      Cb(Meta.Addr, Meta.RequestedSize, Arg);
  }
}

void GuardedPoolAllocator::uninitTestOnly() {
  if (State.GuardedPagePool) {
    unreserveGuardedPool();
    State.GuardedPagePool = 0;
    State.GuardedPagePoolEnd = 0;
  }
  if (Metadata) {
    unmap(Metadata,
          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata),
                    State.PageSize));
    Metadata = nullptr;
  }
  if (FreeSlots) {
    unmap(FreeSlots,
          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*FreeSlots),
                    State.PageSize));
    FreeSlots = nullptr;
  }
  *getThreadLocals() = ThreadLocalPackedVariables();
}

// Note that the minimum backing allocation size in GWP-ASan is always one
// page, and each slot could potentially be multiple pages (but always in
// page increments). Thus, for anything that requires less-than-page-size
// alignment, we don't need to allocate extra padding to ensure the alignment
// can be met.
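// For example, with 4 KiB pages, Size == 128 and Alignment == 16384 require
// 128 + 16384 - 4096 == 12416 bytes of backing, guaranteeing that some
// 16 KiB-aligned address within the page-aligned backing region still has 128
// bytes before the end of the mapping.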
size_t GuardedPoolAllocator::getRequiredBackingSize(size_t Size,
                                                    size_t Alignment,
                                                    size_t PageSize) {
  assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
  assert(Alignment != 0 && "Alignment should be non-zero");
  assert(Size != 0 && "Size should be non-zero");

  if (Alignment <= PageSize)
    return Size;

  return Size + Alignment - PageSize;
}

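// Rounds Ptr up to the next multiple of Alignment (a power of two), e.g.
// alignUp(0x1001, 0x1000) == 0x2000. Already-aligned pointers are returned
// unchanged.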
uintptr_t GuardedPoolAllocator::alignUp(uintptr_t Ptr, size_t Alignment) {
  assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
  assert(Alignment != 0 && "Alignment should be non-zero");
  if ((Ptr & (Alignment - 1)) == 0)
    return Ptr;

  Ptr += Alignment - (Ptr & (Alignment - 1));
  return Ptr;
}

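// Rounds Ptr down to the previous multiple of Alignment (a power of two),
// e.g. alignDown(0x1FFF, 0x1000) == 0x1000.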
uintptr_t GuardedPoolAllocator::alignDown(uintptr_t Ptr, size_t Alignment) {
  assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
  assert(Alignment != 0 && "Alignment should be non-zero");
  if ((Ptr & (Alignment - 1)) == 0)
    return Ptr;

  Ptr -= Ptr & (Alignment - 1);
  return Ptr;
}

void *GuardedPoolAllocator::allocate(size_t Size, size_t Alignment) {
  // GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled,
  // fall back to the supporting allocator.
  if (State.GuardedPagePoolEnd == 0) {
    getThreadLocals()->NextSampleCounter =
        (AdjustedSampleRatePlusOne - 1) &
        ThreadLocalPackedVariables::NextSampleCounterMask;
    return nullptr;
  }

  if (Size == 0)
    Size = 1;
  if (Alignment == 0)
    Alignment = alignof(max_align_t);

  if (!isPowerOfTwo(Alignment) || Alignment > State.maximumAllocationSize() ||
      Size > State.maximumAllocationSize())
    return nullptr;

  size_t BackingSize = getRequiredBackingSize(Size, Alignment, State.PageSize);
  if (BackingSize > State.maximumAllocationSize())
    return nullptr;

  // Protect against recursion.
  if (getThreadLocals()->RecursiveGuard)
    return nullptr;
  ScopedRecursiveGuard SRG;

  size_t Index;
  {
    ScopedLock L(PoolMutex);
    Index = reserveSlot();
  }

  if (Index == kInvalidSlotID)
    return nullptr;

  uintptr_t SlotStart = State.slotToAddr(Index);
  AllocationMetadata *Meta = addrToMetadata(SlotStart);
  uintptr_t SlotEnd = State.slotToAddr(Index) + State.maximumAllocationSize();
  uintptr_t UserPtr;
  // Randomly choose whether to left-align or right-align the allocation, and
  // then apply the necessary adjustments to get an aligned pointer.
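  // Left-aligned allocations start at (or, for large alignments, near) the
  // slot start, so an underflow touches the preceding guard page;
  // right-aligned allocations end at (or near, modulo alignment padding) the
  // slot end, so an overflow touches the following guard page.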
  if (getRandomUnsigned32() % 2 == 0)
    UserPtr = alignUp(SlotStart, Alignment);
  else
    UserPtr = alignDown(SlotEnd - Size, Alignment);

  assert(UserPtr >= SlotStart);
  assert(UserPtr + Size <= SlotEnd);

  // If a slot is multiple pages in size, and the allocation takes up a single
  // page, we can improve overflow detection by leaving the unused pages
  // unmapped.
  const size_t PageSize = State.PageSize;
  allocateInGuardedPool(
      reinterpret_cast<void *>(getPageAddr(UserPtr, PageSize)),
      roundUpTo(Size, PageSize));

  Meta->RecordAllocation(UserPtr, Size);
  {
    ScopedLock UL(BacktraceMutex);
    Meta->AllocationTrace.RecordBacktrace(Backtrace);
  }

  return reinterpret_cast<void *>(UserPtr);
}

void GuardedPoolAllocator::trapOnAddress(uintptr_t Address, Error E) {
  State.FailureType = E;
  State.FailureAddress = Address;

  // Raise a SEGV by touching the first guard page.
  volatile char *p = reinterpret_cast<char *>(State.GuardedPagePool);
  *p = 0;
  // Normally, this would be __builtin_unreachable(), but because of
  // https://bugs.llvm.org/show_bug.cgi?id=47480, unreachable will DCE the
  // volatile store above, even though it has side effects.
  __builtin_trap();
}

void GuardedPoolAllocator::stop() {
  getThreadLocals()->RecursiveGuard = true;
  PoolMutex.tryLock();
}

void GuardedPoolAllocator::deallocate(void *Ptr) {
  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
  uintptr_t UPtr = reinterpret_cast<uintptr_t>(Ptr);
  size_t Slot = State.getNearestSlot(UPtr);
  uintptr_t SlotStart = State.slotToAddr(Slot);
  AllocationMetadata *Meta = addrToMetadata(UPtr);
  if (Meta->Addr != UPtr) {
    // If multiple errors occur at the same time, use the first one.
    ScopedLock L(PoolMutex);
    trapOnAddress(UPtr, Error::INVALID_FREE);
  }

  // Intentionally scope the mutex here, so that other threads can access the
  // pool during the expensive markInaccessible() call.
  {
    ScopedLock L(PoolMutex);
    if (Meta->IsDeallocated) {
      trapOnAddress(UPtr, Error::DOUBLE_FREE);
    }

    // Ensure that the deallocation is recorded before marking the page as
    // inaccessible. Otherwise, a racy use-after-free will have inconsistent
    // metadata.
    Meta->RecordDeallocation();

    // Ensure that the unwinder is not called if the recursive flag is set,
    // otherwise non-reentrant unwinders may deadlock.
    if (!getThreadLocals()->RecursiveGuard) {
      ScopedRecursiveGuard SRG;
      ScopedLock UL(BacktraceMutex);
      Meta->DeallocationTrace.RecordBacktrace(Backtrace);
    }
  }

  deallocateInGuardedPool(reinterpret_cast<void *>(SlotStart),
                          State.maximumAllocationSize());

  // And finally, lock again to release the slot back into the pool.
  ScopedLock L(PoolMutex);
  freeSlot(Slot);
}

size_t GuardedPoolAllocator::getSize(const void *Ptr) {
  assert(pointerIsMine(Ptr));
  ScopedLock L(PoolMutex);
  AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
  assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
  return Meta->RequestedSize;
}

AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {
  return &Metadata[State.getNearestSlot(Ptr)];
}

size_t GuardedPoolAllocator::reserveSlot() {
  // Avoid potential reuse of a slot before we have made at least a single
  // allocation in each slot. Helps with our use-after-free detection.
  if (NumSampledAllocations < State.MaxSimultaneousAllocations)
    return NumSampledAllocations++;

  if (FreeSlotsLength == 0)
    return kInvalidSlotID;

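  // Pick a random free slot, then remove it in O(1) by swapping the last
  // element of the free-slots array into its place.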
  size_t ReservedIndex = getRandomUnsigned32() % FreeSlotsLength;
  size_t SlotIndex = FreeSlots[ReservedIndex];
  FreeSlots[ReservedIndex] = FreeSlots[--FreeSlotsLength];
  return SlotIndex;
}

void GuardedPoolAllocator::freeSlot(size_t SlotIndex) {
  assert(FreeSlotsLength < State.MaxSimultaneousAllocations);
  FreeSlots[FreeSlotsLength++] = SlotIndex;
}

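// Marsaglia's xorshift32 (shift triple 13/17/5): a fast, small-state PRNG
// that is good enough for sampling decisions, but not cryptographically
// secure.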
uint32_t GuardedPoolAllocator::getRandomUnsigned32() {
  uint32_t RandomState = getThreadLocals()->RandomState;
  RandomState ^= RandomState << 13;
  RandomState ^= RandomState >> 17;
  RandomState ^= RandomState << 5;
  getThreadLocals()->RandomState = RandomState;
  return RandomState;
}
} // namespace gwp_asan