//===-- guarded_pool_allocator.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "gwp_asan/guarded_pool_allocator.h"

#include "gwp_asan/options.h"
#include "gwp_asan/utilities.h"

#include <assert.h>
#include <stddef.h>

using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;

namespace gwp_asan {
namespace {
// Pointer to the singleton instance of this class. Set during
// initialisation, it allows the signal handler to find this class in order
// to deduce the root cause of failures. It must not be referenced by users
// outside this translation unit, in order to avoid an init-order fiasco.
GuardedPoolAllocator *SingletonPtr = nullptr;

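// Rounds Size up to the nearest multiple of Boundary. Boundary must be a
// power of two.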
size_t roundUpTo(size_t Size, size_t Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}

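// Returns the address of the page that contains Ptr. PageSize must be a
// power of two.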
uintptr_t getPageAddr(uintptr_t Ptr, uintptr_t PageSize) {
  return Ptr & ~(PageSize - 1);
}

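// Note: also returns true for X == 0; callers that require a non-zero value
// (e.g. alignUp()/alignDown()) assert that separately.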
bool isPowerOfTwo(uintptr_t X) { return (X & (X - 1)) == 0; }
} // anonymous namespace

// Gets the singleton implementation of this class. Thread-compatible until
// init() is called, thread-safe afterwards.
GuardedPoolAllocator *GuardedPoolAllocator::getSingleton() {
  return SingletonPtr;
}

void GuardedPoolAllocator::init(const options::Options &Opts) {
  // Note: We return early from init() here if GWP-ASan is not enabled. This
  // stops heap-allocation of class members, as well as the mmap() of the
  // guarded slots.
  if (!Opts.Enabled || Opts.SampleRate == 0 ||
      Opts.MaxSimultaneousAllocations == 0)
    return;

  Check(Opts.SampleRate >= 0, "GWP-ASan Error: SampleRate is < 0.");
  Check(Opts.SampleRate < (1 << 30), "GWP-ASan Error: SampleRate is >= 2^30.");
  Check(Opts.MaxSimultaneousAllocations >= 0,
        "GWP-ASan Error: MaxSimultaneousAllocations is < 0.");

  SingletonPtr = this;
  Backtrace = Opts.Backtrace;

  State.VersionMagic = {{AllocatorVersionMagic::kAllocatorVersionMagic[0],
                         AllocatorVersionMagic::kAllocatorVersionMagic[1],
                         AllocatorVersionMagic::kAllocatorVersionMagic[2],
                         AllocatorVersionMagic::kAllocatorVersionMagic[3]},
                        AllocatorVersionMagic::kAllocatorVersion,
                        0};

  State.MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;

  const size_t PageSize = getPlatformPageSize();
  // getPageAddr() and roundUpTo() assume the page size to be a power of 2.
  assert((PageSize & (PageSize - 1)) == 0);
  State.PageSize = PageSize;

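  // The pool is laid out as N slots of maximumAllocationSize() bytes each,
  // interleaved with N+1 guard pages:
  //   [guard] [slot 0] [guard] [slot 1] ... [slot N-1] [guard]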
  size_t PoolBytesRequired =
      PageSize * (1 + State.MaxSimultaneousAllocations) +
      State.MaxSimultaneousAllocations * State.maximumAllocationSize();
  assert(PoolBytesRequired % PageSize == 0);
  void *GuardedPoolMemory = reserveGuardedPool(PoolBytesRequired);

  size_t BytesRequired =
      roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata), PageSize);
  Metadata = reinterpret_cast<AllocationMetadata *>(
      map(BytesRequired, kGwpAsanMetadataName));

  // Allocate memory and set up the free pages queue.
  BytesRequired = roundUpTo(
      State.MaxSimultaneousAllocations * sizeof(*FreeSlots), PageSize);
  FreeSlots =
      reinterpret_cast<size_t *>(map(BytesRequired, kGwpAsanFreeSlotsName));

  // The sample counter is drawn uniformly from [1, 2 * SampleRate], so its
  // mean is approximately SampleRate. Multiplying the sample rate by 2 thus
  // gives a good, fast approximation of a (1 / SampleRate) chance of sampling
  // each allocation.
  if (Opts.SampleRate != 1)
    AdjustedSampleRatePlusOne = static_cast<uint32_t>(Opts.SampleRate) * 2 + 1;
  else
    AdjustedSampleRatePlusOne = 2;

  initPRNG();
  getThreadLocals()->NextSampleCounter =
      ((getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1) &
      ThreadLocalPackedVariables::NextSampleCounterMask;

  State.GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory);
  State.GuardedPagePoolEnd =
      reinterpret_cast<uintptr_t>(GuardedPoolMemory) + PoolBytesRequired;

  if (Opts.InstallForkHandlers)
    installAtFork();
}

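// Locks the pool and backtrace mutexes, preventing any mutation of allocator
// state, e.g. so that fork handlers (see installAtFork()) can take a
// consistent snapshot of the pool.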
void GuardedPoolAllocator::disable() {
  PoolMutex.lock();
  BacktraceMutex.lock();
}

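// Releases the locks taken by disable(), allowing allocation and
// deallocation to proceed again.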
void GuardedPoolAllocator::enable() {
  PoolMutex.unlock();
  BacktraceMutex.unlock();
}

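// Invokes Cb once for every live (not yet deallocated) allocation whose
// address lies within [Base, Base + Size). Typically called with the pool
// disabled (see disable()) so the metadata cannot change under the callback.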
void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
                                   void *Arg) {
  uintptr_t Start = reinterpret_cast<uintptr_t>(Base);
  for (size_t i = 0; i < State.MaxSimultaneousAllocations; ++i) {
    const AllocationMetadata &Meta = Metadata[i];
    if (Meta.Addr && !Meta.IsDeallocated && Meta.Addr >= Start &&
        Meta.Addr < Start + Size)
      Cb(Meta.Addr, Meta.RequestedSize, Arg);
  }
}

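// Tears down the pool, metadata, and free-slots mappings, and resets the
// thread-local state. As the name suggests, only intended for use in tests.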
void GuardedPoolAllocator::uninitTestOnly() {
  if (State.GuardedPagePool) {
    unreserveGuardedPool();
    State.GuardedPagePool = 0;
    State.GuardedPagePoolEnd = 0;
  }
  if (Metadata) {
    unmap(Metadata,
          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata),
                    State.PageSize));
    Metadata = nullptr;
  }
  if (FreeSlots) {
    unmap(FreeSlots,
          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*FreeSlots),
                    State.PageSize));
    FreeSlots = nullptr;
  }
  *getThreadLocals() = ThreadLocalPackedVariables();
}

// Note: the minimum backing allocation size in GWP-ASan is always one page,
// and each slot can span multiple pages (but always in page increments).
// Thus, for anything that requires less than page-size alignment, we don't
// need to allocate extra padding to ensure the alignment can be met. For
// larger alignments, the slot start is only guaranteed to be page-aligned,
// so up to Alignment - PageSize bytes of padding may be required.
size_t GuardedPoolAllocator::getRequiredBackingSize(size_t Size,
                                                    size_t Alignment,
                                                    size_t PageSize) {
  assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
  assert(Alignment != 0 && "Alignment should be non-zero");
  assert(Size != 0 && "Size should be non-zero");

  if (Alignment <= PageSize)
    return Size;

  return Size + Alignment - PageSize;
}

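// Rounds Ptr up to the nearest multiple of Alignment (a power of two).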
uintptr_t GuardedPoolAllocator::alignUp(uintptr_t Ptr, size_t Alignment) {
  assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
  assert(Alignment != 0 && "Alignment should be non-zero");
  if ((Ptr & (Alignment - 1)) == 0)
    return Ptr;

  Ptr += Alignment - (Ptr & (Alignment - 1));
  return Ptr;
}

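// Rounds Ptr down to the nearest multiple of Alignment (a power of two).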
uintptr_t GuardedPoolAllocator::alignDown(uintptr_t Ptr, size_t Alignment) {
  assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
  assert(Alignment != 0 && "Alignment should be non-zero");
  if ((Ptr & (Alignment - 1)) == 0)
    return Ptr;

  Ptr -= Ptr & (Alignment - 1);
  return Ptr;
}

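// Returns a guarded allocation of Size bytes with the requested Alignment,
// or nullptr if the request cannot be sampled or serviced, in which case the
// caller falls back to the supporting allocator.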
void *GuardedPoolAllocator::allocate(size_t Size, size_t Alignment) {
  // GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled,
  // fall back to the supporting allocator.
  if (State.GuardedPagePoolEnd == 0) {
    getThreadLocals()->NextSampleCounter =
        (AdjustedSampleRatePlusOne - 1) &
        ThreadLocalPackedVariables::NextSampleCounterMask;
    return nullptr;
  }

  if (Size == 0)
    Size = 1;
  if (Alignment == 0)
    Alignment = alignof(max_align_t);

  if (!isPowerOfTwo(Alignment) || Alignment > State.maximumAllocationSize() ||
      Size > State.maximumAllocationSize())
    return nullptr;

  size_t BackingSize = getRequiredBackingSize(Size, Alignment, State.PageSize);
  if (BackingSize > State.maximumAllocationSize())
    return nullptr;

  // Protect against recursion.
  if (getThreadLocals()->RecursiveGuard)
    return nullptr;
  ScopedRecursiveGuard SRG;

  size_t Index;
  {
    ScopedLock L(PoolMutex);
    Index = reserveSlot();
  }

  if (Index == kInvalidSlotID)
    return nullptr;

  uintptr_t SlotStart = State.slotToAddr(Index);
  AllocationMetadata *Meta = addrToMetadata(SlotStart);
  uintptr_t SlotEnd = State.slotToAddr(Index) + State.maximumAllocationSize();
  uintptr_t UserPtr;
  // Randomly choose whether to left-align or right-align the allocation, and
  // then apply the necessary adjustments to get an aligned pointer.
  if (getRandomUnsigned32() % 2 == 0)
    UserPtr = alignUp(SlotStart, Alignment);
  else
    UserPtr = alignDown(SlotEnd - Size, Alignment);

  assert(UserPtr >= SlotStart);
  assert(UserPtr + Size <= SlotEnd);

  // If a slot is multiple pages in size, and the allocation takes up a single
  // page, we can improve overflow detection by leaving the unused pages
  // unmapped.
  const size_t PageSize = State.PageSize;
  allocateInGuardedPool(
      reinterpret_cast<void *>(getPageAddr(UserPtr, PageSize)),
      roundUpTo(Size, PageSize));

  Meta->RecordAllocation(UserPtr, Size);
  {
    ScopedLock UL(BacktraceMutex);
    Meta->AllocationTrace.RecordBacktrace(Backtrace);
  }

  return reinterpret_cast<void *>(UserPtr);
}

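// Records the failure type and faulting address in the shared state, then
// deliberately faults so that the installed signal handler can produce a
// report.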
void GuardedPoolAllocator::trapOnAddress(uintptr_t Address, Error E) {
  State.FailureType = E;
  State.FailureAddress = Address;

  // Raise a SEGV by touching the first guard page.
  volatile char *p = reinterpret_cast<char *>(State.GuardedPagePool);
  *p = 0;
  // Normally, this would be __builtin_unreachable(), but because of
  // https://bugs.llvm.org/show_bug.cgi?id=47480, unreachable will DCE the
  // volatile store above, even though it has side effects.
  __builtin_trap();
}

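// Best-effort shutdown, e.g. from a crash handler: suppress further sampling
// on this thread and try to lock the pool. tryLock() is used because the
// current thread may already hold PoolMutex, and blocking here would
// deadlock.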
void GuardedPoolAllocator::stop() {
  getThreadLocals()->RecursiveGuard = true;
  PoolMutex.tryLock();
}

void GuardedPoolAllocator::deallocate(void *Ptr) {
  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
  uintptr_t UPtr = reinterpret_cast<uintptr_t>(Ptr);
  size_t Slot = State.getNearestSlot(UPtr);
  uintptr_t SlotStart = State.slotToAddr(Slot);
  AllocationMetadata *Meta = addrToMetadata(UPtr);
  if (Meta->Addr != UPtr) {
    // If multiple errors occur at the same time, use the first one.
    ScopedLock L(PoolMutex);
    trapOnAddress(UPtr, Error::INVALID_FREE);
  }

  // Intentionally scope the mutex here, so that other threads can access the
  // pool during the expensive markInaccessible() call.
  {
    ScopedLock L(PoolMutex);
    if (Meta->IsDeallocated) {
      trapOnAddress(UPtr, Error::DOUBLE_FREE);
    }

    // Ensure that the deallocation is recorded before marking the page as
    // inaccessible. Otherwise, a racy use-after-free will have inconsistent
    // metadata.
    Meta->RecordDeallocation();

    // Ensure that the unwinder is not called if the recursive flag is set,
    // otherwise non-reentrant unwinders may deadlock.
    if (!getThreadLocals()->RecursiveGuard) {
      ScopedRecursiveGuard SRG;
      ScopedLock UL(BacktraceMutex);
      Meta->DeallocationTrace.RecordBacktrace(Backtrace);
    }
  }

  deallocateInGuardedPool(reinterpret_cast<void *>(SlotStart),
                          State.maximumAllocationSize());

  // And finally, lock again to release the slot back into the pool.
  ScopedLock L(PoolMutex);
  freeSlot(Slot);
}

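// Returns the size originally requested for a live allocation owned by this
// allocator.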
size_t GuardedPoolAllocator::getSize(const void *Ptr) {
  assert(pointerIsMine(Ptr));
  ScopedLock L(PoolMutex);
  AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
  assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
  return Meta->RequestedSize;
}

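// Maps an address inside the guarded pool to the metadata entry for its slot.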
AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {
  return &Metadata[State.getNearestSlot(Ptr)];
}

size_t GuardedPoolAllocator::reserveSlot() {
  // Avoid potential reuse of a slot before we have made at least a single
  // allocation in each slot. Helps with our use-after-free detection.
  if (NumSampledAllocations < State.MaxSimultaneousAllocations)
    return NumSampledAllocations++;

  if (FreeSlotsLength == 0)
    return kInvalidSlotID;

  size_t ReservedIndex = getRandomUnsigned32() % FreeSlotsLength;
  size_t SlotIndex = FreeSlots[ReservedIndex];
  FreeSlots[ReservedIndex] = FreeSlots[--FreeSlotsLength];
  return SlotIndex;
}

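// Returns SlotIndex to the pool of free slots so that it can be handed out
// again by a later reserveSlot() call.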
void GuardedPoolAllocator::freeSlot(size_t SlotIndex) {
  assert(FreeSlotsLength < State.MaxSimultaneousAllocations);
  FreeSlots[FreeSlotsLength++] = SlotIndex;
}

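// Draws the next value from a per-thread xorshift32 PRNG (Marsaglia's
// 13/17/5 variant) and stores the updated state back in the thread-local.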
uint32_t GuardedPoolAllocator::getRandomUnsigned32() {
  uint32_t RandomState = getThreadLocals()->RandomState;
  RandomState ^= RandomState << 13;
  RandomState ^= RandomState >> 17;
  RandomState ^= RandomState << 5;
  getThreadLocals()->RandomState = RandomState;
  return RandomState;
}
} // namespace gwp_asan