//===-- guarded_pool_allocator.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "gwp_asan/guarded_pool_allocator.h"

#include "gwp_asan/options.h"
#include "gwp_asan/utilities.h"

#include <assert.h>

using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;

namespace gwp_asan {
namespace {
// Pointer to the singleton version of this class. Set during initialisation,
// this allows the signal handler to find this class in order to deduce the
// root cause of failures. Must not be referenced by users outside this
// translation unit, in order to avoid init-order-fiasco.
GuardedPoolAllocator *SingletonPtr = nullptr;

size_t roundUpTo(size_t Size, size_t Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}

uintptr_t getPageAddr(uintptr_t Ptr, uintptr_t PageSize) {
  return Ptr & ~(PageSize - 1);
}
} // anonymous namespace

// Gets the singleton implementation of this class. Thread-compatible until
// init() is called, thread-safe afterwards.
GuardedPoolAllocator *GuardedPoolAllocator::getSingleton() {
  return SingletonPtr;
}

void GuardedPoolAllocator::init(const options::Options &Opts) {
  // Note: We return early from init() here if GWP-ASan is not enabled. This
  // stops the mmap() of the metadata, the free-slots array, and the guarded
  // slots.
  if (!Opts.Enabled || Opts.SampleRate == 0 ||
      Opts.MaxSimultaneousAllocations == 0)
    return;

  Check(Opts.SampleRate >= 0, "GWP-ASan Error: SampleRate is < 0.");
  Check(Opts.SampleRate < (1 << 30), "GWP-ASan Error: SampleRate is >= 2^30.");
  Check(Opts.MaxSimultaneousAllocations >= 0,
        "GWP-ASan Error: MaxSimultaneousAllocations is < 0.");

  SingletonPtr = this;
  Backtrace = Opts.Backtrace;

  State.MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;

  const size_t PageSize = getPlatformPageSize();
  // getPageAddr() and roundUpTo() assume the page size to be a power of 2.
  assert((PageSize & (PageSize - 1)) == 0);
  State.PageSize = PageSize;

  PerfectlyRightAlign = Opts.PerfectlyRightAlign;

  // The pool is laid out as N slots interleaved with N + 1 guard pages, so it
  // both starts and ends with a guard page.
  size_t PoolBytesRequired =
      PageSize * (1 + State.MaxSimultaneousAllocations) +
      State.MaxSimultaneousAllocations * State.maximumAllocationSize();
  assert(PoolBytesRequired % PageSize == 0);
  void *GuardedPoolMemory = reserveGuardedPool(PoolBytesRequired);

  size_t BytesRequired =
      roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata), PageSize);
  Metadata = reinterpret_cast<AllocationMetadata *>(
      map(BytesRequired, kGwpAsanMetadataName));

  // Allocate memory for and set up the free-slots list.
  BytesRequired = roundUpTo(
      State.MaxSimultaneousAllocations * sizeof(*FreeSlots), PageSize);
  FreeSlots =
      reinterpret_cast<size_t *>(map(BytesRequired, kGwpAsanFreeSlotsName));

  // Multiply the sample rate by 2 to give a good, fast approximation for (1 /
  // SampleRate) chance of sampling.
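  // For example, with SampleRate == 5000, AdjustedSampleRatePlusOne == 10001,
  // and NextSampleCounter below is drawn uniformly from [1, 10000]; on
  // average, roughly one in every 5000 allocations is sampled, approximating
  // the requested 1/5000 rate.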
  if (Opts.SampleRate != 1)
    AdjustedSampleRatePlusOne = static_cast<uint32_t>(Opts.SampleRate) * 2 + 1;
  else
    AdjustedSampleRatePlusOne = 2;

  initPRNG();
  getThreadLocals()->NextSampleCounter =
      ((getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1) &
      ThreadLocalPackedVariables::NextSampleCounterMask;

  State.GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory);
  State.GuardedPagePoolEnd =
      reinterpret_cast<uintptr_t>(GuardedPoolMemory) + PoolBytesRequired;

  if (Opts.InstallForkHandlers)
    installAtFork();
}

void GuardedPoolAllocator::disable() { PoolMutex.lock(); }

void GuardedPoolAllocator::enable() { PoolMutex.unlock(); }

void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
                                   void *Arg) {
  uintptr_t Start = reinterpret_cast<uintptr_t>(Base);
  for (size_t i = 0; i < State.MaxSimultaneousAllocations; ++i) {
    const AllocationMetadata &Meta = Metadata[i];
    if (Meta.Addr && !Meta.IsDeallocated && Meta.Addr >= Start &&
        Meta.Addr < Start + Size)
      Cb(Meta.Addr, Meta.Size, Arg);
  }
}

void GuardedPoolAllocator::uninitTestOnly() {
  if (State.GuardedPagePool) {
    unreserveGuardedPool();
    State.GuardedPagePool = 0;
    State.GuardedPagePoolEnd = 0;
  }
  if (Metadata) {
    unmap(Metadata,
          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata),
                    State.PageSize));
    Metadata = nullptr;
  }
  if (FreeSlots) {
    unmap(FreeSlots,
          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*FreeSlots),
                    State.PageSize));
    FreeSlots = nullptr;
  }
  *getThreadLocals() = ThreadLocalPackedVariables();
}

void *GuardedPoolAllocator::allocate(size_t Size) {
  // GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled,
  // fall back to the supporting allocator. Reset the thread-local sample
  // counter to its maximum so that this slow path is taken as rarely as
  // possible.
  if (State.GuardedPagePoolEnd == 0) {
    getThreadLocals()->NextSampleCounter =
        (AdjustedSampleRatePlusOne - 1) &
        ThreadLocalPackedVariables::NextSampleCounterMask;
    return nullptr;
  }

  // Protect against recursive allocation, e.g. an allocation made while
  // recording the backtrace below.
  if (getThreadLocals()->RecursiveGuard)
    return nullptr;
  ScopedRecursiveGuard SRG;

  if (Size == 0 || Size > State.maximumAllocationSize())
    return nullptr;

  size_t Index;
  {
    ScopedLock L(PoolMutex);
    Index = reserveSlot();
  }

  if (Index == kInvalidSlotID)
    return nullptr;

  uintptr_t Ptr = State.slotToAddr(Index);
  // Should we right-align this allocation?
  if (getRandomUnsigned32() % 2 == 0) {
    AlignmentStrategy Align = AlignmentStrategy::DEFAULT;
    if (PerfectlyRightAlign)
      Align = AlignmentStrategy::PERFECT;
    Ptr +=
        State.maximumAllocationSize() - rightAlignedAllocationSize(Size, Align);
  }
  AllocationMetadata *Meta = addrToMetadata(Ptr);

  // If a slot is multiple pages in size, and the allocation takes up a single
  // page, we can improve overflow detection by leaving the unused pages
  // unmapped.
  const size_t PageSize = State.PageSize;
  allocateInGuardedPool(reinterpret_cast<void *>(getPageAddr(Ptr, PageSize)),
                        roundUpTo(Size, PageSize));

  Meta->RecordAllocation(Ptr, Size);
  Meta->AllocationTrace.RecordBacktrace(Backtrace);

  return reinterpret_cast<void *>(Ptr);
}

void GuardedPoolAllocator::trapOnAddress(uintptr_t Address, Error E) {
  State.FailureType = E;
  State.FailureAddress = Address;

  // Raise a SEGV by touching the first guard page.
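  // The pool both starts and ends with a guard page (see the layout note in
  // init()), so the store below is guaranteed to land on an inaccessible page
  // and fault deterministically.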
  volatile char *p = reinterpret_cast<char *>(State.GuardedPagePool);
  *p = 0;
  __builtin_unreachable();
}

void GuardedPoolAllocator::stop() {
  // Prevent further guarded allocations on this thread, and make a
  // best-effort attempt to quiesce the pool (tryLock() avoids deadlocking if
  // this thread already holds the mutex).
  getThreadLocals()->RecursiveGuard = true;
  PoolMutex.tryLock();
}

void GuardedPoolAllocator::deallocate(void *Ptr) {
  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
  uintptr_t UPtr = reinterpret_cast<uintptr_t>(Ptr);
  size_t Slot = State.getNearestSlot(UPtr);
  uintptr_t SlotStart = State.slotToAddr(Slot);
  AllocationMetadata *Meta = addrToMetadata(UPtr);
  if (Meta->Addr != UPtr) {
    // If multiple errors occur at the same time, use the first one.
    ScopedLock L(PoolMutex);
    trapOnAddress(UPtr, Error::INVALID_FREE);
  }

  // Intentionally scope the mutex here, so that other threads can access the
  // pool during the expensive deallocateInGuardedPool() call.
  {
    ScopedLock L(PoolMutex);
    if (Meta->IsDeallocated) {
      trapOnAddress(UPtr, Error::DOUBLE_FREE);
    }

    // Ensure that the deallocation is recorded before marking the page as
    // inaccessible. Otherwise, a racy use-after-free will have inconsistent
    // metadata.
    Meta->RecordDeallocation();

    // Ensure that the unwinder is not called if the recursive flag is set,
    // otherwise non-reentrant unwinders may deadlock.
    if (!getThreadLocals()->RecursiveGuard) {
      ScopedRecursiveGuard SRG;
      Meta->DeallocationTrace.RecordBacktrace(Backtrace);
    }
  }

  deallocateInGuardedPool(reinterpret_cast<void *>(SlotStart),
                          State.maximumAllocationSize());

  // And finally, lock again to release the slot back into the pool.
  ScopedLock L(PoolMutex);
  freeSlot(Slot);
}

size_t GuardedPoolAllocator::getSize(const void *Ptr) {
  assert(pointerIsMine(Ptr));
  ScopedLock L(PoolMutex);
  AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
  assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
  return Meta->Size;
}

AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {
  return &Metadata[State.getNearestSlot(Ptr)];
}

size_t GuardedPoolAllocator::reserveSlot() {
  // Avoid potential reuse of a slot before we have made at least a single
  // allocation in each slot. Helps with our use-after-free detection.
  if (NumSampledAllocations < State.MaxSimultaneousAllocations)
    return NumSampledAllocations++;

  if (FreeSlotsLength == 0)
    return kInvalidSlotID;

  // Pick a random free slot, then compact the list by moving the last entry
  // into the hole.
  size_t ReservedIndex = getRandomUnsigned32() % FreeSlotsLength;
  size_t SlotIndex = FreeSlots[ReservedIndex];
  FreeSlots[ReservedIndex] = FreeSlots[--FreeSlotsLength];
  return SlotIndex;
}

void GuardedPoolAllocator::freeSlot(size_t SlotIndex) {
  assert(FreeSlotsLength < State.MaxSimultaneousAllocations);
  FreeSlots[FreeSlotsLength++] = SlotIndex;
}

// Thread-local xorshift32 PRNG (Marsaglia's 13/17/5 shift triple). Fast and
// statistically adequate for sampling decisions; not cryptographically secure.
uint32_t GuardedPoolAllocator::getRandomUnsigned32() {
  uint32_t RandomState = getThreadLocals()->RandomState;
  RandomState ^= RandomState << 13;
  RandomState ^= RandomState >> 17;
  RandomState ^= RandomState << 5;
  getThreadLocals()->RandomState = RandomState;
  return RandomState;
}
} // namespace gwp_asan
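
// A minimal sketch of how an embedding allocator typically drives this class,
// assuming hypothetical fallback_malloc()/fallback_free() as the supporting
// allocator (my_malloc()/my_free() are illustrative names; shouldSample() is
// declared in gwp_asan/guarded_pool_allocator.h):
//
//   void *my_malloc(size_t Size) {
//     auto *GPA = gwp_asan::GuardedPoolAllocator::getSingleton();
//     // allocate() returns nullptr when GWP-ASan is disabled, the request is
//     // too large, or no slot is free; fall back in all of those cases.
//     if (GPA && GPA->shouldSample())
//       if (void *Ptr = GPA->allocate(Size))
//         return Ptr;
//     return fallback_malloc(Size);
//   }
//
//   void my_free(void *Ptr) {
//     auto *GPA = gwp_asan::GuardedPoolAllocator::getSingleton();
//     if (GPA && GPA->pointerIsMine(Ptr))
//       return GPA->deallocate(Ptr);
//     fallback_free(Ptr);
//   }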