//===-- guarded_pool_allocator.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "gwp_asan/guarded_pool_allocator.h"

#include "gwp_asan/optional/segv_handler.h"
#include "gwp_asan/options.h"
#include "gwp_asan/random.h"
#include "gwp_asan/utilities.h"

// RHEL creates the PRIu64 format macro (for printing uint64_t's) only when
// this macro is defined before including <inttypes.h>.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif

#include <assert.h>
#include <inttypes.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;

namespace gwp_asan {
namespace {
// Pointer to the singleton version of this class. Set during initialisation,
// this allows the signal handler to find the allocator in order to deduce the
// root cause of failures. Must not be referenced by users outside this
// translation unit, in order to avoid the init-order-fiasco.
GuardedPoolAllocator *SingletonPtr = nullptr;

// RAII helper that sets the referenced flag for its scope and clears it on
// destruction; used below to guard against recursion.
class ScopedBoolean {
public:
  ScopedBoolean(bool &B) : Bool(B) { Bool = true; }
  ~ScopedBoolean() { Bool = false; }

private:
  bool &Bool;
};
} // anonymous namespace

// Gets the singleton implementation of this class. Thread-compatible until
// init() is called, thread-safe afterwards.
GuardedPoolAllocator *GuardedPoolAllocator::getSingleton() {
  return SingletonPtr;
}

void GuardedPoolAllocator::init(const options::Options &Opts) {
  // Note: We return early here if GWP-ASan is not available. This prevents
  // heap-allocation of class members, as well as the mmap() of the guarded
  // slots.
  if (!Opts.Enabled || Opts.SampleRate == 0 ||
      Opts.MaxSimultaneousAllocations == 0)
    return;

  Check(Opts.SampleRate >= 0, "GWP-ASan Error: SampleRate is < 0.");
  Check(Opts.SampleRate <= INT32_MAX, "GWP-ASan Error: SampleRate is > 2^31.");
  Check(Opts.MaxSimultaneousAllocations >= 0,
        "GWP-ASan Error: MaxSimultaneousAllocations is < 0.");

  SingletonPtr = this;
  Backtrace = Opts.Backtrace;

  State.MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;

  State.PageSize = getPlatformPageSize();

  PerfectlyRightAlign = Opts.PerfectlyRightAlign;

  size_t PoolBytesRequired =
      State.PageSize * (1 + State.MaxSimultaneousAllocations) +
      State.MaxSimultaneousAllocations * State.maximumAllocationSize();
  void *GuardedPoolMemory = mapMemory(PoolBytesRequired, kGwpAsanGuardPageName);
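// Layout sketch (illustrative): the size computation above implies that guard
// pages and allocation slots interleave, with one trailing guard page:
//
//   [guard] [slot 0] [guard] [slot 1] [guard] ... [slot N-1] [guard]
//
// where N == MaxSimultaneousAllocations, each guard region is one page, and
// each slot spans maximumAllocationSize() bytes. Every slot is therefore
// bounded by inaccessible pages on both sides.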
  size_t BytesRequired = State.MaxSimultaneousAllocations * sizeof(*Metadata);
  Metadata = reinterpret_cast<AllocationMetadata *>(
      mapMemory(BytesRequired, kGwpAsanMetadataName));
  markReadWrite(Metadata, BytesRequired, kGwpAsanMetadataName);

  // Allocate memory and set up the free pages queue.
  BytesRequired = State.MaxSimultaneousAllocations * sizeof(*FreeSlots);
  FreeSlots = reinterpret_cast<size_t *>(
      mapMemory(BytesRequired, kGwpAsanFreeSlotsName));
  markReadWrite(FreeSlots, BytesRequired, kGwpAsanFreeSlotsName);

  // Multiply the sample rate by 2 so that NextSampleCounter below is drawn
  // uniformly from [1, 2 * SampleRate]. The mean draw is about SampleRate,
  // which gives a good, fast approximation of a (1 / SampleRate) chance of
  // sampling each allocation.
  if (Opts.SampleRate != 1)
    AdjustedSampleRatePlusOne = static_cast<uint32_t>(Opts.SampleRate) * 2 + 1;
  else
    AdjustedSampleRatePlusOne = 2;

  initPRNG();
  ThreadLocals.NextSampleCounter =
      (getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1;

  State.GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory);
  State.GuardedPagePoolEnd =
      reinterpret_cast<uintptr_t>(GuardedPoolMemory) + PoolBytesRequired;

  if (Opts.InstallForkHandlers)
    installAtFork();
}

void GuardedPoolAllocator::disable() { PoolMutex.lock(); }

void GuardedPoolAllocator::enable() { PoolMutex.unlock(); }

void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
                                   void *Arg) {
  uintptr_t Start = reinterpret_cast<uintptr_t>(Base);
  for (size_t i = 0; i < State.MaxSimultaneousAllocations; ++i) {
    const AllocationMetadata &Meta = Metadata[i];
    if (Meta.Addr && !Meta.IsDeallocated && Meta.Addr >= Start &&
        Meta.Addr < Start + Size)
      Cb(Meta.Addr, Meta.Size, Arg);
  }
}

void GuardedPoolAllocator::uninitTestOnly() {
  if (State.GuardedPagePool) {
    unmapMemory(reinterpret_cast<void *>(State.GuardedPagePool),
                State.GuardedPagePoolEnd - State.GuardedPagePool,
                kGwpAsanGuardPageName);
    State.GuardedPagePool = 0;
    State.GuardedPagePoolEnd = 0;
  }
  if (Metadata) {
    unmapMemory(Metadata, State.MaxSimultaneousAllocations * sizeof(*Metadata),
                kGwpAsanMetadataName);
    Metadata = nullptr;
  }
  if (FreeSlots) {
    unmapMemory(FreeSlots,
                State.MaxSimultaneousAllocations * sizeof(*FreeSlots),
                kGwpAsanFreeSlotsName);
    FreeSlots = nullptr;
  }
}

static uintptr_t getPageAddr(uintptr_t Ptr, uintptr_t PageSize) {
  return Ptr & ~(PageSize - 1);
}

void *GuardedPoolAllocator::allocate(size_t Size) {
  // GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled,
  // fall back to the supporting allocator.
  if (State.GuardedPagePoolEnd == 0)
    return nullptr;

  // Protect against recursion (e.g. the unwinder allocating while we record a
  // backtrace below).
  if (ThreadLocals.RecursiveGuard)
    return nullptr;
  ScopedBoolean SB(ThreadLocals.RecursiveGuard);

  if (Size == 0 || Size > State.maximumAllocationSize())
    return nullptr;

  size_t Index;
  {
    ScopedLock L(PoolMutex);
    Index = reserveSlot();
  }

  if (Index == kInvalidSlotID)
    return nullptr;

  uintptr_t Ptr = State.slotToAddr(Index);
  // Should we right-align this allocation?
  if (getRandomUnsigned32() % 2 == 0) {
    AlignmentStrategy Align = AlignmentStrategy::DEFAULT;
    if (PerfectlyRightAlign)
      Align = AlignmentStrategy::PERFECT;
    Ptr +=
        State.maximumAllocationSize() - rightAlignedAllocationSize(Size, Align);
  }
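  // Right-alignment sketch (illustrative): pushing the allocation toward the
  // slot's end places its last byte as close as possible to the trailing
  // guard page, so a linear buffer overflow faults almost immediately. For
  // example, with PerfectlyRightAlign a 13-byte allocation in a 4096-byte
  // slot would start at slot offset 4083 (byte-exact, possibly misaligned);
  // with the default strategy the size is first rounded up so the returned
  // pointer keeps platform alignment. Left-aligned allocations (the other
  // half) instead sit against the preceding guard page and catch underflows.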
  AllocationMetadata *Meta = addrToMetadata(Ptr);

  // If a slot is multiple pages in size, and the allocation takes up a single
  // page, we can improve overflow detection by leaving the unused pages as
  // unmapped.
  markReadWrite(reinterpret_cast<void *>(getPageAddr(Ptr, State.PageSize)),
                Size, kGwpAsanAliveSlotName);

  Meta->RecordAllocation(Ptr, Size);
  Meta->AllocationTrace.RecordBacktrace(Backtrace);

  return reinterpret_cast<void *>(Ptr);
}

void GuardedPoolAllocator::trapOnAddress(uintptr_t Address, Error E) {
  State.FailureType = E;
  State.FailureAddress = Address;

  // Raise a SEGV by touching the first guard page.
  volatile char *p = reinterpret_cast<char *>(State.GuardedPagePool);
  *p = 0;
  __builtin_unreachable();
}

void GuardedPoolAllocator::stop() {
  ThreadLocals.RecursiveGuard = true;
  PoolMutex.tryLock();
}

void GuardedPoolAllocator::deallocate(void *Ptr) {
  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
  uintptr_t UPtr = reinterpret_cast<uintptr_t>(Ptr);
  size_t Slot = State.getNearestSlot(UPtr);
  uintptr_t SlotStart = State.slotToAddr(Slot);
  AllocationMetadata *Meta = addrToMetadata(UPtr);
  if (Meta->Addr != UPtr) {
    // If multiple errors occur at the same time, use the first one.
    ScopedLock L(PoolMutex);
    trapOnAddress(UPtr, Error::INVALID_FREE);
  }

  // Intentionally scope the mutex here, so that other threads can access the
  // pool during the expensive markInaccessible() call.
  {
    ScopedLock L(PoolMutex);
    if (Meta->IsDeallocated) {
      trapOnAddress(UPtr, Error::DOUBLE_FREE);
    }

    // Ensure that the deallocation is recorded before marking the page as
    // inaccessible. Otherwise, a racy use-after-free will have inconsistent
    // metadata.
    Meta->RecordDeallocation();

    // Ensure that the unwinder is not called if the recursive flag is set,
    // otherwise non-reentrant unwinders may deadlock.
    if (!ThreadLocals.RecursiveGuard) {
      ScopedBoolean B(ThreadLocals.RecursiveGuard);
      Meta->DeallocationTrace.RecordBacktrace(Backtrace);
    }
  }

  markInaccessible(reinterpret_cast<void *>(SlotStart),
                   State.maximumAllocationSize(), kGwpAsanGuardPageName);

  // And finally, lock again to release the slot back into the pool.
  ScopedLock L(PoolMutex);
  freeSlot(Slot);
}

size_t GuardedPoolAllocator::getSize(const void *Ptr) {
  assert(pointerIsMine(Ptr));
  ScopedLock L(PoolMutex);
  AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
  assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
  return Meta->Size;
}

AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {
  return &Metadata[State.getNearestSlot(Ptr)];
}

size_t GuardedPoolAllocator::reserveSlot() {
  // Avoid potential reuse of a slot before we have made at least a single
  // allocation in each slot. Helps with our use-after-free detection.
  if (NumSampledAllocations < State.MaxSimultaneousAllocations)
    return NumSampledAllocations++;

  if (FreeSlotsLength == 0)
    return kInvalidSlotID;

  size_t ReservedIndex = getRandomUnsigned32() % FreeSlotsLength;
  size_t SlotIndex = FreeSlots[ReservedIndex];
  FreeSlots[ReservedIndex] = FreeSlots[--FreeSlotsLength];
  return SlotIndex;
}

void GuardedPoolAllocator::freeSlot(size_t SlotIndex) {
  assert(FreeSlotsLength < State.MaxSimultaneousAllocations);
  FreeSlots[FreeSlotsLength++] = SlotIndex;
}

GWP_ASAN_TLS_INITIAL_EXEC
GuardedPoolAllocator::ThreadLocalPackedVariables
    GuardedPoolAllocator::ThreadLocals;
} // namespace gwp_asan
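// Usage sketch (illustrative, assuming the shouldSample(), allocate(),
// deallocate(), and pointerIsMine() members declared in
// gwp_asan/guarded_pool_allocator.h; my_malloc/my_free and the fallback_*
// routines are hypothetical names for the embedding "supporting" allocator):
//
//   static gwp_asan::GuardedPoolAllocator GuardedAlloc; // Embedder-owned.
//   // ... GuardedAlloc.init(Opts) is called once at startup ...
//
//   void *my_malloc(size_t Size) {
//     if (GuardedAlloc.shouldSample())
//       if (void *Ptr = GuardedAlloc.allocate(Size))
//         return Ptr; // Sampled: served from the guarded pool.
//     return fallback_malloc(Size); // Not sampled, or pool exhausted.
//   }
//
//   void my_free(void *Ptr) {
//     if (GuardedAlloc.pointerIsMine(Ptr))
//       return GuardedAlloc.deallocate(Ptr);
//     fallback_free(Ptr);
//   }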