1 //===-- guarded_pool_allocator.h --------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #ifndef GWP_ASAN_GUARDED_POOL_ALLOCATOR_H_
10 #define GWP_ASAN_GUARDED_POOL_ALLOCATOR_H_
11 
12 #include "gwp_asan/definitions.h"
13 #include "gwp_asan/mutex.h"
14 #include "gwp_asan/options.h"
15 #include "gwp_asan/random.h"
16 #include "gwp_asan/stack_trace_compressor.h"
17 
18 #include <stddef.h>
19 #include <stdint.h>
20 
21 namespace gwp_asan {
// This class is the primary implementation of the allocator portion of GWP-
// ASan. It is the sole owner of the pool of sequentially allocated guarded
// slots. It should always be treated as a singleton.
//
// Functions in the public interface of this class are thread-compatible until
// init() is called, at which point they become thread-safe (unless specified
// otherwise).
29 class GuardedPoolAllocator {
30 public:
31   static constexpr uint64_t kInvalidThreadID = UINT64_MAX;
32 
33   enum class Error {
34     UNKNOWN,
35     USE_AFTER_FREE,
36     DOUBLE_FREE,
37     INVALID_FREE,
38     BUFFER_OVERFLOW,
39     BUFFER_UNDERFLOW
40   };
41 
42   struct AllocationMetadata {
43     // The number of bytes used to store a compressed stack frame. On 64-bit
44     // platforms, assuming a compression ratio of 50%, this should allow us to
45     // store ~64 frames per trace.
46     static constexpr size_t kStackFrameStorageBytes = 256;
47 
48     // Maximum number of stack frames to collect on allocation/deallocation. The
49     // actual number of collected frames may be less than this as the stack
50     // frames are compressed into a fixed memory range.
51     static constexpr size_t kMaxTraceLengthToCollect = 128;
52 
53     // Records the given allocation metadata into this struct.
54     void RecordAllocation(uintptr_t Addr, size_t Size,
55                           options::Backtrace_t Backtrace);
56 
57     // Record that this allocation is now deallocated.
58     void RecordDeallocation(options::Backtrace_t Backtrace);
59 
60     struct CallSiteInfo {
61       // The compressed backtrace to the allocation/deallocation.
62       uint8_t CompressedTrace[kStackFrameStorageBytes];
63       // The thread ID for this trace, or kInvalidThreadID if not available.
64       uint64_t ThreadID = kInvalidThreadID;
65       // The size of the compressed trace (in bytes). Zero indicates that no
66       // trace was collected.
67       size_t TraceSize = 0;
68     };
69 
70     // The address of this allocation.
71     uintptr_t Addr = 0;
72     // Represents the actual size of the allocation.
73     size_t Size = 0;
74 
75     CallSiteInfo AllocationTrace;
76     CallSiteInfo DeallocationTrace;
77 
78     // Whether this allocation has been deallocated yet.
79     bool IsDeallocated = false;
80   };
81 
82   // During program startup, we must ensure that memory allocations do not land
83   // in this allocation pool if the allocator decides to runtime-disable
84   // GWP-ASan. The constructor value-initialises the class such that if no
85   // further initialisation takes place, calls to shouldSample() and
86   // pointerIsMine() will return false.
87   constexpr GuardedPoolAllocator(){};
88   GuardedPoolAllocator(const GuardedPoolAllocator &) = delete;
89   GuardedPoolAllocator &operator=(const GuardedPoolAllocator &) = delete;
90 
91   // Note: This class is expected to be a singleton for the lifetime of the
92   // program. If this object is initialised, it will leak the guarded page pool
93   // and metadata allocations during destruction. We can't clean up these areas
94   // as this may cause a use-after-free on shutdown.
95   ~GuardedPoolAllocator() = default;
96 
97   // Initialise the rest of the members of this class. Create the allocation
98   // pool using the provided options. See options.inc for runtime configuration
99   // options.
100   void init(const options::Options &Opts);
101 
102   // Return whether the allocation should be randomly chosen for sampling.
103   GWP_ASAN_ALWAYS_INLINE bool shouldSample() {
104     // NextSampleCounter == 0 means we "should regenerate the counter".
105     //                   == 1 means we "should sample this allocation".
106     if (GWP_ASAN_UNLIKELY(ThreadLocals.NextSampleCounter == 0))
107       ThreadLocals.NextSampleCounter =
108           (getRandomUnsigned32() % AdjustedSampleRate) + 1;
109 
110     return GWP_ASAN_UNLIKELY(--ThreadLocals.NextSampleCounter == 0);
111   }
112 
113   // Returns whether the provided pointer is a current sampled allocation that
114   // is owned by this pool.
115   GWP_ASAN_ALWAYS_INLINE bool pointerIsMine(const void *Ptr) const {
116     uintptr_t P = reinterpret_cast<uintptr_t>(Ptr);
117     return GuardedPagePool <= P && P < GuardedPagePoolEnd;
118   }
119 
120   // Allocate memory in a guarded slot, and return a pointer to the new
121   // allocation. Returns nullptr if the pool is empty, the requested size is too
122   // large for this pool to handle, or the requested size is zero.
123   void *allocate(size_t Size);
124 
125   // Deallocate memory in a guarded slot. The provided pointer must have been
126   // allocated using this pool. This will set the guarded slot as inaccessible.
127   void deallocate(void *Ptr);
128 
129   // Returns the size of the allocation at Ptr.
130   size_t getSize(const void *Ptr);
131 
132   // Returns the largest allocation that is supported by this pool. Any
133   // allocations larger than this should go to the regular system allocator.
134   size_t maximumAllocationSize() const;
135 
136   // Dumps an error report (including allocation and deallocation stack traces).
137   // An optional error may be provided if the caller knows what the error is
138   // ahead of time. This is primarily a helper function to locate the static
139   // singleton pointer and call the internal version of this function. This
140   // method is never thread safe, and should only be called when fatal errors
141   // occur.
142   static void reportError(uintptr_t AccessPtr, Error E = Error::UNKNOWN);
143 
144   // Get the current thread ID, or kInvalidThreadID if failure. Note: This
145   // implementation is platform-specific.
146   static uint64_t getThreadID();
147 
148 private:
149   static constexpr size_t kInvalidSlotID = SIZE_MAX;
150 
151   // These functions anonymously map memory or change the permissions of mapped
152   // memory into this process in a platform-specific way. Pointer and size
153   // arguments are expected to be page-aligned. These functions will never
154   // return on error, instead electing to kill the calling process on failure.
155   // Note that memory is initially mapped inaccessible. In order for RW
156   // mappings, call mapMemory() followed by markReadWrite() on the returned
157   // pointer.
158   void *mapMemory(size_t Size) const;
159   void markReadWrite(void *Ptr, size_t Size) const;
160   void markInaccessible(void *Ptr, size_t Size) const;
161 
162   // Get the page size from the platform-specific implementation. Only needs to
163   // be called once, and the result should be cached in PageSize in this class.
164   static size_t getPlatformPageSize();
165 
166   // Install the SIGSEGV crash handler for printing use-after-free and heap-
167   // buffer-{under|over}flow exceptions. This is platform specific as even
168   // though POSIX and Windows both support registering handlers through
169   // signal(), we have to use platform-specific signal handlers to obtain the
170   // address that caused the SIGSEGV exception.
171   static void installSignalHandlers();
172 
173   // Returns the index of the slot that this pointer resides in. If the pointer
174   // is not owned by this pool, the result is undefined.
175   size_t addrToSlot(uintptr_t Ptr) const;
176 
177   // Returns the address of the N-th guarded slot.
178   uintptr_t slotToAddr(size_t N) const;
179 
180   // Returns a pointer to the metadata for the owned pointer. If the pointer is
181   // not owned by this pool, the result is undefined.
182   AllocationMetadata *addrToMetadata(uintptr_t Ptr) const;
183 
184   // Returns the address of the page that this pointer resides in.
185   uintptr_t getPageAddr(uintptr_t Ptr) const;
186 
187   // Gets the nearest slot to the provided address.
188   size_t getNearestSlot(uintptr_t Ptr) const;
189 
190   // Returns whether the provided pointer is a guard page or not. The pointer
191   // must be within memory owned by this pool, else the result is undefined.
192   bool isGuardPage(uintptr_t Ptr) const;
193 
194   // Reserve a slot for a new guarded allocation. Returns kInvalidSlotID if no
195   // slot is available to be reserved.
196   size_t reserveSlot();
197 
198   // Unreserve the guarded slot.
199   void freeSlot(size_t SlotIndex);
200 
201   // Returns the offset (in bytes) between the start of a guarded slot and where
202   // the start of the allocation should take place. Determined using the size of
203   // the allocation and the options provided at init-time.
204   uintptr_t allocationSlotOffset(size_t AllocationSize) const;
205 
206   // Returns the diagnosis for an unknown error. If the diagnosis is not
207   // Error::INVALID_FREE or Error::UNKNOWN, the metadata for the slot
208   // responsible for the error is placed in *Meta.
209   Error diagnoseUnknownError(uintptr_t AccessPtr, AllocationMetadata **Meta);
210 
211   void reportErrorInternal(uintptr_t AccessPtr, Error E);
212 
213   // Cached page size for this system in bytes.
214   size_t PageSize = 0;
215 
216   // A mutex to protect the guarded slot and metadata pool for this class.
217   Mutex PoolMutex;
218   // The number of guarded slots that this pool holds.
219   size_t MaxSimultaneousAllocations = 0;
220   // Record the number allocations that we've sampled. We store this amount so
221   // that we don't randomly choose to recycle a slot that previously had an
222   // allocation before all the slots have been utilised.
223   size_t NumSampledAllocations = 0;
224   // Pointer to the pool of guarded slots. Note that this points to the start of
225   // the pool (which is a guard page), not a pointer to the first guarded page.
226   uintptr_t GuardedPagePool = UINTPTR_MAX;
227   uintptr_t GuardedPagePoolEnd = 0;
228   // Pointer to the allocation metadata (allocation/deallocation stack traces),
229   // if any.
230   AllocationMetadata *Metadata = nullptr;
231 
232   // Pointer to an array of free slot indexes.
233   size_t *FreeSlots = nullptr;
234   // The current length of the list of free slots.
235   size_t FreeSlotsLength = 0;
236 
237   // See options.{h, inc} for more information.
238   bool PerfectlyRightAlign = false;
239 
240   // Printf function supplied by the implementing allocator. We can't (in
241   // general) use printf() from the cstdlib as it may malloc(), causing infinite
242   // recursion.
243   options::Printf_t Printf = nullptr;
244   options::Backtrace_t Backtrace = nullptr;
245   options::PrintBacktrace_t PrintBacktrace = nullptr;
246 
247   // The adjusted sample rate for allocation sampling. Default *must* be
248   // nonzero, as dynamic initialisation may call malloc (e.g. from libstdc++)
249   // before GPA::init() is called. This would cause an error in shouldSample(),
250   // where we would calculate modulo zero. This value is set UINT32_MAX, as when
251   // GWP-ASan is disabled, we wish to never spend wasted cycles recalculating
252   // the sample rate.
253   uint32_t AdjustedSampleRate = UINT32_MAX;
254 
255   // Pack the thread local variables into a struct to ensure that they're in
256   // the same cache line for performance reasons. These are the most touched
257   // variables in GWP-ASan.
258   struct alignas(8) ThreadLocalPackedVariables {
259     constexpr ThreadLocalPackedVariables() {}
260     // Thread-local decrementing counter that indicates that a given allocation
261     // should be sampled when it reaches zero.
262     uint32_t NextSampleCounter = 0;
263     // Guard against recursivity. Unwinders often contain complex behaviour that
264     // may not be safe for the allocator (i.e. the unwinder calls dlopen(),
265     // which calls malloc()). When recursive behaviour is detected, we will
266     // automatically fall back to the supporting allocator to supply the
267     // allocation.
268     bool RecursiveGuard = false;
269   };
270   static GWP_ASAN_TLS_INITIAL_EXEC ThreadLocalPackedVariables ThreadLocals;
271 };
272 } // namespace gwp_asan
273 
274 #endif // GWP_ASAN_GUARDED_POOL_ALLOCATOR_H_
275