1 //===-- combined.h ----------------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #ifndef SCUDO_COMBINED_H_
10 #define SCUDO_COMBINED_H_
11 
12 #include "chunk.h"
13 #include "common.h"
14 #include "flags.h"
15 #include "flags_parser.h"
16 #include "local_cache.h"
17 #include "mem_map.h"
18 #include "memtag.h"
19 #include "options.h"
20 #include "quarantine.h"
21 #include "report.h"
22 #include "secondary.h"
23 #include "stack_depot.h"
24 #include "string_utils.h"
25 #include "tsd.h"
26 
27 #include "scudo/interface.h"
28 
29 #ifdef GWP_ASAN_HOOKS
30 #include "gwp_asan/guarded_pool_allocator.h"
31 #include "gwp_asan/optional/backtrace.h"
32 #include "gwp_asan/optional/segv_handler.h"
33 #endif // GWP_ASAN_HOOKS
34 
35 extern "C" inline void EmptyCallback() {}
36 
37 #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
38 // This function is not part of the NDK so it does not appear in any public
39 // header files. We only declare/use it when targeting the platform.
40 extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf,
41                                                      size_t num_entries);
42 #endif
43 
44 namespace scudo {
45 
46 template <class Config, void (*PostInitCallback)(void) = EmptyCallback>
47 class Allocator {
48 public:
49   using PrimaryT = typename Config::template PrimaryT<Config>;
50   using SecondaryT = typename Config::template SecondaryT<Config>;
51   using CacheT = typename PrimaryT::CacheT;
52   typedef Allocator<Config, PostInitCallback> ThisT;
53   typedef typename Config::template TSDRegistryT<ThisT> TSDRegistryT;
54 
55   void callPostInitCallback() {
56     pthread_once(&PostInitNonce, PostInitCallback);
57   }
58 
59   struct QuarantineCallback {
60     explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
61         : Allocator(Instance), Cache(LocalCache) {}
62 
63     // Chunk recycling function, returns a quarantined chunk to the backend,
64     // first making sure it hasn't been tampered with.
65     void recycle(void *Ptr) {
66       Chunk::UnpackedHeader Header;
67       Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
68       if (UNLIKELY(Header.State != Chunk::State::Quarantined))
69         reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
70 
71       Header.State = Chunk::State::Available;
72       Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
73 
74       if (allocatorSupportsMemoryTagging<Config>())
75         Ptr = untagPointer(Ptr);
76       void *BlockBegin = Allocator::getBlockBegin(Ptr, &Header);
77       Cache.deallocate(Header.ClassId, BlockBegin);
78     }
79 
80     // We take a shortcut when allocating a quarantine batch by working with the
81     // appropriate class ID instead of using Size. The compiler should optimize
82     // the class ID computation and work with the associated cache directly.
83     void *allocate(UNUSED uptr Size) {
84       const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
85           sizeof(QuarantineBatch) + Chunk::getHeaderSize());
86       void *Ptr = Cache.allocate(QuarantineClassId);
87       // Quarantine batch allocation failure is fatal.
88       if (UNLIKELY(!Ptr))
89         reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));
90 
91       Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
92                                      Chunk::getHeaderSize());
93       Chunk::UnpackedHeader Header = {};
94       Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
95       Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
96       Header.State = Chunk::State::Allocated;
97       Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
98 
99       // Reset tag to 0 as this chunk may have been previously used for a tagged
100       // user allocation.
101       if (UNLIKELY(useMemoryTagging<Config>(Allocator.Primary.Options.load())))
102         storeTags(reinterpret_cast<uptr>(Ptr),
103                   reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));
104 
105       return Ptr;
106     }
107 
108     void deallocate(void *Ptr) {
109       const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
110           sizeof(QuarantineBatch) + Chunk::getHeaderSize());
111       Chunk::UnpackedHeader Header;
112       Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
113 
114       if (UNLIKELY(Header.State != Chunk::State::Allocated))
115         reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
116       DCHECK_EQ(Header.ClassId, QuarantineClassId);
117       DCHECK_EQ(Header.Offset, 0);
118       DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
119 
120       Header.State = Chunk::State::Available;
121       Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
122       Cache.deallocate(QuarantineClassId,
123                        reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
124                                                 Chunk::getHeaderSize()));
125     }
126 
127   private:
128     ThisT &Allocator;
129     CacheT &Cache;
130   };
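  // Illustrative layout of a quarantine batch chunk produced by the callback
  // above:
  //   [ 8-byte chunk header | QuarantineBatch ]
  // carved out of the size class fitting sizeof(QuarantineBatch) plus the
  // header, which is why allocate()/deallocate() recompute the class ID
  // instead of tracking a size.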
131 
132   typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
133   typedef typename QuarantineT::CacheT QuarantineCacheT;
134 
135   void init() {
136     performSanityChecks();
137 
138     // Check if hardware CRC32 is supported in the binary and by the platform;
139     // if so, opt for the CRC32 hardware version of the checksum.
140     if (&computeHardwareCRC32 && hasHardwareCRC32())
141       HashAlgorithm = Checksum::HardwareCRC32;
142 
143     if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
144       Cookie = static_cast<u32>(getMonotonicTime() ^
145                                 (reinterpret_cast<uptr>(this) >> 4));
146 
147     initFlags();
148     reportUnrecognizedFlags();
149 
150     // Store some flags locally.
151     if (getFlags()->may_return_null)
152       Primary.Options.set(OptionBit::MayReturnNull);
153     if (getFlags()->zero_contents)
154       Primary.Options.setFillContentsMode(ZeroFill);
155     else if (getFlags()->pattern_fill_contents)
156       Primary.Options.setFillContentsMode(PatternOrZeroFill);
157     if (getFlags()->dealloc_type_mismatch)
158       Primary.Options.set(OptionBit::DeallocTypeMismatch);
159     if (getFlags()->delete_size_mismatch)
160       Primary.Options.set(OptionBit::DeleteSizeMismatch);
161     if (allocatorSupportsMemoryTagging<Config>() &&
162         systemSupportsMemoryTagging())
163       Primary.Options.set(OptionBit::UseMemoryTagging);
164 
165     QuarantineMaxChunkSize =
166         static_cast<u32>(getFlags()->quarantine_max_chunk_size);
167 
168     Stats.init();
169     const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
170     Primary.init(ReleaseToOsIntervalMs);
171     Secondary.init(&Stats, ReleaseToOsIntervalMs);
172     Quarantine.init(
173         static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
174         static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
175 
176     mapAndInitializeRingBuffer();
177   }
178 
179   // Initialize the embedded GWP-ASan instance. Requires the main allocator to
180   // be functional, best called from PostInitCallback.
181   void initGwpAsan() {
182 #ifdef GWP_ASAN_HOOKS
183     gwp_asan::options::Options Opt;
184     Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
185     Opt.MaxSimultaneousAllocations =
186         getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
187     Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
188     Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
189     Opt.Recoverable = getFlags()->GWP_ASAN_Recoverable;
190     // Embedded GWP-ASan is locked through the Scudo atfork handler (via
191     // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
192     // handler.
193     Opt.InstallForkHandlers = false;
194     Opt.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
195     GuardedAlloc.init(Opt);
196 
197     if (Opt.InstallSignalHandlers)
198       gwp_asan::segv_handler::installSignalHandlers(
199           &GuardedAlloc, Printf,
200           gwp_asan::backtrace::getPrintBacktraceFunction(),
201           gwp_asan::backtrace::getSegvBacktraceFunction(),
202           Opt.Recoverable);
203 
204     GuardedAllocSlotSize =
205         GuardedAlloc.getAllocatorState()->maximumAllocationSize();
206     Stats.add(StatFree, static_cast<uptr>(Opt.MaxSimultaneousAllocations) *
207                             GuardedAllocSlotSize);
208 #endif // GWP_ASAN_HOOKS
209   }
210 
211 #ifdef GWP_ASAN_HOOKS
212   const gwp_asan::AllocationMetadata *getGwpAsanAllocationMetadata() {
213     return GuardedAlloc.getMetadataRegion();
214   }
215 
216   const gwp_asan::AllocatorState *getGwpAsanAllocatorState() {
217     return GuardedAlloc.getAllocatorState();
218   }
219 #endif // GWP_ASAN_HOOKS
220 
221   ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
222     TSDRegistry.initThreadMaybe(this, MinimalInit);
223   }
224 
225   void unmapTestOnly() {
226     unmapRingBuffer();
227     TSDRegistry.unmapTestOnly(this);
228     Primary.unmapTestOnly();
229     Secondary.unmapTestOnly();
230 #ifdef GWP_ASAN_HOOKS
231     if (getFlags()->GWP_ASAN_InstallSignalHandlers)
232       gwp_asan::segv_handler::uninstallSignalHandlers();
233     GuardedAlloc.uninitTestOnly();
234 #endif // GWP_ASAN_HOOKS
235   }
236 
237   TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
238   QuarantineT *getQuarantine() { return &Quarantine; }
239 
240   // The Cache must be provided zero-initialized.
241   void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
242 
243   // Release the resources used by a TSD, which involves:
244   // - draining the local quarantine cache to the global quarantine;
245   // - releasing the cached pointers back to the Primary;
246   // - unlinking the local stats from the global ones (destroying the cache does
247   //   the last two items).
248   void commitBack(TSD<ThisT> *TSD) {
249     TSD->assertLocked(/*BypassCheck=*/true);
250     Quarantine.drain(&TSD->getQuarantineCache(),
251                      QuarantineCallback(*this, TSD->getCache()));
252     TSD->getCache().destroy(&Stats);
253   }
254 
255   void drainCache(TSD<ThisT> *TSD) {
256     TSD->assertLocked(/*BypassCheck=*/true);
257     Quarantine.drainAndRecycle(&TSD->getQuarantineCache(),
258                                QuarantineCallback(*this, TSD->getCache()));
259     TSD->getCache().drain();
260   }
261   void drainCaches() { TSDRegistry.drainCaches(this); }
262 
263   ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
264     if (!allocatorSupportsMemoryTagging<Config>())
265       return Ptr;
266     auto UntaggedPtr = untagPointer(Ptr);
267     if (UntaggedPtr != Ptr)
268       return UntaggedPtr;
269     // Secondary, or pointer allocated while memory tagging is unsupported or
270     // disabled. The tag mismatch is okay in the latter case because tags will
271     // not be checked.
272     return addHeaderTag(Ptr);
273   }
274 
275   ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
276     if (!allocatorSupportsMemoryTagging<Config>())
277       return Ptr;
278     return addFixedTag(Ptr, 2);
279   }
280 
281   ALWAYS_INLINE void *addHeaderTag(void *Ptr) {
282     return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
283   }
284 
285   NOINLINE u32 collectStackTrace() {
286 #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
287     // Discard collectStackTrace() frame and allocator function frame.
288     constexpr uptr DiscardFrames = 2;
289     uptr Stack[MaxTraceSize + DiscardFrames];
290     uptr Size =
291         android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
292     Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
293     return Depot.insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
294 #else
295     return 0;
296 #endif
297   }
298 
299   uptr computeOddEvenMaskForPointerMaybe(const Options &Options, uptr Ptr,
300                                          uptr ClassId) {
301     if (!Options.get(OptionBit::UseOddEvenTags))
302       return 0;
303 
304     // If a chunk's tag is odd, we want the tags of the surrounding blocks to be
305     // even, and vice versa. Blocks are laid out Size bytes apart, and adding
306     // Size to Ptr will flip the least significant set bit of Size in Ptr, so
307     // that bit will have the pattern 010101... for consecutive blocks, which we
308     // can use to determine which tag mask to use.
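    // As a purely illustrative example (the 48-byte block size is
    // hypothetical): with 48-byte (0b110000) blocks, bit 4 of the block
    // address alternates between consecutive blocks, so the expression below
    // yields 0x5555 for one block and 0x5555 << 1 == 0xAAAA for its
    // neighbours. Used as an exclusion mask, this gives neighbouring chunks
    // odd and even tags respectively.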
309     return 0x5555U << ((Ptr >> SizeClassMap::getSizeLSBByClassId(ClassId)) & 1);
310   }
311 
312   NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
313                           uptr Alignment = MinAlignment,
314                           bool ZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
315     initThreadMaybe();
316 
317     const Options Options = Primary.Options.load();
318     if (UNLIKELY(Alignment > MaxAlignment)) {
319       if (Options.get(OptionBit::MayReturnNull))
320         return nullptr;
321       reportAlignmentTooBig(Alignment, MaxAlignment);
322     }
323     if (Alignment < MinAlignment)
324       Alignment = MinAlignment;
325 
326 #ifdef GWP_ASAN_HOOKS
327     if (UNLIKELY(GuardedAlloc.shouldSample())) {
328       if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
329         Stats.lock();
330         Stats.add(StatAllocated, GuardedAllocSlotSize);
331         Stats.sub(StatFree, GuardedAllocSlotSize);
332         Stats.unlock();
333         return Ptr;
334       }
335     }
336 #endif // GWP_ASAN_HOOKS
337 
338     const FillContentsMode FillContents = ZeroContents ? ZeroFill
339                                           : TSDRegistry.getDisableMemInit()
340                                               ? NoFill
341                                               : Options.getFillContentsMode();
342 
343     // If the requested size happens to be 0 (more common than you might think),
344     // allocate MinAlignment bytes on top of the header. Then add the extra
345     // bytes required to fulfill the alignment requirements: we allocate enough
346     // to be sure that there will be an address in the block that will satisfy
347     // the alignment.
348     const uptr NeededSize =
349         roundUp(Size, MinAlignment) +
350         ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
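    // Purely illustrative numbers, assuming MinAlignment == 16 and an 8-byte
    // chunk header: a 20-byte malloc() needs roundUp(20, 16) + 8 == 40 bytes,
    // while a 64-byte-aligned 20-byte allocation needs roundUp(20, 16) + 64 ==
    // 96 bytes, which guarantees that a suitably aligned user address exists
    // within the block.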
351 
352     // Takes care of extravagantly large sizes as well as integer overflows.
353     static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
354     if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
355       if (Options.get(OptionBit::MayReturnNull))
356         return nullptr;
357       reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
358     }
359     DCHECK_LE(Size, NeededSize);
360 
361     void *Block = nullptr;
362     uptr ClassId = 0;
363     uptr SecondaryBlockEnd = 0;
364     if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
365       ClassId = SizeClassMap::getClassIdBySize(NeededSize);
366       DCHECK_NE(ClassId, 0U);
367       bool UnlockRequired;
368       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
369       TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
370       Block = TSD->getCache().allocate(ClassId);
371       // If the allocation failed, retry in each successively larger class until
372       // it fits. If it fails to fit in the largest class, fall back to the
373       // Secondary.
374       if (UNLIKELY(!Block)) {
375         while (ClassId < SizeClassMap::LargestClassId && !Block)
376           Block = TSD->getCache().allocate(++ClassId);
377         if (!Block)
378           ClassId = 0;
379       }
380       if (UnlockRequired)
381         TSD->unlock();
382     }
383     if (UNLIKELY(ClassId == 0)) {
384       Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
385                                  FillContents);
386     }
387 
388     if (UNLIKELY(!Block)) {
389       if (Options.get(OptionBit::MayReturnNull))
390         return nullptr;
391       printStats();
392       reportOutOfMemory(NeededSize);
393     }
394 
395     const uptr BlockUptr = reinterpret_cast<uptr>(Block);
396     const uptr UnalignedUserPtr = BlockUptr + Chunk::getHeaderSize();
397     const uptr UserPtr = roundUp(UnalignedUserPtr, Alignment);
398 
399     void *Ptr = reinterpret_cast<void *>(UserPtr);
400     void *TaggedPtr = Ptr;
401     if (LIKELY(ClassId)) {
402       // We only need to zero or tag the contents for Primary backed
403       // allocations. We only set tags for primary allocations in order to avoid
404       // faulting potentially large numbers of pages for large secondary
405       // allocations. We assume that guard pages are enough to protect these
406       // allocations.
407       //
408       // FIXME: When the kernel provides a way to set the background tag of a
409       // mapping, we should be able to tag secondary allocations as well.
410       //
411       // When memory tagging is enabled, zeroing the contents is done as part of
412       // setting the tag.
413       if (UNLIKELY(useMemoryTagging<Config>(Options))) {
414         uptr PrevUserPtr;
415         Chunk::UnpackedHeader Header;
416         const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
417         const uptr BlockEnd = BlockUptr + BlockSize;
418         // If possible, try to reuse the UAF tag that was set by deallocate().
419         // For simplicity, only reuse tags if we have the same start address as
420         // the previous allocation. This handles the majority of cases since
421         // most allocations will not be more aligned than the minimum alignment.
422         //
423         // We need to handle situations involving reclaimed chunks, and retag
424         // the reclaimed portions if necessary. In the case where the chunk is
425         // fully reclaimed, the chunk's header will be zero, which will trigger
426         // the code path for new mappings and invalid chunks that prepares the
427         // chunk from scratch. There are three possibilities for partial
428         // reclaiming:
429         //
430         // (1) Header was reclaimed, data was partially reclaimed.
431         // (2) Header was not reclaimed, all data was reclaimed (e.g. because
432         //     data started on a page boundary).
433         // (3) Header was not reclaimed, data was partially reclaimed.
434         //
435         // Case (1) will be handled in the same way as for full reclaiming,
436         // since the header will be zero.
437         //
438         // We can detect case (2) by loading the tag from the start
439         // of the chunk. If it is zero, it means that either all data was
440         // reclaimed (since we never use zero as the chunk tag), or that the
441         // previous allocation was of size zero. Either way, we need to prepare
442         // a new chunk from scratch.
443         //
444         // We can detect case (3) by moving to the next page (if covered by the
445         // chunk) and loading the tag of its first granule. If it is zero, it
446         // means that all following pages may need to be retagged. On the other
447         // hand, if it is nonzero, we can assume that all following pages are
448         // still tagged, according to the logic that if any of the pages
449         // following the next page were reclaimed, the next page would have been
450         // reclaimed as well.
451         uptr TaggedUserPtr;
452         if (getChunkFromBlock(BlockUptr, &PrevUserPtr, &Header) &&
453             PrevUserPtr == UserPtr &&
454             (TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
455           uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
456           const uptr NextPage = roundUp(TaggedUserPtr, getPageSizeCached());
457           if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
458             PrevEnd = NextPage;
459           TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
460           resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, Size, BlockEnd);
461           if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
462             // If an allocation needs to be zeroed (i.e. calloc) we can normally
463             // avoid zeroing the memory now since we can rely on memory having
464             // been zeroed on free, as this is normally done while setting the
465             // UAF tag. But if tagging was disabled per-thread when the memory
466             // was freed, it would not have been retagged and thus zeroed, and
467             // therefore it needs to be zeroed now.
468             memset(TaggedPtr, 0,
469                    Min(Size, roundUp(PrevEnd - TaggedUserPtr,
470                                      archMemoryTagGranuleSize())));
471           } else if (Size) {
472             // Clear any stack metadata that may have previously been stored in
473             // the chunk data.
474             memset(TaggedPtr, 0, archMemoryTagGranuleSize());
475           }
476         } else {
477           const uptr OddEvenMask =
478               computeOddEvenMaskForPointerMaybe(Options, BlockUptr, ClassId);
479           TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
480         }
481         storePrimaryAllocationStackMaybe(Options, Ptr);
482       } else {
483         Block = addHeaderTag(Block);
484         Ptr = addHeaderTag(Ptr);
485         if (UNLIKELY(FillContents != NoFill)) {
486           // This condition is not necessarily unlikely, but since memset is
487           // costly, we might as well mark it as such.
488           memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
489                  PrimaryT::getSizeByClassId(ClassId));
490         }
491       }
492     } else {
493       Block = addHeaderTag(Block);
494       Ptr = addHeaderTag(Ptr);
495       if (UNLIKELY(useMemoryTagging<Config>(Options))) {
496         storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
497         storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
498       }
499     }
500 
501     Chunk::UnpackedHeader Header = {};
502     if (UNLIKELY(UnalignedUserPtr != UserPtr)) {
503       const uptr Offset = UserPtr - UnalignedUserPtr;
504       DCHECK_GE(Offset, 2 * sizeof(u32));
505       // The BlockMarker has no security purpose, but is specifically meant for
506       // the chunk iteration function that can be used in debugging situations.
507       // It is the only situation where we have to locate the start of a chunk
508       // based on its block address.
509       reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
510       reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
511       Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
512     }
513     Header.ClassId = ClassId & Chunk::ClassIdMask;
514     Header.State = Chunk::State::Allocated;
515     Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
516     Header.SizeOrUnusedBytes =
517         (ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size)) &
518         Chunk::SizeOrUnusedBytesMask;
519     Chunk::storeHeader(Cookie, Ptr, &Header);
520 
521     return TaggedPtr;
522   }
523 
524   NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
525                            UNUSED uptr Alignment = MinAlignment) {
526     if (UNLIKELY(!Ptr))
527       return;
528 
529     // For a deallocation, we only ensure minimal initialization, meaning thread
530     // local data will be left uninitialized for now (when using ELF TLS). The
531     // fallback cache will be used instead. This is a workaround for a situation
532     // where the only heap operation performed in a thread would be a free past
533     // the TLS destructors, leaving initialized thread specific data that is
534     // never properly destroyed. Any other heap operation will do a full init.
535     initThreadMaybe(/*MinimalInit=*/true);
536 
537 #ifdef GWP_ASAN_HOOKS
538     if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
539       GuardedAlloc.deallocate(Ptr);
540       Stats.lock();
541       Stats.add(StatFree, GuardedAllocSlotSize);
542       Stats.sub(StatAllocated, GuardedAllocSlotSize);
543       Stats.unlock();
544       return;
545     }
546 #endif // GWP_ASAN_HOOKS
547 
548     if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
549       reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
550 
551     void *TaggedPtr = Ptr;
552     Ptr = getHeaderTaggedPointer(Ptr);
553 
554     Chunk::UnpackedHeader Header;
555     Chunk::loadHeader(Cookie, Ptr, &Header);
556 
557     if (UNLIKELY(Header.State != Chunk::State::Allocated))
558       reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
559 
560     const Options Options = Primary.Options.load();
561     if (Options.get(OptionBit::DeallocTypeMismatch)) {
562       if (UNLIKELY(Header.OriginOrWasZeroed != Origin)) {
563         // With the exception of memalign'd chunks, which can still be free'd.
564         if (Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
565             Origin != Chunk::Origin::Malloc)
566           reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
567                                     Header.OriginOrWasZeroed, Origin);
568       }
569     }
570 
571     const uptr Size = getSize(Ptr, &Header);
572     if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
573       if (UNLIKELY(DeleteSize != Size))
574         reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
575     }
576 
577     quarantineOrDeallocateChunk(Options, TaggedPtr, &Header, Size);
578   }
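  // For orientation, the C wrappers built on top of this class boil down to
  // calls of the following shape (a simplified sketch, not the actual wrapper
  // code; "Instance" is a placeholder):
  //   void *malloc(size_t Size) {
  //     return Instance.allocate(Size, Chunk::Origin::Malloc);
  //   }
  //   void free(void *Ptr) {
  //     Instance.deallocate(Ptr, Chunk::Origin::Malloc);
  //   }
  // Sized operator delete additionally forwards its size argument as
  // DeleteSize so that the DeleteSizeMismatch check above can fire.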
579 
580   void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
581     initThreadMaybe();
582 
583     const Options Options = Primary.Options.load();
584     if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) {
585       if (Options.get(OptionBit::MayReturnNull))
586         return nullptr;
587       reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize);
588     }
589 
590     // The following cases are handled by the C wrappers.
591     DCHECK_NE(OldPtr, nullptr);
592     DCHECK_NE(NewSize, 0);
593 
594 #ifdef GWP_ASAN_HOOKS
595     if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
596       uptr OldSize = GuardedAlloc.getSize(OldPtr);
597       void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
598       if (NewPtr)
599         memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
600       GuardedAlloc.deallocate(OldPtr);
601       Stats.lock();
602       Stats.add(StatFree, GuardedAllocSlotSize);
603       Stats.sub(StatAllocated, GuardedAllocSlotSize);
604       Stats.unlock();
605       return NewPtr;
606     }
607 #endif // GWP_ASAN_HOOKS
608 
609     void *OldTaggedPtr = OldPtr;
610     OldPtr = getHeaderTaggedPointer(OldPtr);
611 
612     if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
613       reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
614 
615     Chunk::UnpackedHeader Header;
616     Chunk::loadHeader(Cookie, OldPtr, &Header);
617 
618     if (UNLIKELY(Header.State != Chunk::State::Allocated))
619       reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
620 
621     // Pointer has to be allocated with a malloc-type function. Some
622     // applications think that it is OK to realloc a memalign'ed pointer, which
623     // will trigger this check. It really isn't.
624     if (Options.get(OptionBit::DeallocTypeMismatch)) {
625       if (UNLIKELY(Header.OriginOrWasZeroed != Chunk::Origin::Malloc))
626         reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
627                                   Header.OriginOrWasZeroed,
628                                   Chunk::Origin::Malloc);
629     }
630 
631     void *BlockBegin = getBlockBegin(OldTaggedPtr, &Header);
632     uptr BlockEnd;
633     uptr OldSize;
634     const uptr ClassId = Header.ClassId;
635     if (LIKELY(ClassId)) {
636       BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
637                  SizeClassMap::getSizeByClassId(ClassId);
638       OldSize = Header.SizeOrUnusedBytes;
639     } else {
640       BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
641       OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
642                             Header.SizeOrUnusedBytes);
643     }
644     // If the new chunk still fits in the previously allocated block (with a
645     // reasonable delta), we just keep the old block, and update the chunk
646     // header to reflect the size change.
647     if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
648       if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
649         Header.SizeOrUnusedBytes =
650             (ClassId ? NewSize
651                      : BlockEnd -
652                            (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
653             Chunk::SizeOrUnusedBytesMask;
654         Chunk::storeHeader(Cookie, OldPtr, &Header);
655         if (UNLIKELY(useMemoryTagging<Config>(Options))) {
656           if (ClassId) {
657             resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
658                               reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
659                               NewSize, untagPointer(BlockEnd));
660             storePrimaryAllocationStackMaybe(Options, OldPtr);
661           } else {
662             storeSecondaryAllocationStackMaybe(Options, OldPtr, NewSize);
663           }
664         }
665         return OldTaggedPtr;
666       }
667     }
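    // Illustration of the in-place policy above (assuming 4 KiB pages):
    // shrinking a chunk by 3 KiB keeps the existing block and only updates the
    // header, while shrinking it by 4 KiB or more falls through to the
    // allocate-copy-deallocate path below, presumably so that whole pages can
    // be reclaimed.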
668 
669     // Otherwise we allocate a new one, and deallocate the old one. Some
670     // allocators will allocate an even larger chunk (by a fixed factor) to
671     // allow for potential further in-place realloc. The gains of such a trick
672     // are currently unclear.
673     void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
674     if (LIKELY(NewPtr)) {
675       memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
676       quarantineOrDeallocateChunk(Options, OldTaggedPtr, &Header, OldSize);
677     }
678     return NewPtr;
679   }
680 
681   // TODO(kostyak): disable() is currently best-effort. There are some small
682   //                windows of time when an allocation could still succeed after
683   //                this function finishes. We will revisit that later.
684   void disable() NO_THREAD_SAFETY_ANALYSIS {
685     initThreadMaybe();
686 #ifdef GWP_ASAN_HOOKS
687     GuardedAlloc.disable();
688 #endif
689     TSDRegistry.disable();
690     Stats.disable();
691     Quarantine.disable();
692     Primary.disable();
693     Secondary.disable();
694   }
695 
696   void enable() NO_THREAD_SAFETY_ANALYSIS {
697     initThreadMaybe();
698     Secondary.enable();
699     Primary.enable();
700     Quarantine.enable();
701     Stats.enable();
702     TSDRegistry.enable();
703 #ifdef GWP_ASAN_HOOKS
704     GuardedAlloc.enable();
705 #endif
706   }
707 
708   // The function returns the number of bytes required to store the statistics,
709   // which might be larger than the number of bytes provided. Note that the
710   // statistics buffer is not necessarily constant between calls to this
711   // function. This can be called with a null buffer or zero size for buffer
712   // sizing purposes.
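  // An illustrative two-call pattern ("Instance" and "Buffer" are
  // placeholders):
  //   const uptr Needed = Instance.getStats(nullptr, 0); // query the length
  //   // ... obtain a Buffer of at least Needed bytes ...
  //   Instance.getStats(Buffer, Needed);
  // The second call may report a different length since the statistics can
  // change between the calls.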
713   uptr getStats(char *Buffer, uptr Size) {
714     ScopedString Str;
715     const uptr Length = getStats(&Str) + 1;
716     if (Length < Size)
717       Size = Length;
718     if (Buffer && Size) {
719       memcpy(Buffer, Str.data(), Size);
720       Buffer[Size - 1] = '\0';
721     }
722     return Length;
723   }
724 
725   void printStats() {
726     ScopedString Str;
727     getStats(&Str);
728     Str.output();
729   }
730 
731   void printFragmentationInfo() {
732     ScopedString Str;
733     Primary.getFragmentationInfo(&Str);
734     // Secondary allocator dumps the fragmentation data in getStats().
735     Str.output();
736   }
737 
738   void releaseToOS(ReleaseToOS ReleaseType) {
739     initThreadMaybe();
740     if (ReleaseType == ReleaseToOS::ForceAll)
741       drainCaches();
742     Primary.releaseToOS(ReleaseType);
743     Secondary.releaseToOS();
744   }
745 
746   // Iterate over all chunks and call a callback for all busy chunks located
747   // within the provided memory range. Said callback must not use this allocator
748   // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
749   void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
750                          void *Arg) {
751     initThreadMaybe();
752     if (archSupportsMemoryTagging())
753       Base = untagPointer(Base);
754     const uptr From = Base;
755     const uptr To = Base + Size;
756     bool MayHaveTaggedPrimary = allocatorSupportsMemoryTagging<Config>() &&
757                                 systemSupportsMemoryTagging();
758     auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
759                    Arg](uptr Block) {
760       if (Block < From || Block >= To)
761         return;
762       uptr Chunk;
763       Chunk::UnpackedHeader Header;
764       if (MayHaveTaggedPrimary) {
765         // A chunk header can either have a zero tag (tagged primary) or the
766         // header tag (secondary, or untagged primary). We don't know which so
767         // try both.
768         ScopedDisableMemoryTagChecks x;
769         if (!getChunkFromBlock(Block, &Chunk, &Header) &&
770             !getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
771           return;
772       } else {
773         if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
774           return;
775       }
776       if (Header.State == Chunk::State::Allocated) {
777         uptr TaggedChunk = Chunk;
778         if (allocatorSupportsMemoryTagging<Config>())
779           TaggedChunk = untagPointer(TaggedChunk);
780         if (useMemoryTagging<Config>(Primary.Options.load()))
781           TaggedChunk = loadTag(Chunk);
782         Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
783                  Arg);
784       }
785     };
786     Primary.iterateOverBlocks(Lambda);
787     Secondary.iterateOverBlocks(Lambda);
788 #ifdef GWP_ASAN_HOOKS
789     GuardedAlloc.iterate(reinterpret_cast<void *>(Base), Size, Callback, Arg);
790 #endif
791   }
792 
793   bool canReturnNull() {
794     initThreadMaybe();
795     return Primary.Options.load().get(OptionBit::MayReturnNull);
796   }
797 
798   bool setOption(Option O, sptr Value) {
799     initThreadMaybe();
800     if (O == Option::MemtagTuning) {
801       // Enabling odd/even tags involves a tradeoff between use-after-free
802       // detection and buffer overflow detection. Odd/even tags make it more
803       // likely for buffer overflows to be detected by increasing the size of
804       // the guaranteed "red zone" around the allocation, but on the other hand
805       // use-after-free is less likely to be detected because the tag space for
806       // any particular chunk is cut in half. Therefore we use this tuning
807       // setting to control whether odd/even tags are enabled.
808       if (Value == M_MEMTAG_TUNING_BUFFER_OVERFLOW)
809         Primary.Options.set(OptionBit::UseOddEvenTags);
810       else if (Value == M_MEMTAG_TUNING_UAF)
811         Primary.Options.clear(OptionBit::UseOddEvenTags);
812       return true;
813     } else {
814       // We leave it to the various sub-components to decide whether or not they
815       // want to handle the option, but we do not want to short-circuit
816       // execution if one of the setOption() calls were to return false.
817       const bool PrimaryResult = Primary.setOption(O, Value);
818       const bool SecondaryResult = Secondary.setOption(O, Value);
819       const bool RegistryResult = TSDRegistry.setOption(O, Value);
820       return PrimaryResult && SecondaryResult && RegistryResult;
821     }
822     return false;
823   }
824 
825   // Return the usable size for a given chunk. Technically we lie, as we just
826   // report the actual size of a chunk. This is done to counteract code actively
827   // writing past the end of a chunk (like sqlite3) when the usable size allows
828   // for it, which then forces realloc to copy the usable size of a chunk as
829   // opposed to its actual size.
830   uptr getUsableSize(const void *Ptr) {
831     if (UNLIKELY(!Ptr))
832       return 0;
833 
834     return getAllocSize(Ptr);
835   }
836 
837   uptr getAllocSize(const void *Ptr) {
838     initThreadMaybe();
839 
840 #ifdef GWP_ASAN_HOOKS
841     if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
842       return GuardedAlloc.getSize(Ptr);
843 #endif // GWP_ASAN_HOOKS
844 
845     Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
846     Chunk::UnpackedHeader Header;
847     Chunk::loadHeader(Cookie, Ptr, &Header);
848 
849     // Getting the alloc size of a chunk only makes sense if it's allocated.
850     if (UNLIKELY(Header.State != Chunk::State::Allocated))
851       reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
852 
853     return getSize(Ptr, &Header);
854   }
855 
856   void getStats(StatCounters S) {
857     initThreadMaybe();
858     Stats.get(S);
859   }
860 
861   // Returns true if the pointer provided was allocated by the current
862   // allocator instance, which is compliant with tcmalloc's ownership concept.
863   // A corrupted chunk will not be reported as owned, which is WAI.
864   bool isOwned(const void *Ptr) {
865     initThreadMaybe();
866 #ifdef GWP_ASAN_HOOKS
867     if (GuardedAlloc.pointerIsMine(Ptr))
868       return true;
869 #endif // GWP_ASAN_HOOKS
870     if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))
871       return false;
872     Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
873     Chunk::UnpackedHeader Header;
874     return Chunk::isValid(Cookie, Ptr, &Header) &&
875            Header.State == Chunk::State::Allocated;
876   }
877 
878   bool useMemoryTaggingTestOnly() const {
879     return useMemoryTagging<Config>(Primary.Options.load());
880   }
881   void disableMemoryTagging() {
882     // If we haven't been initialized yet, we need to initialize now in order to
883     // prevent a future call to initThreadMaybe() from enabling memory tagging
884     // based on feature detection. But don't call initThreadMaybe() because it
885     // may end up calling the allocator (via pthread_atfork, via the post-init
886     // callback), which may cause mappings to be created with memory tagging
887     // enabled.
888     TSDRegistry.initOnceMaybe(this);
889     if (allocatorSupportsMemoryTagging<Config>()) {
890       Secondary.disableMemoryTagging();
891       Primary.Options.clear(OptionBit::UseMemoryTagging);
892     }
893   }
894 
895   void setTrackAllocationStacks(bool Track) {
896     initThreadMaybe();
897     if (getFlags()->allocation_ring_buffer_size <= 0) {
898       DCHECK(!Primary.Options.load().get(OptionBit::TrackAllocationStacks));
899       return;
900     }
901     if (Track)
902       Primary.Options.set(OptionBit::TrackAllocationStacks);
903     else
904       Primary.Options.clear(OptionBit::TrackAllocationStacks);
905   }
906 
907   void setFillContents(FillContentsMode FillContents) {
908     initThreadMaybe();
909     Primary.Options.setFillContentsMode(FillContents);
910   }
911 
912   void setAddLargeAllocationSlack(bool AddSlack) {
913     initThreadMaybe();
914     if (AddSlack)
915       Primary.Options.set(OptionBit::AddLargeAllocationSlack);
916     else
917       Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
918   }
919 
920   const char *getStackDepotAddress() const {
921     return reinterpret_cast<const char *>(&Depot);
922   }
923 
924   const char *getRegionInfoArrayAddress() const {
925     return Primary.getRegionInfoArrayAddress();
926   }
927 
928   static uptr getRegionInfoArraySize() {
929     return PrimaryT::getRegionInfoArraySize();
930   }
931 
932   const char *getRingBufferAddress() {
933     initThreadMaybe();
934     return RawRingBuffer;
935   }
936 
937   uptr getRingBufferSize() {
938     initThreadMaybe();
939     return RingBufferElements ? ringBufferSizeInBytes(RingBufferElements) : 0;
940   }
941 
942   static bool setRingBufferSizeForBuffer(char *Buffer, size_t Size) {
943     // Need at least one entry.
944     if (Size < sizeof(AllocationRingBuffer) +
945                    sizeof(typename AllocationRingBuffer::Entry)) {
946       return false;
947     }
948     AllocationRingBuffer *RingBuffer =
949         reinterpret_cast<AllocationRingBuffer *>(Buffer);
950     RingBuffer->Size = (Size - sizeof(AllocationRingBuffer)) /
951                        sizeof(typename AllocationRingBuffer::Entry);
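    // In other words, a Size-byte buffer yields
    // (Size - sizeof(AllocationRingBuffer)) / sizeof(Entry) usable entries;
    // the check above rejects buffers too small to hold even a single one.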
952     return true;
953   }
954 
955   static const uptr MaxTraceSize = 64;
956 
957   static void collectTraceMaybe(const StackDepot *Depot,
958                                 uintptr_t (&Trace)[MaxTraceSize], u32 Hash) {
959     uptr RingPos, Size;
960     if (!Depot->find(Hash, &RingPos, &Size))
961       return;
962     for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
963       Trace[I] = static_cast<uintptr_t>((*Depot)[RingPos + I]);
964   }
965 
966   static void getErrorInfo(struct scudo_error_info *ErrorInfo,
967                            uintptr_t FaultAddr, const char *DepotPtr,
968                            const char *RegionInfoPtr, const char *RingBufferPtr,
969                            size_t RingBufferSize, const char *Memory,
970                            const char *MemoryTags, uintptr_t MemoryAddr,
971                            size_t MemorySize) {
972     *ErrorInfo = {};
973     if (!allocatorSupportsMemoryTagging<Config>() ||
974         MemoryAddr + MemorySize < MemoryAddr)
975       return;
976 
977     auto *Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
978     size_t NextErrorReport = 0;
979 
980     // Check for OOB in the current block and the two surrounding blocks. Beyond
981     // that, UAF is more likely.
982     if (extractTag(FaultAddr) != 0)
983       getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
984                          RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
985                          MemorySize, 0, 2);
986 
987     // Check the ring buffer. For primary allocations this will only find UAF;
988     // for secondary allocations we can find either UAF or OOB.
989     getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
990                            RingBufferPtr, RingBufferSize);
991 
992     // Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
993     // Beyond that we are likely to hit false positives.
994     if (extractTag(FaultAddr) != 0)
995       getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
996                          RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
997                          MemorySize, 2, 16);
998   }
999 
1000 private:
1001   typedef typename PrimaryT::SizeClassMap SizeClassMap;
1002 
1003   static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
1004   static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
1005   static const uptr MinAlignment = 1UL << MinAlignmentLog;
1006   static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
1007   static const uptr MaxAllowedMallocSize =
1008       FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
1009 
1010   static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
1011                 "Minimal alignment must at least cover a chunk header.");
1012   static_assert(!allocatorSupportsMemoryTagging<Config>() ||
1013                     MinAlignment >= archMemoryTagGranuleSize(),
1014                 "");
1015 
1016   static const u32 BlockMarker = 0x44554353U;
1017 
1018   // These are indexes into an "array" of 32-bit values that store information
1019   // inline with a chunk that is relevant to diagnosing memory tag faults, where
1020   // 0 corresponds to the address of the user memory. This means that only
1021   // negative indexes may be used. The smallest index that may be used is -2,
1022   // which corresponds to 8 bytes before the user memory, because the chunk
1023   // header size is 8 bytes and in allocators that support memory tagging the
1024   // minimum alignment is at least the tag granule size (16 on aarch64).
1025   static const sptr MemTagAllocationTraceIndex = -2;
1026   static const sptr MemTagAllocationTidIndex = -1;
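  // Concretely (for illustration), the store/load helpers further down treat
  // the user pointer as a u32 array: Ptr32[-2] holds the allocation trace hash
  // and Ptr32[-1] the allocating thread ID, both inside the 8-byte chunk
  // header that directly precedes the user memory.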
1027 
1028   u32 Cookie = 0;
1029   u32 QuarantineMaxChunkSize = 0;
1030 
1031   GlobalStats Stats;
1032   PrimaryT Primary;
1033   SecondaryT Secondary;
1034   QuarantineT Quarantine;
1035   TSDRegistryT TSDRegistry;
1036   pthread_once_t PostInitNonce = PTHREAD_ONCE_INIT;
1037 
1038 #ifdef GWP_ASAN_HOOKS
1039   gwp_asan::GuardedPoolAllocator GuardedAlloc;
1040   uptr GuardedAllocSlotSize = 0;
1041 #endif // GWP_ASAN_HOOKS
1042 
1043   StackDepot Depot;
1044 
1045   struct AllocationRingBuffer {
1046     struct Entry {
1047       atomic_uptr Ptr;
1048       atomic_uptr AllocationSize;
1049       atomic_u32 AllocationTrace;
1050       atomic_u32 AllocationTid;
1051       atomic_u32 DeallocationTrace;
1052       atomic_u32 DeallocationTid;
1053     };
1054 
1055     atomic_uptr Pos;
1056     // An array of Size (at least one) elements of type Entry immediately
1057     // follows this struct.
1058   };
1059   // Pointer to memory-mapped area starting with the AllocationRingBuffer
1060   // struct, immediately followed by Size elements of type Entry.
1061   char *RawRingBuffer = {};
1062   u32 RingBufferElements = 0;
1063   MemMapT RawRingBufferMap;
1064 
1065   // The following might get optimized out by the compiler.
1066   NOINLINE void performSanityChecks() {
1067     // Verify that the header offset field can hold the maximum offset. In the
1068     // case of the Secondary allocator, it takes care of alignment and the
1069     // offset will always be small. In the case of the Primary, the worst case
1070     // scenario happens in the last size class, when the backend allocation
1071     // would already be aligned on the requested alignment, which would happen
1072     // to be the maximum alignment that would fit in that size class. As a
1073     // result, the maximum offset will be at most the maximum alignment for the
1074     // last size class minus the header size, in multiples of MinAlignment.
1075     Chunk::UnpackedHeader Header = {};
1076     const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
1077                                          SizeClassMap::MaxSize - MinAlignment);
1078     const uptr MaxOffset =
1079         (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
1080     Header.Offset = MaxOffset & Chunk::OffsetMask;
1081     if (UNLIKELY(Header.Offset != MaxOffset))
1082       reportSanityCheckError("offset");
1083 
1084     // Verify that we can fit the maximum size or amount of unused bytes in the
1085     // header. Given that the Secondary fits the allocation to a page, the worst
1086     // case scenario happens in the Primary. It will depend on the second to
1087     // last and last class sizes, as well as the dynamic base for the Primary.
1088     // The following is an over-approximation that works for our needs.
1089     const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
1090     Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
1091     if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
1092       reportSanityCheckError("size (or unused bytes)");
1093 
1094     const uptr LargestClassId = SizeClassMap::LargestClassId;
1095     Header.ClassId = LargestClassId;
1096     if (UNLIKELY(Header.ClassId != LargestClassId))
1097       reportSanityCheckError("class ID");
1098   }
1099 
1100   static inline void *getBlockBegin(const void *Ptr,
1101                                     Chunk::UnpackedHeader *Header) {
1102     return reinterpret_cast<void *>(
1103         reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
1104         (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
1105   }
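  // For example (values purely illustrative, assuming MinAlignmentLog == 4): a
  // header storing Offset == 3 means the chunk sits 3 * 16 == 48 bytes past
  // the block begin, so getBlockBegin() returns Ptr - 8 (header) - 48.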
1106 
1107   // Return the size of a chunk as requested during its allocation.
1108   inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
1109     const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
1110     if (LIKELY(Header->ClassId))
1111       return SizeOrUnusedBytes;
1112     if (allocatorSupportsMemoryTagging<Config>())
1113       Ptr = untagPointer(const_cast<void *>(Ptr));
1114     return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
1115            reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
1116   }
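  // Illustrative Secondary-backed case: with a block ending at 0x10000, a user
  // pointer at 0xF000 and SizeOrUnusedBytes == 0x100 (unused bytes at the end
  // of the block), the requested size is 0x10000 - 0xF000 - 0x100 == 0xF00.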
1117 
1118   void quarantineOrDeallocateChunk(const Options &Options, void *TaggedPtr,
1119                                    Chunk::UnpackedHeader *Header,
1120                                    uptr Size) NO_THREAD_SAFETY_ANALYSIS {
1121     void *Ptr = getHeaderTaggedPointer(TaggedPtr);
1122     // If the quarantine is disabled, or if the actual size of the chunk is 0
1123     // or larger than the maximum allowed, we return the chunk directly to the
1124     // backend. The "Size - 1" below purposefully underflows for Size == 0.
1125     const bool BypassQuarantine = !Quarantine.getCacheSize() ||
1126                                   ((Size - 1) >= QuarantineMaxChunkSize) ||
1127                                   !Header->ClassId;
1128     if (BypassQuarantine)
1129       Header->State = Chunk::State::Available;
1130     else
1131       Header->State = Chunk::State::Quarantined;
1132     Header->OriginOrWasZeroed = useMemoryTagging<Config>(Options) &&
1133                                 Header->ClassId &&
1134                                 !TSDRegistry.getDisableMemInit();
1135     Chunk::storeHeader(Cookie, Ptr, Header);
1136 
1137     if (UNLIKELY(useMemoryTagging<Config>(Options))) {
1138       u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
1139       storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
1140       if (Header->ClassId) {
1141         if (!TSDRegistry.getDisableMemInit()) {
1142           uptr TaggedBegin, TaggedEnd;
1143           const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
1144               Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, Header)),
1145               Header->ClassId);
1146           // Exclude the previous tag so that immediate use after free is
1147           // detected 100% of the time.
1148           setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
1149                        &TaggedEnd);
1150         }
1151       }
1152     }
1153     if (BypassQuarantine) {
1154       if (allocatorSupportsMemoryTagging<Config>())
1155         Ptr = untagPointer(Ptr);
1156       void *BlockBegin = getBlockBegin(Ptr, Header);
1157       const uptr ClassId = Header->ClassId;
1158       if (LIKELY(ClassId)) {
1159         bool UnlockRequired;
1160         auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
1161         TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
1162         const bool CacheDrained =
1163             TSD->getCache().deallocate(ClassId, BlockBegin);
1164         if (UnlockRequired)
1165           TSD->unlock();
1166         // When we have drained some blocks back to the Primary from TSD, that
1167         // implies that we may have the chance to release some pages as well.
1168         // Note that in order not to block other threads' access to the TSD,
1169         // release the TSD first and then try the page release.
1170         if (CacheDrained)
1171           Primary.tryReleaseToOS(ClassId, ReleaseToOS::Normal);
1172       } else {
1173         if (UNLIKELY(useMemoryTagging<Config>(Options)))
1174           storeTags(reinterpret_cast<uptr>(BlockBegin),
1175                     reinterpret_cast<uptr>(Ptr));
1176         Secondary.deallocate(Options, BlockBegin);
1177       }
1178     } else {
1179       bool UnlockRequired;
1180       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
1181       TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
1182       Quarantine.put(&TSD->getQuarantineCache(),
1183                      QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
1184       if (UnlockRequired)
1185         TSD->unlock();
1186     }
1187   }
1188 
1189   bool getChunkFromBlock(uptr Block, uptr *Chunk,
1190                          Chunk::UnpackedHeader *Header) {
1191     *Chunk =
1192         Block + getChunkOffsetFromBlock(reinterpret_cast<const char *>(Block));
1193     return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header);
1194   }
1195 
1196   static uptr getChunkOffsetFromBlock(const char *Block) {
1197     u32 Offset = 0;
1198     if (reinterpret_cast<const u32 *>(Block)[0] == BlockMarker)
1199       Offset = reinterpret_cast<const u32 *>(Block)[1];
1200     return Offset + Chunk::getHeaderSize();
1201   }
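  // Putting the two helpers above together, the (illustrative) layout of a
  // block that carries an aligned chunk is:
  //   Block + 0      : BlockMarker
  //   Block + 4      : u32 Offset, the distance from Block to the chunk header
  //   Block + Offset : chunk header, immediately followed by the user memory
  // Blocks without the marker have their chunk header right at the block
  // begin.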
1202 
1203   // Set the tag of the granule past the end of the allocation to 0, to catch
1204   // linear overflows even if a previous larger allocation used the same block
1205   // and tag. Only do this if the granule past the end is in our block, because
1206   // this would otherwise lead to a SEGV if the allocation covers the entire
1207   // block and our block is at the end of a mapping. The tag of the next block's
1208   // header granule will be set to 0, so it will serve the purpose of catching
1209   // linear overflows in this case.
1210   //
1211   // For allocations of size 0 we do not end up storing the address tag to the
1212   // memory tag space, which getInlineErrorInfo() normally relies on to match
1213   // address tags against chunks. To allow matching in this case we store the
1214   // address tag in the first byte of the chunk.
1215   void storeEndMarker(uptr End, uptr Size, uptr BlockEnd) {
1216     DCHECK_EQ(BlockEnd, untagPointer(BlockEnd));
1217     uptr UntaggedEnd = untagPointer(End);
1218     if (UntaggedEnd != BlockEnd) {
1219       storeTag(UntaggedEnd);
1220       if (Size == 0)
1221         *reinterpret_cast<u8 *>(UntaggedEnd) = extractTag(End);
1222     }
1223   }
1224 
1225   void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
1226                            uptr BlockEnd) {
1227     // Prepare the granule before the chunk to store the chunk header by setting
1228     // its tag to 0. Normally its tag will already be 0, but in the case where a
1229     // chunk holding a low alignment allocation is reused for a higher alignment
1230     // allocation, the chunk may already have a non-zero tag from the previous
1231     // allocation.
1232     storeTag(reinterpret_cast<uptr>(Ptr) - archMemoryTagGranuleSize());
1233 
1234     uptr TaggedBegin, TaggedEnd;
1235     setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);
1236 
1237     storeEndMarker(TaggedEnd, Size, BlockEnd);
1238     return reinterpret_cast<void *>(TaggedBegin);
1239   }
1240 
1241   void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
1242                          uptr BlockEnd) {
1243     uptr RoundOldPtr = roundUp(OldPtr, archMemoryTagGranuleSize());
1244     uptr RoundNewPtr;
1245     if (RoundOldPtr >= NewPtr) {
1246       // If the allocation is shrinking we just need to set the tag past the end
1247       // of the allocation to 0. See explanation in storeEndMarker() above.
1248       RoundNewPtr = roundUp(NewPtr, archMemoryTagGranuleSize());
1249     } else {
1250       // Set the memory tag of the region
1251       // [RoundOldPtr, roundUp(NewPtr, archMemoryTagGranuleSize()))
1252       // to the pointer tag stored in OldPtr.
1253       RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
1254     }
1255     storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
1256   }
1257 
1258   void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
1259     if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
1260       return;
1261     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
1262     Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
1263     Ptr32[MemTagAllocationTidIndex] = getThreadID();
1264   }
1265 
1266   void storeRingBufferEntry(void *Ptr, u32 AllocationTrace, u32 AllocationTid,
1267                             uptr AllocationSize, u32 DeallocationTrace,
1268                             u32 DeallocationTid) {
1269     uptr Pos = atomic_fetch_add(&getRingBuffer()->Pos, 1, memory_order_relaxed);
1270     typename AllocationRingBuffer::Entry *Entry =
1271         getRingBufferEntry(RawRingBuffer, Pos % RingBufferElements);
1272 
    // First invalidate our entry so that we don't attempt to interpret a
    // partially written state in getRingBufferErrorInfo(). The fences below
    // ensure that the compiler does not move the stores to Ptr in between the
    // stores to the other fields.
1277     atomic_store_relaxed(&Entry->Ptr, 0);
1278 
1279     __atomic_signal_fence(__ATOMIC_SEQ_CST);
1280     atomic_store_relaxed(&Entry->AllocationTrace, AllocationTrace);
1281     atomic_store_relaxed(&Entry->AllocationTid, AllocationTid);
1282     atomic_store_relaxed(&Entry->AllocationSize, AllocationSize);
1283     atomic_store_relaxed(&Entry->DeallocationTrace, DeallocationTrace);
1284     atomic_store_relaxed(&Entry->DeallocationTid, DeallocationTid);
1285     __atomic_signal_fence(__ATOMIC_SEQ_CST);
1286 
1287     atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
1288   }
1289 
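  // Secondary allocations record the allocation trace and thread ID both
  // inline in the chunk and as a ring buffer entry (with no deallocation
  // information yet).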
1290   void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
1291                                           uptr Size) {
1292     if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
1293       return;
1294 
1295     u32 Trace = collectStackTrace();
1296     u32 Tid = getThreadID();
1297 
1298     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
1299     Ptr32[MemTagAllocationTraceIndex] = Trace;
1300     Ptr32[MemTagAllocationTidIndex] = Tid;
1301 
1302     storeRingBufferEntry(untagPointer(Ptr), Trace, Tid, Size, 0, 0);
1303   }
1304 
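  // On deallocation, pairs the allocation trace/TID previously stashed in the
  // chunk with a freshly collected deallocation trace/TID, keyed by the
  // pointer re-tagged with the tag it carried while allocated.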
1305   void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
1306                                    u8 PrevTag, uptr Size) {
1307     if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
1308       return;
1309 
1310     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
1311     u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
1312     u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];
1313 
1314     u32 DeallocationTrace = collectStackTrace();
1315     u32 DeallocationTid = getThreadID();
1316 
1317     storeRingBufferEntry(addFixedTag(untagPointer(Ptr), PrevTag),
1318                          AllocationTrace, AllocationTid, Size,
1319                          DeallocationTrace, DeallocationTid);
1320   }
1321 
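  // Number of error reports that fit in a scudo_error_info structure.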
1322   static const size_t NumErrorReports =
1323       sizeof(((scudo_error_info *)nullptr)->reports) /
1324       sizeof(((scudo_error_info *)nullptr)->reports[0]);
1325 
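  // Scans the blocks surrounding the fault address, using the primary's region
  // info and the provided snapshot of memory and memory tags, for allocated
  // chunks whose memory tag matches the fault address's tag, and reports them
  // as buffer overflows or underflows.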
1326   static void getInlineErrorInfo(struct scudo_error_info *ErrorInfo,
1327                                  size_t &NextErrorReport, uintptr_t FaultAddr,
1328                                  const StackDepot *Depot,
1329                                  const char *RegionInfoPtr, const char *Memory,
1330                                  const char *MemoryTags, uintptr_t MemoryAddr,
1331                                  size_t MemorySize, size_t MinDistance,
1332                                  size_t MaxDistance) {
1333     uptr UntaggedFaultAddr = untagPointer(FaultAddr);
1334     u8 FaultAddrTag = extractTag(FaultAddr);
1335     BlockInfo Info =
1336         PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedFaultAddr);
1337 
1338     auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool {
1339       if (Addr < MemoryAddr || Addr + archMemoryTagGranuleSize() < Addr ||
1340           Addr + archMemoryTagGranuleSize() > MemoryAddr + MemorySize)
1341         return false;
1342       *Data = &Memory[Addr - MemoryAddr];
1343       *Tag = static_cast<u8>(
1344           MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]);
1345       return true;
1346     };
1347 
1348     auto ReadBlock = [&](uptr Addr, uptr *ChunkAddr,
1349                          Chunk::UnpackedHeader *Header, const u32 **Data,
1350                          u8 *Tag) {
1351       const char *BlockBegin;
1352       u8 BlockBeginTag;
1353       if (!GetGranule(Addr, &BlockBegin, &BlockBeginTag))
1354         return false;
1355       uptr ChunkOffset = getChunkOffsetFromBlock(BlockBegin);
1356       *ChunkAddr = Addr + ChunkOffset;
1357 
1358       const char *ChunkBegin;
1359       if (!GetGranule(*ChunkAddr, &ChunkBegin, Tag))
1360         return false;
1361       *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>(
1362           ChunkBegin - Chunk::getHeaderSize());
1363       *Data = reinterpret_cast<const u32 *>(ChunkBegin);
1364 
1365       // Allocations of size 0 will have stashed the tag in the first byte of
1366       // the chunk, see storeEndMarker().
1367       if (Header->SizeOrUnusedBytes == 0)
1368         *Tag = static_cast<u8>(*ChunkBegin);
1369 
1370       return true;
1371     };
1372 
1373     if (NextErrorReport == NumErrorReports)
1374       return;
1375 
1376     auto CheckOOB = [&](uptr BlockAddr) {
1377       if (BlockAddr < Info.RegionBegin || BlockAddr >= Info.RegionEnd)
1378         return false;
1379 
1380       uptr ChunkAddr;
1381       Chunk::UnpackedHeader Header;
1382       const u32 *Data;
1383       uint8_t Tag;
1384       if (!ReadBlock(BlockAddr, &ChunkAddr, &Header, &Data, &Tag) ||
1385           Header.State != Chunk::State::Allocated || Tag != FaultAddrTag)
1386         return false;
1387 
1388       auto *R = &ErrorInfo->reports[NextErrorReport++];
1389       R->error_type =
1390           UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
1391       R->allocation_address = ChunkAddr;
1392       R->allocation_size = Header.SizeOrUnusedBytes;
1393       collectTraceMaybe(Depot, R->allocation_trace,
1394                         Data[MemTagAllocationTraceIndex]);
1395       R->allocation_tid = Data[MemTagAllocationTidIndex];
1396       return NextErrorReport == NumErrorReports;
1397     };
1398 
1399     if (MinDistance == 0 && CheckOOB(Info.BlockBegin))
1400       return;
1401 
1402     for (size_t I = Max<size_t>(MinDistance, 1); I != MaxDistance; ++I)
1403       if (CheckOOB(Info.BlockBegin + I * Info.BlockSize) ||
1404           CheckOOB(Info.BlockBegin - I * Info.BlockSize))
1405         return;
1406   }
1407 
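  // Walks the allocation ring buffer backwards from the most recent entry,
  // reporting use-after-free for deallocated entries that contain the fault
  // address, and buffer overflow/underflow for entries without a recorded
  // deallocation that lie close to it.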
1408   static void getRingBufferErrorInfo(struct scudo_error_info *ErrorInfo,
1409                                      size_t &NextErrorReport,
1410                                      uintptr_t FaultAddr,
1411                                      const StackDepot *Depot,
1412                                      const char *RingBufferPtr,
1413                                      size_t RingBufferSize) {
1414     auto *RingBuffer =
1415         reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
1416     size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
1417     if (!RingBuffer || RingBufferElements == 0)
1418       return;
1419     uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
1420 
1421     for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements &&
1422                            NextErrorReport != NumErrorReports;
1423          --I) {
1424       auto *Entry = getRingBufferEntry(RingBufferPtr, I % RingBufferElements);
1425       uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
1426       if (!EntryPtr)
1427         continue;
1428 
1429       uptr UntaggedEntryPtr = untagPointer(EntryPtr);
1430       uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize);
1431       u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace);
1432       u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid);
1433       u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace);
1434       u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid);
1435 
1436       if (DeallocationTid) {
1437         // For UAF we only consider in-bounds fault addresses because
1438         // out-of-bounds UAF is rare and attempting to detect it is very likely
1439         // to result in false positives.
1440         if (FaultAddr < EntryPtr || FaultAddr >= EntryPtr + EntrySize)
1441           continue;
1442       } else {
1443         // Ring buffer OOB is only possible with secondary allocations. In this
1444         // case we are guaranteed a guard region of at least a page on either
1445         // side of the allocation (guard page on the right, guard page + tagged
1446         // region on the left), so ignore any faults outside of that range.
1447         if (FaultAddr < EntryPtr - getPageSizeCached() ||
1448             FaultAddr >= EntryPtr + EntrySize + getPageSizeCached())
1449           continue;
1450 
1451         // For UAF the ring buffer will contain two entries, one for the
1452         // allocation and another for the deallocation. Don't report buffer
1453         // overflow/underflow using the allocation entry if we have already
1454         // collected a report from the deallocation entry.
1455         bool Found = false;
1456         for (uptr J = 0; J != NextErrorReport; ++J) {
1457           if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) {
1458             Found = true;
1459             break;
1460           }
1461         }
1462         if (Found)
1463           continue;
1464       }
1465 
1466       auto *R = &ErrorInfo->reports[NextErrorReport++];
1467       if (DeallocationTid)
1468         R->error_type = USE_AFTER_FREE;
1469       else if (FaultAddr < EntryPtr)
1470         R->error_type = BUFFER_UNDERFLOW;
1471       else
1472         R->error_type = BUFFER_OVERFLOW;
1473 
1474       R->allocation_address = UntaggedEntryPtr;
1475       R->allocation_size = EntrySize;
1476       collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace);
1477       R->allocation_tid = AllocationTid;
1478       collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace);
1479       R->deallocation_tid = DeallocationTid;
1480     }
1481   }
1482 
1483   uptr getStats(ScopedString *Str) {
1484     Primary.getStats(Str);
1485     Secondary.getStats(Str);
1486     Quarantine.getStats(Str);
1487     TSDRegistry.getStats(Str);
1488     return Str->length();
1489   }
1490 
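  // The entry array is laid out immediately after the AllocationRingBuffer
  // header within the raw mapping; these helpers index into it.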
1491   static typename AllocationRingBuffer::Entry *
1492   getRingBufferEntry(char *RawRingBuffer, uptr N) {
1493     return &reinterpret_cast<typename AllocationRingBuffer::Entry *>(
1494         &RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
1495   }
1496   static const typename AllocationRingBuffer::Entry *
1497   getRingBufferEntry(const char *RawRingBuffer, uptr N) {
1498     return &reinterpret_cast<const typename AllocationRingBuffer::Entry *>(
1499         &RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
1500   }
1501 
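  // Maps the ring buffer (header plus entry array, rounded up to a page) when
  // the allocation_ring_buffer_size flag requests a positive number of
  // entries.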
1502   void mapAndInitializeRingBuffer() {
1503     if (getFlags()->allocation_ring_buffer_size <= 0)
1504       return;
1505     u32 AllocationRingBufferSize =
1506         static_cast<u32>(getFlags()->allocation_ring_buffer_size);
1507     MemMapT MemMap;
1508     MemMap.map(
1509         /*Addr=*/0U,
1510         roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
1511                 getPageSizeCached()),
1512         "scudo:ring_buffer");
1513     RawRingBuffer = reinterpret_cast<char *>(MemMap.getBase());
1514     RawRingBufferMap = MemMap;
1515     RingBufferElements = AllocationRingBufferSize;
1516     static_assert(sizeof(AllocationRingBuffer) %
1517                           alignof(typename AllocationRingBuffer::Entry) ==
1518                       0,
1519                   "invalid alignment");
1520   }
1521 
1522   void unmapRingBuffer() {
1523     auto *RingBuffer = getRingBuffer();
1524     if (RingBuffer != nullptr) {
1525       RawRingBufferMap.unmap(RawRingBufferMap.getBase(),
1526                              RawRingBufferMap.getCapacity());
1527     }
1528     RawRingBuffer = nullptr;
1529   }
1530 
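  // Bytes needed for a ring buffer holding RingBufferElements entries: the
  // header followed by the entry array.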
1531   static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
1532     return sizeof(AllocationRingBuffer) +
1533            RingBufferElements * sizeof(typename AllocationRingBuffer::Entry);
1534   }
1535 
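  // Inverse of ringBufferSizeInBytes(): the number of entries that fit in a
  // mapping of the given size, or 0 if it cannot even hold the header.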
1536   static constexpr size_t ringBufferElementsFromBytes(size_t Bytes) {
1537     if (Bytes < sizeof(AllocationRingBuffer)) {
1538       return 0;
1539     }
1540     return (Bytes - sizeof(AllocationRingBuffer)) /
1541            sizeof(typename AllocationRingBuffer::Entry);
1542   }
1543 
1544   inline AllocationRingBuffer *getRingBuffer() {
1545     return reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
1546   }
1547 };
1548 
1549 } // namespace scudo
1550 
1551 #endif // SCUDO_COMBINED_H_
1552