//===-- combined.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_COMBINED_H_
#define SCUDO_COMBINED_H_

#include "chunk.h"
#include "common.h"
#include "flags.h"
#include "flags_parser.h"
#include "local_cache.h"
#include "memtag.h"
#include "options.h"
#include "quarantine.h"
#include "report.h"
#include "secondary.h"
#include "stack_depot.h"
#include "string_utils.h"
#include "tsd.h"

#include "scudo/interface.h"

#ifdef GWP_ASAN_HOOKS
#include "gwp_asan/guarded_pool_allocator.h"
#include "gwp_asan/optional/backtrace.h"
#include "gwp_asan/optional/segv_handler.h"
#endif // GWP_ASAN_HOOKS

extern "C" inline void EmptyCallback() {}

#ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
// This function is not part of the NDK so it does not appear in any public
// header files. We only declare/use it when targeting the platform.
extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf,
                                                     size_t num_entries);
#endif

namespace scudo {

template <class Params, void (*PostInitCallback)(void) = EmptyCallback>
class Allocator {
public:
  using PrimaryT = typename Params::Primary;
  using CacheT = typename PrimaryT::CacheT;
  typedef Allocator<Params, PostInitCallback> ThisT;
  typedef typename Params::template TSDRegistryT<ThisT> TSDRegistryT;

  void callPostInitCallback() {
    pthread_once(&PostInitNonce, PostInitCallback);
  }

  struct QuarantineCallback {
    explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
        : Allocator(Instance), Cache(LocalCache) {}

    // Chunk recycling function, returns a quarantined chunk to the backend,
    // first making sure it hasn't been tampered with.
    void recycle(void *Ptr) {
      Chunk::UnpackedHeader Header;
      Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
      if (UNLIKELY(Header.State != Chunk::State::Quarantined))
        reportInvalidChunkState(AllocatorAction::Recycling, Ptr);

      Chunk::UnpackedHeader NewHeader = Header;
      NewHeader.State = Chunk::State::Available;
      Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);

      if (allocatorSupportsMemoryTagging<Params>())
        Ptr = untagPointer(Ptr);
      void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
      Cache.deallocate(NewHeader.ClassId, BlockBegin);
    }

    // We take a shortcut when allocating a quarantine batch by working with the
    // appropriate class ID instead of using Size. The compiler should optimize
    // the class ID computation and work with the associated cache directly.
    void *allocate(UNUSED uptr Size) {
      const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
          sizeof(QuarantineBatch) + Chunk::getHeaderSize());
      void *Ptr = Cache.allocate(QuarantineClassId);
      // Quarantine batch allocation failure is fatal.
      if (UNLIKELY(!Ptr))
        reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));

      Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
                                     Chunk::getHeaderSize());
      Chunk::UnpackedHeader Header = {};
      Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
      Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
      Header.State = Chunk::State::Allocated;
      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);

      // Reset tag to 0 as this chunk may have been previously used for a tagged
      // user allocation.
      if (UNLIKELY(useMemoryTagging<Params>(Allocator.Primary.Options.load())))
        storeTags(reinterpret_cast<uptr>(Ptr),
                  reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));

      return Ptr;
    }

    void deallocate(void *Ptr) {
      const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
          sizeof(QuarantineBatch) + Chunk::getHeaderSize());
      Chunk::UnpackedHeader Header;
      Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);

      if (UNLIKELY(Header.State != Chunk::State::Allocated))
        reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
      DCHECK_EQ(Header.ClassId, QuarantineClassId);
      DCHECK_EQ(Header.Offset, 0);
      DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));

      Chunk::UnpackedHeader NewHeader = Header;
      NewHeader.State = Chunk::State::Available;
      Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
      Cache.deallocate(QuarantineClassId,
                       reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
                                                Chunk::getHeaderSize()));
    }

  private:
    ThisT &Allocator;
    CacheT &Cache;
  };

  typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
  typedef typename QuarantineT::CacheT QuarantineCacheT;

  void init() {
    performSanityChecks();

    // Check if hardware CRC32 is supported in the binary and by the platform,
    // if so, opt for the CRC32 hardware version of the checksum.
    if (&computeHardwareCRC32 && hasHardwareCRC32())
      HashAlgorithm = Checksum::HardwareCRC32;

    if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
      Cookie = static_cast<u32>(getMonotonicTime() ^
                                (reinterpret_cast<uptr>(this) >> 4));

    initFlags();
    reportUnrecognizedFlags();

    // Store some flags locally.
    if (getFlags()->may_return_null)
      Primary.Options.set(OptionBit::MayReturnNull);
    if (getFlags()->zero_contents)
      Primary.Options.setFillContentsMode(ZeroFill);
    else if (getFlags()->pattern_fill_contents)
      Primary.Options.setFillContentsMode(PatternOrZeroFill);
    if (getFlags()->dealloc_type_mismatch)
      Primary.Options.set(OptionBit::DeallocTypeMismatch);
    if (getFlags()->delete_size_mismatch)
      Primary.Options.set(OptionBit::DeleteSizeMismatch);
    if (allocatorSupportsMemoryTagging<Params>() &&
        systemSupportsMemoryTagging())
      Primary.Options.set(OptionBit::UseMemoryTagging);
    Primary.Options.set(OptionBit::UseOddEvenTags);

    QuarantineMaxChunkSize =
        static_cast<u32>(getFlags()->quarantine_max_chunk_size);

    Stats.init();
    const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
    Primary.init(ReleaseToOsIntervalMs);
    Secondary.init(&Stats, ReleaseToOsIntervalMs);
    Quarantine.init(
        static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
        static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
  }
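
  // A minimal usage sketch (illustrative only; "Config" is a placeholder for
  // one of the Params configurations defined elsewhere in the standalone Scudo
  // sources, and the exported C wrappers normally drive this class):
  //
  //   static scudo::Allocator<Config> Instance;
  //   Instance.init();
  //   void *P = Instance.allocate(128, scudo::Chunk::Origin::Malloc);
  //   Instance.deallocate(P, scudo::Chunk::Origin::Malloc);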

  // Initialize the embedded GWP-ASan instance. Requires the main allocator to
  // be functional, best called from PostInitCallback.
  void initGwpAsan() {
#ifdef GWP_ASAN_HOOKS
    gwp_asan::options::Options Opt;
    Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
    Opt.MaxSimultaneousAllocations =
        getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
    Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
    Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
    // Embedded GWP-ASan is locked through the Scudo atfork handler (via
    // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
    // handler.
    Opt.InstallForkHandlers = false;
    Opt.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
    GuardedAlloc.init(Opt);

    if (Opt.InstallSignalHandlers)
      gwp_asan::segv_handler::installSignalHandlers(
          &GuardedAlloc, Printf,
          gwp_asan::backtrace::getPrintBacktraceFunction(),
          gwp_asan::backtrace::getSegvBacktraceFunction());

    GuardedAllocSlotSize =
        GuardedAlloc.getAllocatorState()->maximumAllocationSize();
    Stats.add(StatFree, static_cast<uptr>(Opt.MaxSimultaneousAllocations) *
                            GuardedAllocSlotSize);
#endif // GWP_ASAN_HOOKS
  }

  ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
    TSDRegistry.initThreadMaybe(this, MinimalInit);
  }

  void unmapTestOnly() {
    TSDRegistry.unmapTestOnly(this);
    Primary.unmapTestOnly();
    Secondary.unmapTestOnly();
#ifdef GWP_ASAN_HOOKS
    if (getFlags()->GWP_ASAN_InstallSignalHandlers)
      gwp_asan::segv_handler::uninstallSignalHandlers();
    GuardedAlloc.uninitTestOnly();
#endif // GWP_ASAN_HOOKS
  }

  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }

  // The Cache must be provided zero-initialized.
  void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }

  // Release the resources used by a TSD, which involves:
  // - draining the local quarantine cache to the global quarantine;
  // - releasing the cached pointers back to the Primary;
  // - unlinking the local stats from the global ones (destroying the cache does
  //   the last two items).
  void commitBack(TSD<ThisT> *TSD) {
    Quarantine.drain(&TSD->QuarantineCache,
                     QuarantineCallback(*this, TSD->Cache));
    TSD->Cache.destroy(&Stats);
  }

  ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
    if (!allocatorSupportsMemoryTagging<Params>())
      return Ptr;
    auto UntaggedPtr = untagPointer(Ptr);
    if (UntaggedPtr != Ptr)
      return UntaggedPtr;
    // Secondary, or pointer allocated while memory tagging is unsupported or
    // disabled. The tag mismatch is okay in the latter case because tags will
    // not be checked.
    return addHeaderTag(Ptr);
  }

  ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
    if (!allocatorSupportsMemoryTagging<Params>())
      return Ptr;
    return addFixedTag(Ptr, 2);
  }

  ALWAYS_INLINE void *addHeaderTag(void *Ptr) {
    return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
  }

  NOINLINE u32 collectStackTrace() {
#ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
    // Discard collectStackTrace() frame and allocator function frame.
    constexpr uptr DiscardFrames = 2;
    uptr Stack[MaxTraceSize + DiscardFrames];
    uptr Size =
        android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
    Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
    return Depot.insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
#else
    return 0;
#endif
  }

  uptr computeOddEvenMaskForPointerMaybe(Options Options, uptr Ptr,
                                         uptr ClassId) {
    if (!Options.get(OptionBit::UseOddEvenTags))
      return 0;

    // If a chunk's tag is odd, we want the tags of the surrounding blocks to be
    // even, and vice versa. Blocks are laid out Size bytes apart, and adding
    // Size to Ptr will flip the least significant set bit of Size in Ptr, so
    // that bit will have the pattern 010101... for consecutive blocks, which we
    // can use to determine which tag mask to use.
    return 0x5555U << ((Ptr >> SizeClassMap::getSizeLSBByClassId(ClassId)) & 1);
  }
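
  // Worked example (illustrative): for a 16-byte class the least significant
  // set bit of Size is at position 4, so blocks at 0x...00 and 0x...10 yield
  // ((Ptr >> 4) & 1) of 0 and 1 respectively. Consecutive blocks thus
  // alternate between the exclude masks 0x5555 (even tags excluded, an odd
  // tag gets picked) and 0xAAAA (odd tags excluded, an even tag gets picked).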

  NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
                          uptr Alignment = MinAlignment,
                          bool ZeroContents = false) {
    initThreadMaybe();

    const Options Options = Primary.Options.load();
    if (UNLIKELY(Alignment > MaxAlignment)) {
      if (Options.get(OptionBit::MayReturnNull))
        return nullptr;
      reportAlignmentTooBig(Alignment, MaxAlignment);
    }
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.shouldSample())) {
      if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
        if (UNLIKELY(&__scudo_allocate_hook))
          __scudo_allocate_hook(Ptr, Size);
        Stats.lock();
        Stats.add(StatAllocated, GuardedAllocSlotSize);
        Stats.sub(StatFree, GuardedAllocSlotSize);
        Stats.unlock();
        return Ptr;
      }
    }
#endif // GWP_ASAN_HOOKS

    const FillContentsMode FillContents = ZeroContents ? ZeroFill
                                          : TSDRegistry.getDisableMemInit()
                                              ? NoFill
                                              : Options.getFillContentsMode();

    // If the requested size happens to be 0 (more common than you might think),
    // allocate MinAlignment bytes on top of the header. Then add the extra
    // bytes required to fulfill the alignment requirements: we allocate enough
    // to be sure that there will be an address in the block that will satisfy
    // the alignment.
    const uptr NeededSize =
        roundUpTo(Size, MinAlignment) +
        ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());

    // Takes care of extravagantly large sizes as well as integer overflows.
    static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
    if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
      if (Options.get(OptionBit::MayReturnNull))
        return nullptr;
      reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
    }
    DCHECK_LE(Size, NeededSize);

    void *Block = nullptr;
    uptr ClassId = 0;
    uptr SecondaryBlockEnd = 0;
    if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
      ClassId = SizeClassMap::getClassIdBySize(NeededSize);
      DCHECK_NE(ClassId, 0U);
      bool UnlockRequired;
      auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
      Block = TSD->Cache.allocate(ClassId);
      // If the allocation failed, the most likely reason with a 32-bit primary
      // is the region being full. In that event, retry in each successively
      // larger class until it fits. If it fails to fit in the largest class,
      // fallback to the Secondary.
      if (UNLIKELY(!Block)) {
        while (ClassId < SizeClassMap::LargestClassId && !Block)
          Block = TSD->Cache.allocate(++ClassId);
        if (!Block)
          ClassId = 0;
      }
      if (UnlockRequired)
        TSD->unlock();
    }
    if (UNLIKELY(ClassId == 0))
      Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
                                 FillContents);

    if (UNLIKELY(!Block)) {
      if (Options.get(OptionBit::MayReturnNull))
        return nullptr;
      reportOutOfMemory(NeededSize);
    }

    const uptr BlockUptr = reinterpret_cast<uptr>(Block);
    const uptr UnalignedUserPtr = BlockUptr + Chunk::getHeaderSize();
    const uptr UserPtr = roundUpTo(UnalignedUserPtr, Alignment);

    void *Ptr = reinterpret_cast<void *>(UserPtr);
    void *TaggedPtr = Ptr;
    if (LIKELY(ClassId)) {
      // We only need to zero or tag the contents for Primary backed
      // allocations. We only set tags for primary allocations in order to avoid
      // faulting potentially large numbers of pages for large secondary
      // allocations. We assume that guard pages are enough to protect these
      // allocations.
      //
      // FIXME: When the kernel provides a way to set the background tag of a
      // mapping, we should be able to tag secondary allocations as well.
      //
      // When memory tagging is enabled, zeroing the contents is done as part of
      // setting the tag.
      if (UNLIKELY(useMemoryTagging<Params>(Options))) {
        uptr PrevUserPtr;
        Chunk::UnpackedHeader Header;
        const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
        const uptr BlockEnd = BlockUptr + BlockSize;
        // If possible, try to reuse the UAF tag that was set by deallocate().
        // For simplicity, only reuse tags if we have the same start address as
        // the previous allocation. This handles the majority of cases since
        // most allocations will not be more aligned than the minimum alignment.
        //
        // We need to handle situations involving reclaimed chunks, and retag
        // the reclaimed portions if necessary. In the case where the chunk is
        // fully reclaimed, the chunk's header will be zero, which will trigger
        // the code path for new mappings and invalid chunks that prepares the
        // chunk from scratch. There are three possibilities for partial
        // reclaiming:
        //
        // (1) Header was reclaimed, data was partially reclaimed.
        // (2) Header was not reclaimed, all data was reclaimed (e.g. because
        //     data started on a page boundary).
        // (3) Header was not reclaimed, data was partially reclaimed.
        //
        // Case (1) will be handled in the same way as for full reclaiming,
        // since the header will be zero.
        //
        // We can detect case (2) by loading the tag from the start
        // of the chunk. If it is zero, it means that either all data was
        // reclaimed (since we never use zero as the chunk tag), or that the
        // previous allocation was of size zero. Either way, we need to prepare
        // a new chunk from scratch.
        //
        // We can detect case (3) by moving to the next page (if covered by the
        // chunk) and loading the tag of its first granule. If it is zero, it
        // means that all following pages may need to be retagged. On the other
        // hand, if it is nonzero, we can assume that all following pages are
        // still tagged, according to the logic that if any of the pages
        // following the next page were reclaimed, the next page would have been
        // reclaimed as well.
        uptr TaggedUserPtr;
        if (getChunkFromBlock(BlockUptr, &PrevUserPtr, &Header) &&
            PrevUserPtr == UserPtr &&
            (TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
          uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
          const uptr NextPage = roundUpTo(TaggedUserPtr, getPageSizeCached());
          if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
            PrevEnd = NextPage;
          TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
          resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, Size, BlockEnd);
          if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
            // If an allocation needs to be zeroed (i.e. calloc) we can normally
            // avoid zeroing the memory now since we can rely on memory having
            // been zeroed on free, as this is normally done while setting the
            // UAF tag. But if tagging was disabled per-thread when the memory
            // was freed, it would not have been retagged and thus zeroed, and
            // therefore it needs to be zeroed now.
            memset(TaggedPtr, 0,
                   Min(Size, roundUpTo(PrevEnd - TaggedUserPtr,
                                       archMemoryTagGranuleSize())));
          } else if (Size) {
            // Clear any stack metadata that may have previously been stored in
            // the chunk data.
            memset(TaggedPtr, 0, archMemoryTagGranuleSize());
          }
        } else {
          const uptr OddEvenMask =
              computeOddEvenMaskForPointerMaybe(Options, BlockUptr, ClassId);
          TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
        }
        storePrimaryAllocationStackMaybe(Options, Ptr);
      } else {
        Block = addHeaderTag(Block);
        Ptr = addHeaderTag(Ptr);
        if (UNLIKELY(FillContents != NoFill)) {
          // This condition is not necessarily unlikely, but since memset is
          // costly, we might as well mark it as such.
          memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
                 PrimaryT::getSizeByClassId(ClassId));
        }
      }
    } else {
      Block = addHeaderTag(Block);
      Ptr = addHeaderTag(Ptr);
      if (UNLIKELY(useMemoryTagging<Params>(Options))) {
        storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
        storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
      }
    }

    Chunk::UnpackedHeader Header = {};
    if (UNLIKELY(UnalignedUserPtr != UserPtr)) {
      const uptr Offset = UserPtr - UnalignedUserPtr;
      DCHECK_GE(Offset, 2 * sizeof(u32));
      // The BlockMarker has no security purpose, but is specifically meant for
      // the chunk iteration function that can be used in debugging situations.
      // It is the only situation where we have to locate the start of a chunk
      // based on its block address.
      reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
      reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
      Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
    }
    Header.ClassId = ClassId & Chunk::ClassIdMask;
    Header.State = Chunk::State::Allocated;
    Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
    Header.SizeOrUnusedBytes =
        (ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size)) &
        Chunk::SizeOrUnusedBytesMask;
    Chunk::storeHeader(Cookie, Ptr, &Header);

    if (UNLIKELY(&__scudo_allocate_hook))
      __scudo_allocate_hook(TaggedPtr, Size);

    return TaggedPtr;
  }

  NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
                           UNUSED uptr Alignment = MinAlignment) {
    // For a deallocation, we only ensure minimal initialization, meaning thread
    // local data will be left uninitialized for now (when using ELF TLS). The
    // fallback cache will be used instead. This is a workaround for a situation
    // where the only heap operation performed in a thread would be a free past
    // the TLS destructors, ending up in initialized thread specific data never
    // being destroyed properly. Any other heap operation will do a full init.
    initThreadMaybe(/*MinimalInit=*/true);

    if (UNLIKELY(&__scudo_deallocate_hook))
      __scudo_deallocate_hook(Ptr);

    if (UNLIKELY(!Ptr))
      return;

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
      GuardedAlloc.deallocate(Ptr);
      Stats.lock();
      Stats.add(StatFree, GuardedAllocSlotSize);
      Stats.sub(StatAllocated, GuardedAllocSlotSize);
      Stats.unlock();
      return;
    }
#endif // GWP_ASAN_HOOKS

    if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
      reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);

    void *TaggedPtr = Ptr;
    Ptr = getHeaderTaggedPointer(Ptr);

    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, Ptr, &Header);

    if (UNLIKELY(Header.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);

    const Options Options = Primary.Options.load();
    if (Options.get(OptionBit::DeallocTypeMismatch)) {
      if (UNLIKELY(Header.OriginOrWasZeroed != Origin)) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
            Origin != Chunk::Origin::Malloc)
          reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
                                    Header.OriginOrWasZeroed, Origin);
      }
    }

    const uptr Size = getSize(Ptr, &Header);
    if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
      if (UNLIKELY(DeleteSize != Size))
        reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
    }

    quarantineOrDeallocateChunk(Options, TaggedPtr, &Header, Size);
  }

  void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
    initThreadMaybe();

    const Options Options = Primary.Options.load();
    if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) {
      if (Options.get(OptionBit::MayReturnNull))
        return nullptr;
      reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize);
    }

    // The following cases are handled by the C wrappers.
    DCHECK_NE(OldPtr, nullptr);
    DCHECK_NE(NewSize, 0);

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
      uptr OldSize = GuardedAlloc.getSize(OldPtr);
      void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
      if (NewPtr)
        memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
      GuardedAlloc.deallocate(OldPtr);
      Stats.lock();
      Stats.add(StatFree, GuardedAllocSlotSize);
      Stats.sub(StatAllocated, GuardedAllocSlotSize);
      Stats.unlock();
      return NewPtr;
    }
#endif // GWP_ASAN_HOOKS

    void *OldTaggedPtr = OldPtr;
    OldPtr = getHeaderTaggedPointer(OldPtr);

    if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
      reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);

    Chunk::UnpackedHeader OldHeader;
    Chunk::loadHeader(Cookie, OldPtr, &OldHeader);

    if (UNLIKELY(OldHeader.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);

    // Pointer has to be allocated with a malloc-type function. Some
    // applications think that it is OK to realloc a memalign'ed pointer, which
    // will trigger this check. It really isn't.
    if (Options.get(OptionBit::DeallocTypeMismatch)) {
      if (UNLIKELY(OldHeader.OriginOrWasZeroed != Chunk::Origin::Malloc))
        reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
                                  OldHeader.OriginOrWasZeroed,
                                  Chunk::Origin::Malloc);
    }

    void *BlockBegin = getBlockBegin(OldTaggedPtr, &OldHeader);
    uptr BlockEnd;
    uptr OldSize;
    const uptr ClassId = OldHeader.ClassId;
    if (LIKELY(ClassId)) {
      BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
                 SizeClassMap::getSizeByClassId(ClassId);
      OldSize = OldHeader.SizeOrUnusedBytes;
    } else {
      BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
      OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
                            OldHeader.SizeOrUnusedBytes);
    }
    // If the new chunk still fits in the previously allocated block (with a
    // reasonable delta), we just keep the old block, and update the chunk
    // header to reflect the size change.
    if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
      if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
        Chunk::UnpackedHeader NewHeader = OldHeader;
        NewHeader.SizeOrUnusedBytes =
            (ClassId ? NewSize
                     : BlockEnd -
                           (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
            Chunk::SizeOrUnusedBytesMask;
        Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
        if (UNLIKELY(useMemoryTagging<Params>(Options))) {
          if (ClassId) {
            resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
                              reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
                              NewSize, untagPointer(BlockEnd));
            storePrimaryAllocationStackMaybe(Options, OldPtr);
          } else {
            storeSecondaryAllocationStackMaybe(Options, OldPtr, NewSize);
          }
        }
        return OldTaggedPtr;
      }
    }

    // Otherwise we allocate a new one, and deallocate the old one. Some
    // allocators will allocate an even larger chunk (by a fixed factor) to
    // allow for potential further in-place realloc. The gains of such a trick
    // are currently unclear.
    void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
    if (LIKELY(NewPtr)) {
      memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
      quarantineOrDeallocateChunk(Options, OldTaggedPtr, &OldHeader, OldSize);
    }
    return NewPtr;
  }

  // TODO(kostyak): disable() is currently best-effort. There are some small
  //                windows of time when an allocation could still succeed after
  //                this function finishes. We will revisit that later.
  void disable() {
    initThreadMaybe();
#ifdef GWP_ASAN_HOOKS
    GuardedAlloc.disable();
#endif
    TSDRegistry.disable();
    Stats.disable();
    Quarantine.disable();
    Primary.disable();
    Secondary.disable();
  }

  void enable() {
    initThreadMaybe();
    Secondary.enable();
    Primary.enable();
    Quarantine.enable();
    Stats.enable();
    TSDRegistry.enable();
#ifdef GWP_ASAN_HOOKS
    GuardedAlloc.enable();
#endif
  }

  // Returns the number of bytes required to store the statistics, which might
  // be larger than the number of bytes provided. Note that the statistics
  // buffer is not necessarily constant between calls to this function. This
  // can be called with a null buffer or zero size for buffer sizing purposes.
  uptr getStats(char *Buffer, uptr Size) {
    ScopedString Str;
    disable();
    const uptr Length = getStats(&Str) + 1;
    enable();
    if (Length < Size)
      Size = Length;
    if (Buffer && Size) {
      memcpy(Buffer, Str.data(), Size);
      Buffer[Size - 1] = '\0';
    }
    return Length;
  }
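
  // Typical two-call sizing pattern (sketch only; "Instance" stands for an
  // Allocator instance):
  //   const scudo::uptr Needed = Instance.getStats(nullptr, 0);
  //   char *Buf = static_cast<char *>(malloc(Needed));
  //   Instance.getStats(Buf, Needed);
  // Since the statistics may grow between the two calls, the length returned
  // by the second call should be checked as well.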

  void printStats() {
    ScopedString Str;
    disable();
    getStats(&Str);
    enable();
    Str.output();
  }

  void releaseToOS() {
    initThreadMaybe();
    Primary.releaseToOS();
    Secondary.releaseToOS();
  }

  // Iterate over all chunks and call a callback for all busy chunks located
  // within the provided memory range. Said callback must not use this allocator
  // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
  void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
                         void *Arg) {
    initThreadMaybe();
    if (archSupportsMemoryTagging())
      Base = untagPointer(Base);
    const uptr From = Base;
    const uptr To = Base + Size;
    bool MayHaveTaggedPrimary = allocatorSupportsMemoryTagging<Params>() &&
                                systemSupportsMemoryTagging();
    auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
                   Arg](uptr Block) {
      if (Block < From || Block >= To)
        return;
      uptr Chunk;
      Chunk::UnpackedHeader Header;
      if (MayHaveTaggedPrimary) {
        // A chunk header can either have a zero tag (tagged primary) or the
        // header tag (secondary, or untagged primary). We don't know which so
        // try both.
        ScopedDisableMemoryTagChecks x;
        if (!getChunkFromBlock(Block, &Chunk, &Header) &&
            !getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
          return;
      } else {
        if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
          return;
      }
      if (Header.State == Chunk::State::Allocated) {
        uptr TaggedChunk = Chunk;
        if (allocatorSupportsMemoryTagging<Params>())
          TaggedChunk = untagPointer(TaggedChunk);
        if (useMemoryTagging<Params>(Primary.Options.load()))
          TaggedChunk = loadTag(Chunk);
        Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
                 Arg);
      }
    };
    Primary.iterateOverBlocks(Lambda);
    Secondary.iterateOverBlocks(Lambda);
#ifdef GWP_ASAN_HOOKS
    GuardedAlloc.iterate(reinterpret_cast<void *>(Base), Size, Callback, Arg);
#endif
  }
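
  // A minimal callback sketch (hypothetical helper, counting busy chunks):
  //   static void CountChunk(uintptr_t ChunkBase, size_t ChunkSize, void *Arg) {
  //     ++*static_cast<size_t *>(Arg);
  //   }
  //   size_t Count = 0;
  //   Instance.iterateOverChunks(From, Length, CountChunk, &Count);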

  bool canReturnNull() {
    initThreadMaybe();
    return Primary.Options.load().get(OptionBit::MayReturnNull);
  }

  bool setOption(Option O, sptr Value) {
    initThreadMaybe();
    if (O == Option::MemtagTuning) {
      // Enabling odd/even tags involves a tradeoff between use-after-free
      // detection and buffer overflow detection. Odd/even tags make it more
      // likely for buffer overflows to be detected by increasing the size of
      // the guaranteed "red zone" around the allocation, but on the other hand
      // use-after-free is less likely to be detected because the tag space for
      // any particular chunk is cut in half. Therefore we use this tuning
      // setting to control whether odd/even tags are enabled.
      if (Value == M_MEMTAG_TUNING_BUFFER_OVERFLOW)
        Primary.Options.set(OptionBit::UseOddEvenTags);
      else if (Value == M_MEMTAG_TUNING_UAF)
        Primary.Options.clear(OptionBit::UseOddEvenTags);
      return true;
    } else {
      // We leave it to the various sub-components to decide whether or not
      // they want to handle the option, but we do not want to short-circuit
      // execution if one of the setOption calls were to return false.
      const bool PrimaryResult = Primary.setOption(O, Value);
      const bool SecondaryResult = Secondary.setOption(O, Value);
      const bool RegistryResult = TSDRegistry.setOption(O, Value);
      return PrimaryResult && SecondaryResult && RegistryResult;
    }
    return false;
  }

  // Return the usable size for a given chunk. Technically we lie, as we just
  // report the actual size of a chunk. This is done to counteract code actively
  // writing past the end of a chunk (like sqlite3) when the usable size allows
  // for it, which then forces realloc to copy the usable size of a chunk as
  // opposed to its actual size.
  uptr getUsableSize(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return 0;

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
      return GuardedAlloc.getSize(Ptr);
#endif // GWP_ASAN_HOOKS

    Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, Ptr, &Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
    return getSize(Ptr, &Header);
  }

  void getStats(StatCounters S) {
    initThreadMaybe();
    Stats.get(S);
  }

  // Returns true if the pointer provided was allocated by the current
  // allocator instance, which is compliant with tcmalloc's ownership concept.
  // A corrupted chunk will not be reported as owned, which is WAI.
  bool isOwned(const void *Ptr) {
    initThreadMaybe();
#ifdef GWP_ASAN_HOOKS
    if (GuardedAlloc.pointerIsMine(Ptr))
      return true;
#endif // GWP_ASAN_HOOKS
    if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))
      return false;
    Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
    Chunk::UnpackedHeader Header;
    return Chunk::isValid(Cookie, Ptr, &Header) &&
           Header.State == Chunk::State::Allocated;
  }

  bool useMemoryTaggingTestOnly() const {
    return useMemoryTagging<Params>(Primary.Options.load());
  }
  void disableMemoryTagging() {
    // If we haven't been initialized yet, we need to initialize now in order to
    // prevent a future call to initThreadMaybe() from enabling memory tagging
    // based on feature detection. But don't call initThreadMaybe() because it
    // may end up calling the allocator (via pthread_atfork, via the post-init
    // callback), which may cause mappings to be created with memory tagging
    // enabled.
    TSDRegistry.initOnceMaybe(this);
    if (allocatorSupportsMemoryTagging<Params>()) {
      Secondary.disableMemoryTagging();
      Primary.Options.clear(OptionBit::UseMemoryTagging);
    }
  }

  void setTrackAllocationStacks(bool Track) {
    initThreadMaybe();
    if (Track)
      Primary.Options.set(OptionBit::TrackAllocationStacks);
    else
      Primary.Options.clear(OptionBit::TrackAllocationStacks);
  }

  void setFillContents(FillContentsMode FillContents) {
    initThreadMaybe();
    Primary.Options.setFillContentsMode(FillContents);
  }

  void setAddLargeAllocationSlack(bool AddSlack) {
    initThreadMaybe();
    if (AddSlack)
      Primary.Options.set(OptionBit::AddLargeAllocationSlack);
    else
      Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
  }

  const char *getStackDepotAddress() const {
    return reinterpret_cast<const char *>(&Depot);
  }

  const char *getRegionInfoArrayAddress() const {
    return Primary.getRegionInfoArrayAddress();
  }

  static uptr getRegionInfoArraySize() {
    return PrimaryT::getRegionInfoArraySize();
  }

  const char *getRingBufferAddress() const {
    return reinterpret_cast<const char *>(&RingBuffer);
  }

  static uptr getRingBufferSize() { return sizeof(RingBuffer); }

  static const uptr MaxTraceSize = 64;

  static void collectTraceMaybe(const StackDepot *Depot,
                                uintptr_t (&Trace)[MaxTraceSize], u32 Hash) {
    uptr RingPos, Size;
    if (!Depot->find(Hash, &RingPos, &Size))
      return;
    for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
      Trace[I] = (*Depot)[RingPos + I];
  }

  static void getErrorInfo(struct scudo_error_info *ErrorInfo,
                           uintptr_t FaultAddr, const char *DepotPtr,
                           const char *RegionInfoPtr, const char *RingBufferPtr,
                           const char *Memory, const char *MemoryTags,
                           uintptr_t MemoryAddr, size_t MemorySize) {
    *ErrorInfo = {};
    if (!allocatorSupportsMemoryTagging<Params>() ||
        MemoryAddr + MemorySize < MemoryAddr)
      return;

    auto *Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
    size_t NextErrorReport = 0;

    // Check for OOB in the current block and the two surrounding blocks. Beyond
    // that, UAF is more likely.
    if (extractTag(FaultAddr) != 0)
      getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
                         RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
                         MemorySize, 0, 2);

    // Check the ring buffer. For primary allocations this will only find UAF;
    // for secondary allocations we can find either UAF or OOB.
    getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
                           RingBufferPtr);

    // Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
    // Beyond that we are likely to hit false positives.
    if (extractTag(FaultAddr) != 0)
      getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
                         RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
                         MemorySize, 2, 16);
  }

private:
  using SecondaryT = MapAllocator<Params>;
  typedef typename PrimaryT::SizeClassMap SizeClassMap;

  static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
  static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
  static const uptr MinAlignment = 1UL << MinAlignmentLog;
  static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);

  static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
                "Minimal alignment must at least cover a chunk header.");
  static_assert(!allocatorSupportsMemoryTagging<Params>() ||
                    MinAlignment >= archMemoryTagGranuleSize(),
                "");

  static const u32 BlockMarker = 0x44554353U;

  // These are indexes into an "array" of 32-bit values that store information
  // inline with a chunk that is relevant to diagnosing memory tag faults, where
  // 0 corresponds to the address of the user memory. This means that only
  // negative indexes may be used. The smallest index that may be used is -2,
  // which corresponds to 8 bytes before the user memory, because the chunk
  // header size is 8 bytes and in allocators that support memory tagging the
  // minimum alignment is at least the tag granule size (16 on aarch64).
  static const sptr MemTagAllocationTraceIndex = -2;
  static const sptr MemTagAllocationTidIndex = -1;

  u32 Cookie = 0;
  u32 QuarantineMaxChunkSize = 0;

  GlobalStats Stats;
  PrimaryT Primary;
  SecondaryT Secondary;
  QuarantineT Quarantine;
  TSDRegistryT TSDRegistry;
  pthread_once_t PostInitNonce = PTHREAD_ONCE_INIT;

#ifdef GWP_ASAN_HOOKS
  gwp_asan::GuardedPoolAllocator GuardedAlloc;
  uptr GuardedAllocSlotSize = 0;
#endif // GWP_ASAN_HOOKS

  StackDepot Depot;

  struct AllocationRingBuffer {
    struct Entry {
      atomic_uptr Ptr;
      atomic_uptr AllocationSize;
      atomic_u32 AllocationTrace;
      atomic_u32 AllocationTid;
      atomic_u32 DeallocationTrace;
      atomic_u32 DeallocationTid;
    };

    atomic_uptr Pos;
#ifdef SCUDO_FUZZ
    static const uptr NumEntries = 2;
#else
    static const uptr NumEntries = 32768;
#endif
    Entry Entries[NumEntries];
  };
  AllocationRingBuffer RingBuffer = {};

  // The following might get optimized out by the compiler.
  NOINLINE void performSanityChecks() {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be small. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    Chunk::UnpackedHeader Header = {};
    const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
                                         SizeClassMap::MaxSize - MinAlignment);
    const uptr MaxOffset =
        (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
    Header.Offset = MaxOffset & Chunk::OffsetMask;
    if (UNLIKELY(Header.Offset != MaxOffset))
      reportSanityCheckError("offset");

    // Verify that we can fit the maximum size or amount of unused bytes in the
    // header. Given that the Secondary fits the allocation to a page, the worst
    // case scenario happens in the Primary. It will depend on the second to
    // last and last class sizes, as well as the dynamic base for the Primary.
    // The following is an over-approximation that works for our needs.
    const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
    if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
      reportSanityCheckError("size (or unused bytes)");

    const uptr LargestClassId = SizeClassMap::LargestClassId;
    Header.ClassId = LargestClassId;
    if (UNLIKELY(Header.ClassId != LargestClassId))
      reportSanityCheckError("class ID");
  }

  static inline void *getBlockBegin(const void *Ptr,
                                    Chunk::UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
        (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
  }

  // Return the size of a chunk as requested during its allocation.
  inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
    const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
    if (LIKELY(Header->ClassId))
      return SizeOrUnusedBytes;
    if (allocatorSupportsMemoryTagging<Params>())
      Ptr = untagPointer(const_cast<void *>(Ptr));
    return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
           reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
  }

  void quarantineOrDeallocateChunk(Options Options, void *TaggedPtr,
                                   Chunk::UnpackedHeader *Header, uptr Size) {
    void *Ptr = getHeaderTaggedPointer(TaggedPtr);
    Chunk::UnpackedHeader NewHeader = *Header;
    // If the quarantine is disabled, or if the actual size of the chunk is 0
    // or larger than the maximum allowed, we return the chunk directly to the
    // backend. This purposefully underflows for Size == 0.
    const bool BypassQuarantine = !Quarantine.getCacheSize() ||
                                  ((Size - 1) >= QuarantineMaxChunkSize) ||
                                  !NewHeader.ClassId;
    if (BypassQuarantine)
      NewHeader.State = Chunk::State::Available;
    else
      NewHeader.State = Chunk::State::Quarantined;
    NewHeader.OriginOrWasZeroed = useMemoryTagging<Params>(Options) &&
                                  NewHeader.ClassId &&
                                  !TSDRegistry.getDisableMemInit();
    Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);

    if (UNLIKELY(useMemoryTagging<Params>(Options))) {
      u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
      storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
      if (NewHeader.ClassId) {
        if (!TSDRegistry.getDisableMemInit()) {
          uptr TaggedBegin, TaggedEnd;
          const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
              Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
              NewHeader.ClassId);
          // Exclude the previous tag so that immediate use after free is
          // detected 100% of the time.
          setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
                       &TaggedEnd);
        }
      }
    }
    if (BypassQuarantine) {
      if (allocatorSupportsMemoryTagging<Params>())
        Ptr = untagPointer(Ptr);
      void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
      const uptr ClassId = NewHeader.ClassId;
      if (LIKELY(ClassId)) {
        bool UnlockRequired;
        auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
        TSD->Cache.deallocate(ClassId, BlockBegin);
        if (UnlockRequired)
          TSD->unlock();
      } else {
        if (UNLIKELY(useMemoryTagging<Params>(Options)))
          storeTags(reinterpret_cast<uptr>(BlockBegin),
                    reinterpret_cast<uptr>(Ptr));
        Secondary.deallocate(Options, BlockBegin);
      }
    } else {
      bool UnlockRequired;
      auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
      Quarantine.put(&TSD->QuarantineCache,
                     QuarantineCallback(*this, TSD->Cache), Ptr, Size);
      if (UnlockRequired)
        TSD->unlock();
    }
  }

  bool getChunkFromBlock(uptr Block, uptr *Chunk,
                         Chunk::UnpackedHeader *Header) {
    *Chunk =
        Block + getChunkOffsetFromBlock(reinterpret_cast<const char *>(Block));
    return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header);
  }

  static uptr getChunkOffsetFromBlock(const char *Block) {
    u32 Offset = 0;
    if (reinterpret_cast<const u32 *>(Block)[0] == BlockMarker)
      Offset = reinterpret_cast<const u32 *>(Block)[1];
    return Offset + Chunk::getHeaderSize();
  }
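
  // For reference, this decodes the layout written by allocate() for aligned
  // allocations (in which case Offset >= 8, so the two u32 words fit before
  // the header):
  //   Block: [ BlockMarker | Offset | padding | Header | user data ]
  // with the user data starting at Block + Chunk::getHeaderSize() + Offset.
  // For unaligned allocations Offset is 0 and only the header precedes the
  // user data.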

  // Set the tag of the granule past the end of the allocation to 0, to catch
  // linear overflows even if a previous larger allocation used the same block
  // and tag. Only do this if the granule past the end is in our block, because
  // this would otherwise lead to a SEGV if the allocation covers the entire
  // block and our block is at the end of a mapping. The tag of the next block's
  // header granule will be set to 0, so it will serve the purpose of catching
  // linear overflows in this case.
  //
  // For allocations of size 0 we do not end up storing the address tag to the
  // memory tag space, which getInlineErrorInfo() normally relies on to match
  // address tags against chunks. To allow matching in this case we store the
  // address tag in the first byte of the chunk.
  void storeEndMarker(uptr End, uptr Size, uptr BlockEnd) {
    DCHECK_EQ(BlockEnd, untagPointer(BlockEnd));
    uptr UntaggedEnd = untagPointer(End);
    if (UntaggedEnd != BlockEnd) {
      storeTag(UntaggedEnd);
      if (Size == 0)
        *reinterpret_cast<u8 *>(UntaggedEnd) = extractTag(End);
    }
  }

  void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
                           uptr BlockEnd) {
    // Prepare the granule before the chunk to store the chunk header by setting
    // its tag to 0. Normally its tag will already be 0, but in the case where a
    // chunk holding a low alignment allocation is reused for a higher alignment
    // allocation, the chunk may already have a non-zero tag from the previous
    // allocation.
    storeTag(reinterpret_cast<uptr>(Ptr) - archMemoryTagGranuleSize());

    uptr TaggedBegin, TaggedEnd;
    setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);

    storeEndMarker(TaggedEnd, Size, BlockEnd);
    return reinterpret_cast<void *>(TaggedBegin);
  }

  void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
                         uptr BlockEnd) {
    uptr RoundOldPtr = roundUpTo(OldPtr, archMemoryTagGranuleSize());
    uptr RoundNewPtr;
    if (RoundOldPtr >= NewPtr) {
      // If the allocation is shrinking we just need to set the tag past the end
      // of the allocation to 0. See explanation in storeEndMarker() above.
      RoundNewPtr = roundUpTo(NewPtr, archMemoryTagGranuleSize());
    } else {
      // Set the memory tag of the region
      // [RoundOldPtr, roundUpTo(NewPtr, archMemoryTagGranuleSize()))
      // to the pointer tag stored in OldPtr.
      RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
    }
    storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
  }

  void storePrimaryAllocationStackMaybe(Options Options, void *Ptr) {
    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
      return;
    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
    Ptr32[MemTagAllocationTidIndex] = getThreadID();
  }

  void storeRingBufferEntry(void *Ptr, u32 AllocationTrace, u32 AllocationTid,
                            uptr AllocationSize, u32 DeallocationTrace,
                            u32 DeallocationTid) {
    uptr Pos = atomic_fetch_add(&RingBuffer.Pos, 1, memory_order_relaxed);
    typename AllocationRingBuffer::Entry *Entry =
        &RingBuffer.Entries[Pos % AllocationRingBuffer::NumEntries];

    // First invalidate our entry so that we don't attempt to interpret a
    // partially written state in getRingBufferErrorInfo(). The fences below
    // ensure that the compiler does not move the stores to Ptr in between the
    // stores to the other fields.
1218     atomic_store_relaxed(&Entry->Ptr, 0);
1219 
1220     __atomic_signal_fence(__ATOMIC_SEQ_CST);
1221     atomic_store_relaxed(&Entry->AllocationTrace, AllocationTrace);
1222     atomic_store_relaxed(&Entry->AllocationTid, AllocationTid);
1223     atomic_store_relaxed(&Entry->AllocationSize, AllocationSize);
1224     atomic_store_relaxed(&Entry->DeallocationTrace, DeallocationTrace);
1225     atomic_store_relaxed(&Entry->DeallocationTid, DeallocationTid);
1226     __atomic_signal_fence(__ATOMIC_SEQ_CST);
1227 
1228     atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
1229   }
1230 
1231   void storeSecondaryAllocationStackMaybe(Options Options, void *Ptr,
1232                                           uptr Size) {
1233     if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
1234       return;
1235 
1236     u32 Trace = collectStackTrace();
1237     u32 Tid = getThreadID();
1238 
1239     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
1240     Ptr32[MemTagAllocationTraceIndex] = Trace;
1241     Ptr32[MemTagAllocationTidIndex] = Tid;
1242 
1243     storeRingBufferEntry(untagPointer(Ptr), Trace, Tid, Size, 0, 0);
1244   }
1245 
1246   void storeDeallocationStackMaybe(Options Options, void *Ptr, u8 PrevTag,
1247                                    uptr Size) {
1248     if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
1249       return;
1250 
1251     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
1252     u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
1253     u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];
1254 
1255     u32 DeallocationTrace = collectStackTrace();
1256     u32 DeallocationTid = getThreadID();
1257 
1258     storeRingBufferEntry(addFixedTag(untagPointer(Ptr), PrevTag),
1259                          AllocationTrace, AllocationTid, Size,
1260                          DeallocationTrace, DeallocationTid);
1261   }
1262 
  static const size_t NumErrorReports =
      sizeof(((scudo_error_info *)0)->reports) /
      sizeof(((scudo_error_info *)0)->reports[0]);

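  // Attempt to diagnose a tag mismatch fault by inspecting the blocks
  // surrounding the fault address, using copies of the faulting process's
  // memory and memory tags covering [MemoryAddr, MemoryAddr + MemorySize).
  // Starting with the block nearest the fault address, blocks at increasing
  // distances (bounded by MinDistance/MaxDistance) are checked for an
  // allocated chunk whose tag matches the fault address tag; any match is
  // reported as a buffer overflow or underflow.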
  static void getInlineErrorInfo(struct scudo_error_info *ErrorInfo,
                                 size_t &NextErrorReport, uintptr_t FaultAddr,
                                 const StackDepot *Depot,
                                 const char *RegionInfoPtr, const char *Memory,
                                 const char *MemoryTags, uintptr_t MemoryAddr,
                                 size_t MemorySize, size_t MinDistance,
                                 size_t MaxDistance) {
    uptr UntaggedFaultAddr = untagPointer(FaultAddr);
    u8 FaultAddrTag = extractTag(FaultAddr);
    BlockInfo Info =
        PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedFaultAddr);

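    // Returns the data and memory tag for the granule at Addr, provided that
    // the granule lies entirely within the copied memory range (the second
    // comparison guards against overflow of the address computation).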
    auto GetGranule = [&](uptr Addr, const char **Data, u8 *Tag) -> bool {
      if (Addr < MemoryAddr || Addr + archMemoryTagGranuleSize() < Addr ||
          Addr + archMemoryTagGranuleSize() > MemoryAddr + MemorySize)
        return false;
      *Data = &Memory[Addr - MemoryAddr];
      *Tag = static_cast<u8>(
          MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]);
      return true;
    };

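    // Reads the chunk address, header, user data pointer and memory tag for
    // the block at Addr, failing if any of the required granules fall outside
    // the copied memory range.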
    auto ReadBlock = [&](uptr Addr, uptr *ChunkAddr,
                         Chunk::UnpackedHeader *Header, const u32 **Data,
                         u8 *Tag) {
      const char *BlockBegin;
      u8 BlockBeginTag;
      if (!GetGranule(Addr, &BlockBegin, &BlockBeginTag))
        return false;
      uptr ChunkOffset = getChunkOffsetFromBlock(BlockBegin);
      *ChunkAddr = Addr + ChunkOffset;

      const char *ChunkBegin;
      if (!GetGranule(*ChunkAddr, &ChunkBegin, Tag))
        return false;
      *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>(
          ChunkBegin - Chunk::getHeaderSize());
      *Data = reinterpret_cast<const u32 *>(ChunkBegin);

      // Allocations of size 0 will have stashed the tag in the first byte of
      // the chunk; see storeEndMarker().
      if (Header->SizeOrUnusedBytes == 0)
        *Tag = static_cast<u8>(*ChunkBegin);

      return true;
    };

    if (NextErrorReport == NumErrorReports)
      return;

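    // If the block at BlockAddr holds an allocated chunk whose tag matches
    // the fault address tag, record it as an out-of-bounds candidate. Returns
    // true once the report array is full, which terminates the scan.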
    auto CheckOOB = [&](uptr BlockAddr) {
      if (BlockAddr < Info.RegionBegin || BlockAddr >= Info.RegionEnd)
        return false;

      uptr ChunkAddr;
      Chunk::UnpackedHeader Header;
      const u32 *Data;
      u8 Tag;
      if (!ReadBlock(BlockAddr, &ChunkAddr, &Header, &Data, &Tag) ||
          Header.State != Chunk::State::Allocated || Tag != FaultAddrTag)
        return false;

      auto *R = &ErrorInfo->reports[NextErrorReport++];
      R->error_type =
          UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
      R->allocation_address = ChunkAddr;
      R->allocation_size = Header.SizeOrUnusedBytes;
      collectTraceMaybe(Depot, R->allocation_trace,
                        Data[MemTagAllocationTraceIndex]);
      R->allocation_tid = Data[MemTagAllocationTidIndex];
      return NextErrorReport == NumErrorReports;
    };

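    // Check the block containing the fault address first, then blocks at
    // increasing distances in both directions, stopping early once the report
    // array is full.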
    if (MinDistance == 0 && CheckOOB(Info.BlockBegin))
      return;

    for (size_t I = Max<size_t>(MinDistance, 1); I != MaxDistance; ++I)
      if (CheckOOB(Info.BlockBegin + I * Info.BlockSize) ||
          CheckOOB(Info.BlockBegin - I * Info.BlockSize))
        return;
  }

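  // Attempt to diagnose a fault using the allocation ring buffer: walk the
  // entries from most recent to oldest, matching the fault address against
  // each recorded allocation, and report use-after-free for entries that have
  // been deallocated and buffer overflow/underflow for live ones.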
  static void getRingBufferErrorInfo(struct scudo_error_info *ErrorInfo,
                                     size_t &NextErrorReport,
                                     uintptr_t FaultAddr,
                                     const StackDepot *Depot,
                                     const char *RingBufferPtr) {
    auto *RingBuffer =
        reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
    uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);

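    // Walk backwards from the most recently written entry, visiting at most
    // NumEntries entries. The index arithmetic is unsigned and may wrap; the
    // modulo below maps it back into the buffer.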
    for (uptr I = Pos - 1; I != Pos - 1 - AllocationRingBuffer::NumEntries &&
                           NextErrorReport != NumErrorReports;
         --I) {
      auto *Entry = &RingBuffer->Entries[I % AllocationRingBuffer::NumEntries];
      uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
      if (!EntryPtr)
        continue;

      uptr UntaggedEntryPtr = untagPointer(EntryPtr);
      uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize);
      u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace);
      u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid);
      u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace);
      u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid);

      if (DeallocationTid) {
        // For UAF we only consider in-bounds fault addresses because
        // out-of-bounds UAF is rare and attempting to detect it is very likely
        // to result in false positives.
        if (FaultAddr < EntryPtr || FaultAddr >= EntryPtr + EntrySize)
          continue;
      } else {
        // Ring buffer OOB is only possible with secondary allocations. In this
        // case we are guaranteed a guard region of at least a page on either
        // side of the allocation (guard page on the right, guard page + tagged
        // region on the left), so ignore any faults outside of that range.
        if (FaultAddr < EntryPtr - getPageSizeCached() ||
            FaultAddr >= EntryPtr + EntrySize + getPageSizeCached())
          continue;

        // For UAF the ring buffer will contain two entries, one for the
        // allocation and another for the deallocation. Don't report buffer
        // overflow/underflow using the allocation entry if we have already
        // collected a report from the deallocation entry.
        bool Found = false;
        for (uptr J = 0; J != NextErrorReport; ++J) {
          if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) {
            Found = true;
            break;
          }
        }
        if (Found)
          continue;
      }

      auto *R = &ErrorInfo->reports[NextErrorReport++];
      if (DeallocationTid)
        R->error_type = USE_AFTER_FREE;
      else if (FaultAddr < EntryPtr)
        R->error_type = BUFFER_UNDERFLOW;
      else
        R->error_type = BUFFER_OVERFLOW;

      R->allocation_address = UntaggedEntryPtr;
      R->allocation_size = EntrySize;
      collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace);
      R->allocation_tid = AllocationTid;
      collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace);
      R->deallocation_tid = DeallocationTid;
    }
  }

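  // Appends the primary, secondary and quarantine statistics to Str and
  // returns the length of the resulting string.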
  uptr getStats(ScopedString *Str) {
    Primary.getStats(Str);
    Secondary.getStats(Str);
    Quarantine.getStats(Str);
    return Str->length();
  }
};

} // namespace scudo

#endif // SCUDO_COMBINED_H_