//===-- combined.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_COMBINED_H_
#define SCUDO_COMBINED_H_

#include "chunk.h"
#include "common.h"
#include "flags.h"
#include "flags_parser.h"
#include "interface.h"
#include "local_cache.h"
#include "quarantine.h"
#include "report.h"
#include "secondary.h"
#include "string_utils.h"
#include "tsd.h"

#ifdef GWP_ASAN_HOOKS
#include "gwp_asan/guarded_pool_allocator.h"
// GWP-ASan is declared here in order to avoid indirect call overhead. It's also
// instantiated outside of the Allocator class, as the allocator is only
// zero-initialised. GWP-ASan requires constant initialisation, and the Scudo
// allocator doesn't have a constexpr constructor (see discussion here:
// https://reviews.llvm.org/D69265#inline-624315).
static gwp_asan::GuardedPoolAllocator GuardedAlloc;
#endif // GWP_ASAN_HOOKS

extern "C" inline void EmptyCallback() {}

namespace scudo {

template <class Params, void (*PostInitCallback)(void) = EmptyCallback>
class Allocator {
public:
  using PrimaryT = typename Params::Primary;
  using CacheT = typename PrimaryT::CacheT;
  typedef Allocator<Params, PostInitCallback> ThisT;
  typedef typename Params::template TSDRegistryT<ThisT> TSDRegistryT;

  void callPostInitCallback() {
    static pthread_once_t OnceControl = PTHREAD_ONCE_INIT;
    pthread_once(&OnceControl, PostInitCallback);
  }

  struct QuarantineCallback {
    explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
        : Allocator(Instance), Cache(LocalCache) {}

    // Chunk recycling function: returns a quarantined chunk to the backend,
    // after first making sure it hasn't been tampered with.
    void recycle(void *Ptr) {
      Chunk::UnpackedHeader Header;
      Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
      if (UNLIKELY(Header.State != Chunk::State::Quarantined))
        reportInvalidChunkState(AllocatorAction::Recycling, Ptr);

      Chunk::UnpackedHeader NewHeader = Header;
      NewHeader.State = Chunk::State::Available;
      Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);

      void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
      const uptr ClassId = NewHeader.ClassId;
      if (LIKELY(ClassId))
        Cache.deallocate(ClassId, BlockBegin);
      else
        Allocator.Secondary.deallocate(BlockBegin);
    }

    // We take a shortcut when allocating a quarantine batch by working with the
    // appropriate class ID instead of using Size. The compiler should optimize
    // the class ID computation and work with the associated cache directly.
    void *allocate(UNUSED uptr Size) {
      const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
          sizeof(QuarantineBatch) + Chunk::getHeaderSize());
      void *Ptr = Cache.allocate(QuarantineClassId);
      // Quarantine batch allocation failure is fatal.
      if (UNLIKELY(!Ptr))
        reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));

      Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
                                     Chunk::getHeaderSize());
      Chunk::UnpackedHeader Header = {};
      Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
      Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
      Header.State = Chunk::State::Allocated;
      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);

      return Ptr;
    }

    void deallocate(void *Ptr) {
      const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
          sizeof(QuarantineBatch) + Chunk::getHeaderSize());
      Chunk::UnpackedHeader Header;
      Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);

      if (UNLIKELY(Header.State != Chunk::State::Allocated))
        reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
      DCHECK_EQ(Header.ClassId, QuarantineClassId);
      DCHECK_EQ(Header.Offset, 0);
      DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));

      Chunk::UnpackedHeader NewHeader = Header;
      NewHeader.State = Chunk::State::Available;
      Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
      Cache.deallocate(QuarantineClassId,
                       reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
                                                Chunk::getHeaderSize()));
    }

  private:
    ThisT &Allocator;
    CacheT &Cache;
  };
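
  // Note: the callback is constructed on the fly with the calling thread's
  // local cache, as done in commitBack() and quarantineOrDeallocateChunk()
  // below, e.g.:
  //   Quarantine.put(&TSD->QuarantineCache,
  //                  QuarantineCallback(*this, TSD->Cache), Ptr, Size);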

  typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
  typedef typename QuarantineT::CacheT QuarantineCacheT;

  void initLinkerInitialized() {
    performSanityChecks();

    // Check if hardware CRC32 is supported in the binary and by the platform;
    // if so, opt for the hardware version of the CRC32 checksum.
    if (&computeHardwareCRC32 && hasHardwareCRC32())
      HashAlgorithm = Checksum::HardwareCRC32;

    if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
      Cookie = static_cast<u32>(getMonotonicTime() ^
                                (reinterpret_cast<uptr>(this) >> 4));

    initFlags();
    reportUnrecognizedFlags();

    // Store some flags locally.
    Options.MayReturnNull = getFlags()->may_return_null;
    Options.ZeroContents = getFlags()->zero_contents;
    Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch;
    Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch;
    Options.QuarantineMaxChunkSize =
        static_cast<u32>(getFlags()->quarantine_max_chunk_size);

    Stats.initLinkerInitialized();
    Primary.initLinkerInitialized(getFlags()->release_to_os_interval_ms);
    Secondary.initLinkerInitialized(&Stats);

    Quarantine.init(
        static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
        static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));

#ifdef GWP_ASAN_HOOKS
    gwp_asan::options::Options Opt;
    Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
    // Bear in mind that Scudo has its own alignment guarantees, which are
    // strictly enforced. Scudo exposes the same allocation function for
    // everything from malloc() to posix_memalign(), so in general this flag
    // goes unused, as Scudo will always ask GWP-ASan for an aligned number of
    // bytes.
    Opt.PerfectlyRightAlign = getFlags()->GWP_ASAN_PerfectlyRightAlign;
    Opt.MaxSimultaneousAllocations =
        getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
    Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
    Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
    Opt.Printf = Printf;
    GuardedAlloc.init(Opt);
#endif // GWP_ASAN_HOOKS
  }

  void reset() { memset(this, 0, sizeof(*this)); }

  void unmapTestOnly() {
    TSDRegistry.unmapTestOnly();
    Primary.unmapTestOnly();
  }

  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }

  // The Cache must be provided zero-initialized.
  void initCache(CacheT *Cache) {
    Cache->initLinkerInitialized(&Stats, &Primary);
  }

  // Release the resources used by a TSD, which involves:
  // - draining the local quarantine cache to the global quarantine;
  // - releasing the cached pointers back to the Primary;
  // - unlinking the local stats from the global ones (destroying the cache does
  //   the last two items).
  void commitBack(TSD<ThisT> *TSD) {
    Quarantine.drain(&TSD->QuarantineCache,
                     QuarantineCallback(*this, TSD->Cache));
    TSD->Cache.destroy(&Stats);
  }

  NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
                          uptr Alignment = MinAlignment,
                          bool ZeroContents = false) {
    initThreadMaybe();

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.shouldSample())) {
      if (void *Ptr = GuardedAlloc.allocate(roundUpTo(Size, Alignment)))
        return Ptr;
    }
#endif // GWP_ASAN_HOOKS

    ZeroContents |= static_cast<bool>(Options.ZeroContents);

    if (UNLIKELY(Alignment > MaxAlignment)) {
      if (Options.MayReturnNull)
        return nullptr;
      reportAlignmentTooBig(Alignment, MaxAlignment);
    }
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;

    // If the requested size happens to be 0 (more common than you might think),
    // allocate MinAlignment bytes on top of the header. Then add the extra
    // bytes required to fulfill the alignment requirements: we allocate enough
    // to be sure that there will be an address in the block that will satisfy
    // the alignment.
    const uptr NeededSize =
        roundUpTo(Size, MinAlignment) +
        ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
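    // Worked example (illustrative only, assuming a 16-byte MinAlignment): a
    // 25-byte request rounds up to 32 bytes plus the header size when no extra
    // alignment is requested; the same request with a 64-byte alignment yields
    // 32 + 64 bytes, which is enough to guarantee a 64-byte aligned user
    // address with 32 usable bytes behind it, even in the worst case.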

    // Takes care of extravagantly large sizes as well as integer overflows.
    static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
    if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
      if (Options.MayReturnNull)
        return nullptr;
      reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
    }
    DCHECK_LE(Size, NeededSize);

    void *Block;
    uptr ClassId;
    uptr BlockEnd;
    if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
      ClassId = SizeClassMap::getClassIdBySize(NeededSize);
      DCHECK_NE(ClassId, 0U);
      bool UnlockRequired;
      auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
      Block = TSD->Cache.allocate(ClassId);
      if (UnlockRequired)
        TSD->unlock();
    } else {
      ClassId = 0;
      Block =
          Secondary.allocate(NeededSize, Alignment, &BlockEnd, ZeroContents);
    }

    if (UNLIKELY(!Block)) {
      if (Options.MayReturnNull)
        return nullptr;
      reportOutOfMemory(NeededSize);
    }

    // We only need to zero the contents for Primary backed allocations. This
    // condition is not necessarily unlikely, but since memset is costly, we
    // might as well mark it as such.
    if (UNLIKELY(ZeroContents && ClassId))
      memset(Block, 0, PrimaryT::getSizeByClassId(ClassId));

    const uptr UnalignedUserPtr =
        reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
    const uptr UserPtr = roundUpTo(UnalignedUserPtr, Alignment);

    Chunk::UnpackedHeader Header = {};
    if (UNLIKELY(UnalignedUserPtr != UserPtr)) {
      const uptr Offset = UserPtr - UnalignedUserPtr;
      DCHECK_GE(Offset, 2 * sizeof(u32));
      // The BlockMarker has no security purpose, but is specifically meant for
      // the chunk iteration function that can be used in debugging situations.
      // It is the only situation where we have to locate the start of a chunk
      // based on its block address.
      reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
      reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
      Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
    }
    Header.ClassId = ClassId & Chunk::ClassIdMask;
    Header.State = Chunk::State::Allocated;
    Header.Origin = Origin & Chunk::OriginMask;
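    // For Primary backed chunks this field holds the requested size; for
    // Secondary backed chunks it holds the number of bytes left unused at the
    // end of the backing block, which getSize() later uses to recover the
    // requested size.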
    Header.SizeOrUnusedBytes = (ClassId ? Size : BlockEnd - (UserPtr + Size)) &
                               Chunk::SizeOrUnusedBytesMask;
    void *Ptr = reinterpret_cast<void *>(UserPtr);
    Chunk::storeHeader(Cookie, Ptr, &Header);

    if (&__scudo_allocate_hook)
      __scudo_allocate_hook(Ptr, Size);

    return Ptr;
  }
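
  // A malloc()/free()-style C wrapper would typically pair allocate() and
  // deallocate() along these lines (hypothetical wrapper code):
  //   void *P = Allocator.allocate(Size, Chunk::Origin::Malloc);
  //   ...
  //   Allocator.deallocate(P, Chunk::Origin::Malloc);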

  NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
                           UNUSED uptr Alignment = MinAlignment) {
    // For a deallocation, we only ensure minimal initialization, meaning the
    // thread local data will be left uninitialized for now (when using ELF
    // TLS), and the fallback cache will be used instead. This works around the
    // situation where the only heap operation performed in a thread is a free
    // past the TLS destructors, which would otherwise initialize thread
    // specific data that never gets destroyed properly. Any other heap
    // operation will do a full init.
    initThreadMaybe(/*MinimalInit=*/true);

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
      GuardedAlloc.deallocate(Ptr);
      return;
    }
#endif // GWP_ASAN_HOOKS

    if (&__scudo_deallocate_hook)
      __scudo_deallocate_hook(Ptr);

    if (UNLIKELY(!Ptr))
      return;
    if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
      reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);

    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, Ptr, &Header);

    if (UNLIKELY(Header.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
    if (Options.DeallocTypeMismatch) {
      if (Header.Origin != Origin) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (UNLIKELY(Header.Origin != Chunk::Origin::Memalign ||
                     Origin != Chunk::Origin::Malloc))
          reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
                                    Header.Origin, Origin);
      }
    }

    const uptr Size = getSize(Ptr, &Header);
    if (DeleteSize && Options.DeleteSizeMismatch) {
      if (UNLIKELY(DeleteSize != Size))
        reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
    }

    quarantineOrDeallocateChunk(Ptr, &Header, Size);
  }

  void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
    initThreadMaybe();

    // The following cases are handled by the C wrappers.
    DCHECK_NE(OldPtr, nullptr);
    DCHECK_NE(NewSize, 0);

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
      uptr OldSize = GuardedAlloc.getSize(OldPtr);
      void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
      if (NewPtr)
        memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
      GuardedAlloc.deallocate(OldPtr);
      return NewPtr;
    }
#endif // GWP_ASAN_HOOKS

    if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
      reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);

    Chunk::UnpackedHeader OldHeader;
    Chunk::loadHeader(Cookie, OldPtr, &OldHeader);

    if (UNLIKELY(OldHeader.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);

    // Pointer has to be allocated with a malloc-type function. Some
    // applications think that it is OK to realloc a memalign'ed pointer, which
    // will trigger this check. It really isn't.
    if (Options.DeallocTypeMismatch) {
      if (UNLIKELY(OldHeader.Origin != Chunk::Origin::Malloc))
        reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
                                  OldHeader.Origin, Chunk::Origin::Malloc);
    }

    void *BlockBegin = getBlockBegin(OldPtr, &OldHeader);
    uptr BlockEnd;
    uptr OldSize;
    const uptr ClassId = OldHeader.ClassId;
    if (LIKELY(ClassId)) {
      BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
                 SizeClassMap::getSizeByClassId(ClassId);
      OldSize = OldHeader.SizeOrUnusedBytes;
    } else {
      BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
      OldSize = BlockEnd -
                (reinterpret_cast<uptr>(OldPtr) + OldHeader.SizeOrUnusedBytes);
    }
    // If the new chunk still fits in the previously allocated block (with a
    // reasonable delta), we just keep the old block, and update the chunk
    // header to reflect the size change.
    if (reinterpret_cast<uptr>(OldPtr) + NewSize <= BlockEnd) {
      const uptr Delta =
          OldSize < NewSize ? NewSize - OldSize : OldSize - NewSize;
      if (Delta <= SizeClassMap::MaxSize / 2) {
        Chunk::UnpackedHeader NewHeader = OldHeader;
        NewHeader.SizeOrUnusedBytes =
            (ClassId ? NewSize
                     : BlockEnd - (reinterpret_cast<uptr>(OldPtr) + NewSize)) &
            Chunk::SizeOrUnusedBytesMask;
        Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
        return OldPtr;
      }
    }

    // Otherwise we allocate a new one, and deallocate the old one. Some
    // allocators will allocate an even larger chunk (by a fixed factor) to
    // allow for potential further in-place realloc. The gains of such a trick
    // are currently unclear.
    void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
    if (NewPtr) {
      const uptr OldSize = getSize(OldPtr, &OldHeader);
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
    }
    return NewPtr;
  }

  // TODO(kostyak): disable() is currently best-effort. There are some small
  //                windows of time when an allocation could still succeed after
  //                this function finishes. We will revisit that later.
  void disable() {
    initThreadMaybe();
    TSDRegistry.disable();
    Stats.disable();
    Quarantine.disable();
    Primary.disable();
    Secondary.disable();
  }

  void enable() {
    initThreadMaybe();
    Secondary.enable();
    Primary.enable();
    Quarantine.enable();
    Stats.enable();
    TSDRegistry.enable();
  }

  // Returns the number of bytes required to store the statistics, which might
  // be larger than the number of bytes provided. Note that the statistics
  // buffer is not necessarily constant between calls to this function. This
  // can be called with a null buffer or zero size for buffer sizing purposes.
  uptr getStats(char *Buffer, uptr Size) {
    ScopedString Str(1024);
    disable();
    const uptr Length = getStats(&Str) + 1;
    enable();
    if (Length < Size)
      Size = Length;
    if (Buffer && Size) {
      memcpy(Buffer, Str.data(), Size);
      Buffer[Size - 1] = '\0';
    }
    return Length;
  }
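
  // Sketch of the sizing protocol described above (illustrative only, with a
  // hypothetical Allocator instance; the required length may change between
  // the two calls):
  //   const uptr Length = Allocator.getStats(nullptr, 0);
  //   // ... obtain a Length-byte buffer, then:
  //   Allocator.getStats(Buffer, Length);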

  void printStats() {
    ScopedString Str(1024);
    disable();
    getStats(&Str);
    enable();
    Str.output();
  }

  void releaseToOS() {
    initThreadMaybe();
    Primary.releaseToOS();
  }

  // Iterate over all chunks and call a callback for all busy chunks located
  // within the provided memory range. Said callback must not use this allocator
  // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
  void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
                         void *Arg) {
    initThreadMaybe();
    const uptr From = Base;
    const uptr To = Base + Size;
    auto Lambda = [this, From, To, Callback, Arg](uptr Block) {
      if (Block < From || Block >= To)
        return;
      uptr Chunk;
      Chunk::UnpackedHeader Header;
      if (getChunkFromBlock(Block, &Chunk, &Header) &&
          Header.State == Chunk::State::Allocated)
        Callback(Chunk, getSize(reinterpret_cast<void *>(Chunk), &Header), Arg);
    };
    Primary.iterateOverBlocks(Lambda);
    Secondary.iterateOverBlocks(Lambda);
  }
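
  // Usage sketch (illustrative only): the callback receives each busy chunk's
  // user address, its size, and the opaque Arg pointer. Assuming the
  // (uintptr_t, size_t, void *) callback signature from interface.h:
  //   static void TallyChunk(uintptr_t /*Chunk*/, size_t Size, void *Arg) {
  //     *static_cast<size_t *>(Arg) += Size;
  //   }
  //   size_t Total = 0;
  //   Allocator.iterateOverChunks(Base, Size, TallyChunk, &Total);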

  bool canReturnNull() {
    initThreadMaybe();
    return Options.MayReturnNull;
  }

  // TODO(kostyak): implement this as a "backend" to mallopt.
  bool setOption(UNUSED uptr Option, UNUSED uptr Value) { return false; }

  // Return the usable size for a given chunk. Technically we lie, as we just
  // report the actual size of a chunk. This is done to counteract code actively
  // writing past the end of a chunk (like sqlite3) when the usable size allows
  // for it, which then forces realloc to copy the usable size of a chunk as
  // opposed to its actual size.
  uptr getUsableSize(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return 0;

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
      return GuardedAlloc.getSize(Ptr);
#endif // GWP_ASAN_HOOKS

    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, Ptr, &Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
    return getSize(Ptr, &Header);
  }

  void getStats(StatCounters S) {
    initThreadMaybe();
    Stats.get(S);
  }

  // Returns true if the pointer provided was allocated by the current
  // allocator instance, which is compliant with tcmalloc's ownership concept.
  // A corrupted chunk will not be reported as owned, which is WAI.
  bool isOwned(const void *Ptr) {
    initThreadMaybe();
#ifdef GWP_ASAN_HOOKS
    if (GuardedAlloc.pointerIsMine(Ptr))
      return true;
#endif // GWP_ASAN_HOOKS
    if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))
      return false;
    Chunk::UnpackedHeader Header;
    return Chunk::isValid(Cookie, Ptr, &Header) &&
           Header.State == Chunk::State::Allocated;
  }

private:
  using SecondaryT = typename Params::Secondary;
  typedef typename PrimaryT::SizeClassMap SizeClassMap;

  static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
  static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
  static const uptr MinAlignment = 1UL << MinAlignmentLog;
  static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);

  static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
                "Minimal alignment must at least cover a chunk header.");

  static const u32 BlockMarker = 0x44554353U;

  GlobalStats Stats;
  TSDRegistryT TSDRegistry;
  PrimaryT Primary;
  SecondaryT Secondary;
  QuarantineT Quarantine;

  u32 Cookie;

  struct {
    u8 MayReturnNull : 1;       // may_return_null
    u8 ZeroContents : 1;        // zero_contents
    u8 DeallocTypeMismatch : 1; // dealloc_type_mismatch
    u8 DeleteSizeMismatch : 1;  // delete_size_mismatch
    u32 QuarantineMaxChunkSize; // quarantine_max_chunk_size
  } Options;

  // The following might get optimized out by the compiler.
  NOINLINE void performSanityChecks() {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be small. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    Chunk::UnpackedHeader Header = {};
    const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
                                         SizeClassMap::MaxSize - MinAlignment);
    const uptr MaxOffset =
        (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
    Header.Offset = MaxOffset & Chunk::OffsetMask;
    if (UNLIKELY(Header.Offset != MaxOffset))
      reportSanityCheckError("offset");

    // Verify that we can fit the maximum size or amount of unused bytes in the
    // header. Given that the Secondary fits the allocation to a page, the worst
    // case scenario happens in the Primary. It will depend on the second to
    // last and last class sizes, as well as the dynamic base for the Primary.
    // The following is an over-approximation that works for our needs.
    const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
    if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
      reportSanityCheckError("size (or unused bytes)");

    const uptr LargestClassId = SizeClassMap::LargestClassId;
    Header.ClassId = LargestClassId;
    if (UNLIKELY(Header.ClassId != LargestClassId))
      reportSanityCheckError("class ID");
  }

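  // The Offset header field is stored in multiples of MinAlignment (see the
  // allocation path above), hence the shift below to recover the byte distance
  // between the chunk header and the beginning of the backing block.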
  static inline void *getBlockBegin(const void *Ptr,
                                    Chunk::UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
        (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
  }

  // Return the size of a chunk as requested during its allocation.
  inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
    const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
    if (LIKELY(Header->ClassId))
      return SizeOrUnusedBytes;
    return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
           reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
  }

  ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
    TSDRegistry.initThreadMaybe(this, MinimalInit);
  }

  void quarantineOrDeallocateChunk(void *Ptr, Chunk::UnpackedHeader *Header,
                                   uptr Size) {
    Chunk::UnpackedHeader NewHeader = *Header;
    // If the quarantine is disabled, or if the actual size of the chunk is 0
    // or larger than the maximum allowed, we return the chunk directly to the
    // backend. Logical Or can be short-circuited, which introduces unnecessary
    // conditional jumps, so use bitwise Or and let the compiler be clever.
    const bool BypassQuarantine = !Quarantine.getCacheSize() | !Size |
                                  (Size > Options.QuarantineMaxChunkSize);
    if (BypassQuarantine) {
      NewHeader.State = Chunk::State::Available;
      Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
      void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
      const uptr ClassId = NewHeader.ClassId;
      if (LIKELY(ClassId)) {
        bool UnlockRequired;
        auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
        TSD->Cache.deallocate(ClassId, BlockBegin);
        if (UnlockRequired)
          TSD->unlock();
      } else {
        Secondary.deallocate(BlockBegin);
      }
    } else {
      NewHeader.State = Chunk::State::Quarantined;
      Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
      bool UnlockRequired;
      auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
      Quarantine.put(&TSD->QuarantineCache,
                     QuarantineCallback(*this, TSD->Cache), Ptr, Size);
      if (UnlockRequired)
        TSD->unlock();
    }
  }

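  // Recovers the chunk address from a block address, using the BlockMarker /
  // Offset pair written by allocate() when the user address had to be aligned
  // past its default position.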
  bool getChunkFromBlock(uptr Block, uptr *Chunk,
                         Chunk::UnpackedHeader *Header) {
    u32 Offset = 0;
    if (reinterpret_cast<u32 *>(Block)[0] == BlockMarker)
      Offset = reinterpret_cast<u32 *>(Block)[1];
    *Chunk = Block + Offset + Chunk::getHeaderSize();
    return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header);
  }

  uptr getStats(ScopedString *Str) {
    Primary.getStats(Str);
    Secondary.getStats(Str);
    Quarantine.getStats(Str);
    return Str->length();
  }
};

} // namespace scudo

#endif // SCUDO_COMBINED_H_