1 //===-- combined.h ----------------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8
9 #ifndef SCUDO_COMBINED_H_
10 #define SCUDO_COMBINED_H_
11
12 #include "chunk.h"
13 #include "common.h"
14 #include "flags.h"
15 #include "flags_parser.h"
16 #include "local_cache.h"
17 #include "memtag.h"
18 #include "options.h"
19 #include "quarantine.h"
20 #include "report.h"
21 #include "rss_limit_checker.h"
22 #include "secondary.h"
23 #include "stack_depot.h"
24 #include "string_utils.h"
25 #include "tsd.h"
26
27 #include "scudo/interface.h"
28
29 #ifdef GWP_ASAN_HOOKS
30 #include "gwp_asan/guarded_pool_allocator.h"
31 #include "gwp_asan/optional/backtrace.h"
32 #include "gwp_asan/optional/segv_handler.h"
33 #endif // GWP_ASAN_HOOKS
34
35 extern "C" inline void EmptyCallback() {}
36
37 #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
38 // This function is not part of the NDK so it does not appear in any public
39 // header files. We only declare/use it when targeting the platform.
40 extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf,
41 size_t num_entries);
42 #endif
43
44 namespace scudo {
45
46 template <class Params, void (*PostInitCallback)(void) = EmptyCallback>
47 class Allocator {
48 public:
49 using PrimaryT = typename Params::Primary;
50 using CacheT = typename PrimaryT::CacheT;
51 typedef Allocator<Params, PostInitCallback> ThisT;
52 typedef typename Params::template TSDRegistryT<ThisT> TSDRegistryT;
53
54 void callPostInitCallback() {
55 pthread_once(&PostInitNonce, PostInitCallback);
56 }
57
58 struct QuarantineCallback {
59 explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
60 : Allocator(Instance), Cache(LocalCache) {}
61
62 // Chunk recycling function, returns a quarantined chunk to the backend,
63 // first making sure it hasn't been tampered with.
64 void recycle(void *Ptr) {
65 Chunk::UnpackedHeader Header;
66 Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
67 if (UNLIKELY(Header.State != Chunk::State::Quarantined))
68 reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
69
70 Chunk::UnpackedHeader NewHeader = Header;
71 NewHeader.State = Chunk::State::Available;
72 Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
73
74 if (allocatorSupportsMemoryTagging<Params>())
75 Ptr = untagPointer(Ptr);
76 void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
77 Cache.deallocate(NewHeader.ClassId, BlockBegin);
78 }
79
80 // We take a shortcut when allocating a quarantine batch by working with the
81 // appropriate class ID instead of using Size. The compiler should optimize
82 // the class ID computation and work with the associated cache directly.
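// The batch is stored as a regular Scudo chunk: its header carries the batch
// class ID and an Allocated state, which deallocate() below checks before
// returning the block to the cache.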
83 void *allocate(UNUSED uptr Size) {
84 const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
85 sizeof(QuarantineBatch) + Chunk::getHeaderSize());
86 void *Ptr = Cache.allocate(QuarantineClassId);
87 // Quarantine batch allocation failure is fatal.
88 if (UNLIKELY(!Ptr))
89 reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));
90
91 Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
92 Chunk::getHeaderSize());
93 Chunk::UnpackedHeader Header = {};
94 Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
95 Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
96 Header.State = Chunk::State::Allocated;
97 Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
98
99 // Reset tag to 0 as this chunk may have been previously used for a tagged
100 // user allocation.
101 if (UNLIKELY(useMemoryTagging<Params>(Allocator.Primary.Options.load())))
102 storeTags(reinterpret_cast<uptr>(Ptr),
103 reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));
104
105 return Ptr;
106 }
107
108 void deallocate(void *Ptr) {
109 const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
110 sizeof(QuarantineBatch) + Chunk::getHeaderSize());
111 Chunk::UnpackedHeader Header;
112 Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
113
114 if (UNLIKELY(Header.State != Chunk::State::Allocated))
115 reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
116 DCHECK_EQ(Header.ClassId, QuarantineClassId);
117 DCHECK_EQ(Header.Offset, 0);
118 DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
119
120 Chunk::UnpackedHeader NewHeader = Header;
121 NewHeader.State = Chunk::State::Available;
122 Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
123 Cache.deallocate(QuarantineClassId,
124 reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
125 Chunk::getHeaderSize()));
126 }
127
128 private:
129 ThisT &Allocator;
130 CacheT &Cache;
131 };
132
133 typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
134 typedef typename QuarantineT::CacheT QuarantineCacheT;
135
136 void init() {
137 performSanityChecks();
138
139 // Check if hardware CRC32 is supported in the binary and by the platform;
140 // if so, opt for the hardware CRC32 version of the checksum.
141 if (&computeHardwareCRC32 && hasHardwareCRC32())
142 HashAlgorithm = Checksum::HardwareCRC32;
143
144 if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
145 Cookie = static_cast<u32>(getMonotonicTime() ^
146 (reinterpret_cast<uptr>(this) >> 4));
147
148 initFlags();
149 reportUnrecognizedFlags();
150
151 RssChecker.init(scudo::getFlags()->soft_rss_limit_mb,
152 scudo::getFlags()->hard_rss_limit_mb);
153
154 // Store some flags locally.
155 if (getFlags()->may_return_null)
156 Primary.Options.set(OptionBit::MayReturnNull);
157 if (getFlags()->zero_contents)
158 Primary.Options.setFillContentsMode(ZeroFill);
159 else if (getFlags()->pattern_fill_contents)
160 Primary.Options.setFillContentsMode(PatternOrZeroFill);
161 if (getFlags()->dealloc_type_mismatch)
162 Primary.Options.set(OptionBit::DeallocTypeMismatch);
163 if (getFlags()->delete_size_mismatch)
164 Primary.Options.set(OptionBit::DeleteSizeMismatch);
165 if (allocatorSupportsMemoryTagging<Params>() &&
166 systemSupportsMemoryTagging())
167 Primary.Options.set(OptionBit::UseMemoryTagging);
168 Primary.Options.set(OptionBit::UseOddEvenTags);
169
170 QuarantineMaxChunkSize =
171 static_cast<u32>(getFlags()->quarantine_max_chunk_size);
172
173 Stats.init();
174 const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
175 Primary.init(ReleaseToOsIntervalMs);
176 Secondary.init(&Stats, ReleaseToOsIntervalMs);
177 Quarantine.init(
178 static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
179 static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
180
181 initRingBuffer();
182 }
183
184 // Initialize the embedded GWP-ASan instance. Requires the main allocator to
185 // be functional, best called from PostInitCallback.
186 void initGwpAsan() {
187 #ifdef GWP_ASAN_HOOKS
188 gwp_asan::options::Options Opt;
189 Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
190 Opt.MaxSimultaneousAllocations =
191 getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
192 Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
193 Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
194 Opt.Recoverable = getFlags()->GWP_ASAN_Recoverable;
195 // Embedded GWP-ASan is locked through the Scudo atfork handler (via
196 // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
197 // handler.
198 Opt.InstallForkHandlers = false;
199 Opt.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
200 GuardedAlloc.init(Opt);
201
202 if (Opt.InstallSignalHandlers)
203 gwp_asan::segv_handler::installSignalHandlers(
204 &GuardedAlloc, Printf,
205 gwp_asan::backtrace::getPrintBacktraceFunction(),
206 gwp_asan::backtrace::getSegvBacktraceFunction(),
207 Opt.Recoverable);
208
209 GuardedAllocSlotSize =
210 GuardedAlloc.getAllocatorState()->maximumAllocationSize();
211 Stats.add(StatFree, static_cast<uptr>(Opt.MaxSimultaneousAllocations) *
212 GuardedAllocSlotSize);
213 #endif // GWP_ASAN_HOOKS
214 }
215
216 #ifdef GWP_ASAN_HOOKS
217 const gwp_asan::AllocationMetadata *getGwpAsanAllocationMetadata() {
218 return GuardedAlloc.getMetadataRegion();
219 }
220
221 const gwp_asan::AllocatorState *getGwpAsanAllocatorState() {
222 return GuardedAlloc.getAllocatorState();
223 }
224 #endif // GWP_ASAN_HOOKS
225
226 ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
227 TSDRegistry.initThreadMaybe(this, MinimalInit);
228 }
229
230 void unmapTestOnly() {
231 TSDRegistry.unmapTestOnly(this);
232 Primary.unmapTestOnly();
233 Secondary.unmapTestOnly();
234 #ifdef GWP_ASAN_HOOKS
235 if (getFlags()->GWP_ASAN_InstallSignalHandlers)
236 gwp_asan::segv_handler::uninstallSignalHandlers();
237 GuardedAlloc.uninitTestOnly();
238 #endif // GWP_ASAN_HOOKS
239 }
240
241 TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
242
243 // The Cache must be provided zero-initialized.
244 void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
245
246 // Release the resources used by a TSD, which involves:
247 // - draining the local quarantine cache to the global quarantine;
248 // - releasing the cached pointers back to the Primary;
249 // - unlinking the local stats from the global ones (destroying the cache does
250 // the last two items).
251 void commitBack(TSD<ThisT> *TSD) {
252 Quarantine.drain(&TSD->QuarantineCache,
253 QuarantineCallback(*this, TSD->Cache));
254 TSD->Cache.destroy(&Stats);
255 }
256
257 ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
258 if (!allocatorSupportsMemoryTagging<Params>())
259 return Ptr;
260 auto UntaggedPtr = untagPointer(Ptr);
261 if (UntaggedPtr != Ptr)
262 return UntaggedPtr;
263 // Secondary, or pointer allocated while memory tagging is unsupported or
264 // disabled. The tag mismatch is okay in the latter case because tags will
265 // not be checked.
266 return addHeaderTag(Ptr);
267 }
268
269 ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
270 if (!allocatorSupportsMemoryTagging<Params>())
271 return Ptr;
272 return addFixedTag(Ptr, 2);
273 }
274
275 ALWAYS_INLINE void *addHeaderTag(void *Ptr) {
276 return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
277 }
278
279 NOINLINE u32 collectStackTrace() {
280 #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
281 // Discard collectStackTrace() frame and allocator function frame.
282 constexpr uptr DiscardFrames = 2;
283 uptr Stack[MaxTraceSize + DiscardFrames];
284 uptr Size =
285 android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
286 Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
287 return Depot.insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
288 #else
289 return 0;
290 #endif
291 }
292
293 uptr computeOddEvenMaskForPointerMaybe(Options Options, uptr Ptr,
294 uptr ClassId) {
295 if (!Options.get(OptionBit::UseOddEvenTags))
296 return 0;
297
298 // If a chunk's tag is odd, we want the tags of the surrounding blocks to be
299 // even, and vice versa. Blocks are laid out Size bytes apart, and adding
300 // Size to Ptr will flip the least significant set bit of Size in Ptr, so
301 // that bit will have the pattern 010101... for consecutive blocks, which we
302 // can use to determine which tag mask to use.
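// (Illustrative example: for a 64-byte size class, getSizeLSBByClassId()
// is 6 and bit 6 of the block address alternates between consecutive
// blocks, so neighbors alternate between the exclude masks 0x5555 and
// 0xAAAA, i.e. between odd-only and even-only tag choices.)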
303 return 0x5555U << ((Ptr >> SizeClassMap::getSizeLSBByClassId(ClassId)) & 1);
304 }
305
306 NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
307 uptr Alignment = MinAlignment,
308 bool ZeroContents = false) {
309 initThreadMaybe();
310
311 const Options Options = Primary.Options.load();
312 if (UNLIKELY(Alignment > MaxAlignment)) {
313 if (Options.get(OptionBit::MayReturnNull))
314 return nullptr;
315 reportAlignmentTooBig(Alignment, MaxAlignment);
316 }
317 if (Alignment < MinAlignment)
318 Alignment = MinAlignment;
319
320 #ifdef GWP_ASAN_HOOKS
321 if (UNLIKELY(GuardedAlloc.shouldSample())) {
322 if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
323 if (UNLIKELY(&__scudo_allocate_hook))
324 __scudo_allocate_hook(Ptr, Size);
325 Stats.lock();
326 Stats.add(StatAllocated, GuardedAllocSlotSize);
327 Stats.sub(StatFree, GuardedAllocSlotSize);
328 Stats.unlock();
329 return Ptr;
330 }
331 }
332 #endif // GWP_ASAN_HOOKS
333
334 const FillContentsMode FillContents = ZeroContents ? ZeroFill
335 : TSDRegistry.getDisableMemInit()
336 ? NoFill
337 : Options.getFillContentsMode();
338
339 // If the requested size happens to be 0 (more common than you might think),
340 // allocate MinAlignment bytes on top of the header. Then add the extra
341 // bytes required to fulfill the alignment requirements: we allocate enough
342 // to be sure that there will be an address in the block that will satisfy
343 // the alignment.
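// (For example, assuming a 16-byte MinAlignment and a 16-byte rounded-up
// header: malloc(20) needs roundUpTo(20, 16) + 16 = 48 bytes, while a
// 64-byte-aligned request for 20 bytes needs 32 + 64 = 96 bytes so that
// some 64-byte-aligned address within the block can hold the user data.)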
344 const uptr NeededSize =
345 roundUpTo(Size, MinAlignment) +
346 ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
347
348 // Takes care of extravagantly large sizes as well as integer overflows.
349 static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
350 if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
351 if (Options.get(OptionBit::MayReturnNull))
352 return nullptr;
353 reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
354 }
355 DCHECK_LE(Size, NeededSize);
356
357 switch (RssChecker.getRssLimitExceeded()) {
358 case RssLimitChecker::Neither:
359 break;
360 case RssLimitChecker::Soft:
361 if (Options.get(OptionBit::MayReturnNull))
362 return nullptr;
363 reportSoftRSSLimit(RssChecker.getSoftRssLimit());
364 break;
365 case RssLimitChecker::Hard:
366 reportHardRSSLimit(RssChecker.getHardRssLimit());
367 break;
368 }
369
370 void *Block = nullptr;
371 uptr ClassId = 0;
372 uptr SecondaryBlockEnd = 0;
373 if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
374 ClassId = SizeClassMap::getClassIdBySize(NeededSize);
375 DCHECK_NE(ClassId, 0U);
376 bool UnlockRequired;
377 auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
378 Block = TSD->Cache.allocate(ClassId);
379 // If the allocation failed, the most likely reason with a 32-bit primary
380 // is the region being full. In that event, retry in each successively
381 // larger class until it fits. If it fails to fit in the largest class,
382 // fall back to the Secondary.
383 if (UNLIKELY(!Block)) {
384 while (ClassId < SizeClassMap::LargestClassId && !Block)
385 Block = TSD->Cache.allocate(++ClassId);
386 if (!Block)
387 ClassId = 0;
388 }
389 if (UnlockRequired)
390 TSD->unlock();
391 }
392 if (UNLIKELY(ClassId == 0))
393 Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
394 FillContents);
395
396 if (UNLIKELY(!Block)) {
397 if (Options.get(OptionBit::MayReturnNull))
398 return nullptr;
399 reportOutOfMemory(NeededSize);
400 }
401
402 const uptr BlockUptr = reinterpret_cast<uptr>(Block);
403 const uptr UnalignedUserPtr = BlockUptr + Chunk::getHeaderSize();
404 const uptr UserPtr = roundUpTo(UnalignedUserPtr, Alignment);
405
406 void *Ptr = reinterpret_cast<void *>(UserPtr);
407 void *TaggedPtr = Ptr;
408 if (LIKELY(ClassId)) {
409 // We only need to zero or tag the contents for Primary backed
410 // allocations. We only set tags for primary allocations in order to avoid
411 // faulting potentially large numbers of pages for large secondary
412 // allocations. We assume that guard pages are enough to protect these
413 // allocations.
414 //
415 // FIXME: When the kernel provides a way to set the background tag of a
416 // mapping, we should be able to tag secondary allocations as well.
417 //
418 // When memory tagging is enabled, zeroing the contents is done as part of
419 // setting the tag.
420 if (UNLIKELY(useMemoryTagging<Params>(Options))) {
421 uptr PrevUserPtr;
422 Chunk::UnpackedHeader Header;
423 const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
424 const uptr BlockEnd = BlockUptr + BlockSize;
425 // If possible, try to reuse the UAF tag that was set by deallocate().
426 // For simplicity, only reuse tags if we have the same start address as
427 // the previous allocation. This handles the majority of cases since
428 // most allocations will not be more aligned than the minimum alignment.
429 //
430 // We need to handle situations involving reclaimed chunks, and retag
431 // the reclaimed portions if necessary. In the case where the chunk is
432 // fully reclaimed, the chunk's header will be zero, which will trigger
433 // the code path for new mappings and invalid chunks that prepares the
434 // chunk from scratch. There are three possibilities for partial
435 // reclaiming:
436 //
437 // (1) Header was reclaimed, data was partially reclaimed.
438 // (2) Header was not reclaimed, all data was reclaimed (e.g. because
439 // data started on a page boundary).
440 // (3) Header was not reclaimed, data was partially reclaimed.
441 //
442 // Case (1) will be handled in the same way as for full reclaiming,
443 // since the header will be zero.
444 //
445 // We can detect case (2) by loading the tag from the start
446 // of the chunk. If it is zero, it means that either all data was
447 // reclaimed (since we never use zero as the chunk tag), or that the
448 // previous allocation was of size zero. Either way, we need to prepare
449 // a new chunk from scratch.
450 //
451 // We can detect case (3) by moving to the next page (if covered by the
452 // chunk) and loading the tag of its first granule. If it is zero, it
453 // means that all following pages may need to be retagged. On the other
454 // hand, if it is nonzero, we can assume that all following pages are
455 // still tagged, according to the logic that if any of the pages
456 // following the next page were reclaimed, the next page would have been
457 // reclaimed as well.
458 uptr TaggedUserPtr;
459 if (getChunkFromBlock(BlockUptr, &PrevUserPtr, &Header) &&
460 PrevUserPtr == UserPtr &&
461 (TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
462 uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
463 const uptr NextPage = roundUpTo(TaggedUserPtr, getPageSizeCached());
464 if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
465 PrevEnd = NextPage;
466 TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
467 resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, Size, BlockEnd);
468 if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
469 // If an allocation needs to be zeroed (i.e. calloc) we can normally
470 // avoid zeroing the memory now since we can rely on memory having
471 // been zeroed on free, as this is normally done while setting the
472 // UAF tag. But if tagging was disabled per-thread when the memory
473 // was freed, it would not have been retagged and thus zeroed, and
474 // therefore it needs to be zeroed now.
475 memset(TaggedPtr, 0,
476 Min(Size, roundUpTo(PrevEnd - TaggedUserPtr,
477 archMemoryTagGranuleSize())));
478 } else if (Size) {
479 // Clear any stack metadata that may have previously been stored in
480 // the chunk data.
481 memset(TaggedPtr, 0, archMemoryTagGranuleSize());
482 }
483 } else {
484 const uptr OddEvenMask =
485 computeOddEvenMaskForPointerMaybe(Options, BlockUptr, ClassId);
486 TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
487 }
488 storePrimaryAllocationStackMaybe(Options, Ptr);
489 } else {
490 Block = addHeaderTag(Block);
491 Ptr = addHeaderTag(Ptr);
492 if (UNLIKELY(FillContents != NoFill)) {
493 // This condition is not necessarily unlikely, but since memset is
494 // costly, we might as well mark it as such.
495 memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
496 PrimaryT::getSizeByClassId(ClassId));
497 }
498 }
499 } else {
500 Block = addHeaderTag(Block);
501 Ptr = addHeaderTag(Ptr);
502 if (UNLIKELY(useMemoryTagging<Params>(Options))) {
503 storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
504 storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
505 }
506 }
507
508 Chunk::UnpackedHeader Header = {};
509 if (UNLIKELY(UnalignedUserPtr != UserPtr)) {
510 const uptr Offset = UserPtr - UnalignedUserPtr;
511 DCHECK_GE(Offset, 2 * sizeof(u32));
512 // The BlockMarker has no security purpose, but is specifically meant for
513 // the chunk iteration function that can be used in debugging situations.
514 // It is the only situation where we have to locate the start of a chunk
515 // based on its block address.
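// (Resulting block layout: [BlockMarker:u32][Offset:u32][padding...]
// [chunk header][user data], so getChunkOffsetFromBlock() can recover the
// user pointer as Block + Offset + Chunk::getHeaderSize().)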
516 reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
517 reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
518 Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
519 }
520 Header.ClassId = ClassId & Chunk::ClassIdMask;
521 Header.State = Chunk::State::Allocated;
522 Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
523 Header.SizeOrUnusedBytes =
524 (ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size)) &
525 Chunk::SizeOrUnusedBytesMask;
526 Chunk::storeHeader(Cookie, Ptr, &Header);
527
528 if (UNLIKELY(&__scudo_allocate_hook))
529 __scudo_allocate_hook(TaggedPtr, Size);
530
531 return TaggedPtr;
532 }
533
534 NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
535 UNUSED uptr Alignment = MinAlignment) {
536 // For a deallocation, we only ensure minimal initialization, meaning thread
537 // local data will be left uninitialized for now (when using ELF TLS). The
538 // fallback cache will be used instead. This is a workaround for a situation
539 // where the only heap operation performed in a thread would be a free past
540 // the TLS destructors, which would leave initialized thread-specific data
541 // never properly destroyed. Any other heap operation will do a full init.
542 initThreadMaybe(/*MinimalInit=*/true);
543
544 if (UNLIKELY(&__scudo_deallocate_hook))
545 __scudo_deallocate_hook(Ptr);
546
547 if (UNLIKELY(!Ptr))
548 return;
549
550 #ifdef GWP_ASAN_HOOKS
551 if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
552 GuardedAlloc.deallocate(Ptr);
553 Stats.lock();
554 Stats.add(StatFree, GuardedAllocSlotSize);
555 Stats.sub(StatAllocated, GuardedAllocSlotSize);
556 Stats.unlock();
557 return;
558 }
559 #endif // GWP_ASAN_HOOKS
560
561 if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
562 reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
563
564 void *TaggedPtr = Ptr;
565 Ptr = getHeaderTaggedPointer(Ptr);
566
567 Chunk::UnpackedHeader Header;
568 Chunk::loadHeader(Cookie, Ptr, &Header);
569
570 if (UNLIKELY(Header.State != Chunk::State::Allocated))
571 reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
572
573 const Options Options = Primary.Options.load();
574 if (Options.get(OptionBit::DeallocTypeMismatch)) {
575 if (UNLIKELY(Header.OriginOrWasZeroed != Origin)) {
576 // With the exception of memalign'd chunks, which can still be free'd.
577 if (Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
578 Origin != Chunk::Origin::Malloc)
579 reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
580 Header.OriginOrWasZeroed, Origin);
581 }
582 }
583
584 const uptr Size = getSize(Ptr, &Header);
585 if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
586 if (UNLIKELY(DeleteSize != Size))
587 reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
588 }
589
590 quarantineOrDeallocateChunk(Options, TaggedPtr, &Header, Size);
591 }
592
593 void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
594 initThreadMaybe();
595
596 const Options Options = Primary.Options.load();
597 if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) {
598 if (Options.get(OptionBit::MayReturnNull))
599 return nullptr;
600 reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize);
601 }
602
603 // The following cases are handled by the C wrappers.
604 DCHECK_NE(OldPtr, nullptr);
605 DCHECK_NE(NewSize, 0);
606
607 #ifdef GWP_ASAN_HOOKS
608 if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
609 uptr OldSize = GuardedAlloc.getSize(OldPtr);
610 void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
611 if (NewPtr)
612 memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
613 GuardedAlloc.deallocate(OldPtr);
614 Stats.lock();
615 Stats.add(StatFree, GuardedAllocSlotSize);
616 Stats.sub(StatAllocated, GuardedAllocSlotSize);
617 Stats.unlock();
618 return NewPtr;
619 }
620 #endif // GWP_ASAN_HOOKS
621
622 void *OldTaggedPtr = OldPtr;
623 OldPtr = getHeaderTaggedPointer(OldPtr);
624
625 if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
626 reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
627
628 Chunk::UnpackedHeader OldHeader;
629 Chunk::loadHeader(Cookie, OldPtr, &OldHeader);
630
631 if (UNLIKELY(OldHeader.State != Chunk::State::Allocated))
632 reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
633
634 // Pointer has to be allocated with a malloc-type function. Some
635 // applications think that it is OK to realloc a memalign'ed pointer, which
636 // will trigger this check. It really isn't.
637 if (Options.get(OptionBit::DeallocTypeMismatch)) {
638 if (UNLIKELY(OldHeader.OriginOrWasZeroed != Chunk::Origin::Malloc))
639 reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
640 OldHeader.OriginOrWasZeroed,
641 Chunk::Origin::Malloc);
642 }
643
644 void *BlockBegin = getBlockBegin(OldTaggedPtr, &OldHeader);
645 uptr BlockEnd;
646 uptr OldSize;
647 const uptr ClassId = OldHeader.ClassId;
648 if (LIKELY(ClassId)) {
649 BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
650 SizeClassMap::getSizeByClassId(ClassId);
651 OldSize = OldHeader.SizeOrUnusedBytes;
652 } else {
653 BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
654 OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
655 OldHeader.SizeOrUnusedBytes);
656 }
657 // If the new chunk still fits in the previously allocated block (with a
658 // reasonable delta), we just keep the old block, and update the chunk
659 // header to reflect the size change.
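// The block is reused when the chunk grows (and still fits) or shrinks by
// less than a page; larger shrinks fall through to the
// allocate-copy-deallocate path further down.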
660 if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
661 if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
662 Chunk::UnpackedHeader NewHeader = OldHeader;
663 NewHeader.SizeOrUnusedBytes =
664 (ClassId ? NewSize
665 : BlockEnd -
666 (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
667 Chunk::SizeOrUnusedBytesMask;
668 Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
669 if (UNLIKELY(useMemoryTagging<Params>(Options))) {
670 if (ClassId) {
671 resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
672 reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
673 NewSize, untagPointer(BlockEnd));
674 storePrimaryAllocationStackMaybe(Options, OldPtr);
675 } else {
676 storeSecondaryAllocationStackMaybe(Options, OldPtr, NewSize);
677 }
678 }
679 return OldTaggedPtr;
680 }
681 }
682
683 // Otherwise we allocate a new one, and deallocate the old one. Some
684 // allocators will allocate an even larger chunk (by a fixed factor) to
685 // allow for potential further in-place realloc. The gains of such a trick
686 // are currently unclear.
687 void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
688 if (LIKELY(NewPtr)) {
689 memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
690 quarantineOrDeallocateChunk(Options, OldTaggedPtr, &OldHeader, OldSize);
691 }
692 return NewPtr;
693 }
694
695 // TODO(kostyak): disable() is currently best-effort. There are some small
696 // windows of time when an allocation could still succeed after
697 // this function finishes. We will revisit that later.
698 void disable() {
699 initThreadMaybe();
700 #ifdef GWP_ASAN_HOOKS
701 GuardedAlloc.disable();
702 #endif
703 TSDRegistry.disable();
704 Stats.disable();
705 Quarantine.disable();
706 Primary.disable();
707 Secondary.disable();
708 }
709
710 void enable() {
711 initThreadMaybe();
712 Secondary.enable();
713 Primary.enable();
714 Quarantine.enable();
715 Stats.enable();
716 TSDRegistry.enable();
717 #ifdef GWP_ASAN_HOOKS
718 GuardedAlloc.enable();
719 #endif
720 }
721
722 // The function returns the number of bytes required to store the statistics,
723 // which might be larger than the number of bytes provided. Note that the
724 // statistics buffer is not necessarily constant between calls to this
725 // function. This can be called with a null buffer or zero size for buffer
726 // sizing purposes.
727 uptr getStats(char *Buffer, uptr Size) {
728 ScopedString Str;
729 disable();
730 const uptr Length = getStats(&Str) + 1;
731 enable();
732 if (Length < Size)
733 Size = Length;
734 if (Buffer && Size) {
735 memcpy(Buffer, Str.data(), Size);
736 Buffer[Size - 1] = '\0';
737 }
738 return Length;
739 }
740
741 void printStats() {
742 ScopedString Str;
743 disable();
744 getStats(&Str);
745 enable();
746 Str.output();
747 }
748
749 void releaseToOS() {
750 initThreadMaybe();
751 Primary.releaseToOS();
752 Secondary.releaseToOS();
753 }
754
755 // Iterate over all chunks and call a callback for all busy chunks located
756 // within the provided memory range. Said callback must not use this allocator
757 // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
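// For each busy chunk, the callback receives the chunk's (possibly tagged)
// user address, its size, and the opaque Arg pointer.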
758 void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
759 void *Arg) {
760 initThreadMaybe();
761 if (archSupportsMemoryTagging())
762 Base = untagPointer(Base);
763 const uptr From = Base;
764 const uptr To = Base + Size;
765 bool MayHaveTaggedPrimary = allocatorSupportsMemoryTagging<Params>() &&
766 systemSupportsMemoryTagging();
767 auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
768 Arg](uptr Block) {
769 if (Block < From || Block >= To)
770 return;
771 uptr Chunk;
772 Chunk::UnpackedHeader Header;
773 if (MayHaveTaggedPrimary) {
774 // A chunk header can either have a zero tag (tagged primary) or the
775 // header tag (secondary, or untagged primary). We don't know which so
776 // try both.
777 ScopedDisableMemoryTagChecks x;
778 if (!getChunkFromBlock(Block, &Chunk, &Header) &&
779 !getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
780 return;
781 } else {
782 if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
783 return;
784 }
785 if (Header.State == Chunk::State::Allocated) {
786 uptr TaggedChunk = Chunk;
787 if (allocatorSupportsMemoryTagging<Params>())
788 TaggedChunk = untagPointer(TaggedChunk);
789 if (useMemoryTagging<Params>(Primary.Options.load()))
790 TaggedChunk = loadTag(Chunk);
791 Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
792 Arg);
793 }
794 };
795 Primary.iterateOverBlocks(Lambda);
796 Secondary.iterateOverBlocks(Lambda);
797 #ifdef GWP_ASAN_HOOKS
798 GuardedAlloc.iterate(reinterpret_cast<void *>(Base), Size, Callback, Arg);
799 #endif
800 }
801
802 bool canReturnNull() {
803 initThreadMaybe();
804 return Primary.Options.load().get(OptionBit::MayReturnNull);
805 }
806
807 bool setOption(Option O, sptr Value) {
808 initThreadMaybe();
809 if (O == Option::MemtagTuning) {
810 // Enabling odd/even tags involves a tradeoff between use-after-free
811 // detection and buffer overflow detection. Odd/even tags make it more
812 // likely for buffer overflows to be detected by increasing the size of
813 // the guaranteed "red zone" around the allocation, but on the other hand
814 // use-after-free is less likely to be detected because the tag space for
815 // any particular chunk is cut in half. Therefore we use this tuning
816 // setting to control whether odd/even tags are enabled.
817 if (Value == M_MEMTAG_TUNING_BUFFER_OVERFLOW)
818 Primary.Options.set(OptionBit::UseOddEvenTags);
819 else if (Value == M_MEMTAG_TUNING_UAF)
820 Primary.Options.clear(OptionBit::UseOddEvenTags);
821 return true;
822 } else {
823 // We leave it to the various sub-components to decide whether or not they
824 // want to handle the option, but we do not want to short-circuit
825 // execution if one of the setOption was to return false.
826 const bool PrimaryResult = Primary.setOption(O, Value);
827 const bool SecondaryResult = Secondary.setOption(O, Value);
828 const bool RegistryResult = TSDRegistry.setOption(O, Value);
829 return PrimaryResult && SecondaryResult && RegistryResult;
830 }
831 return false;
832 }
833
834 // Return the usable size for a given chunk. Technically we lie, as we just
835 // report the actual size of a chunk. This is done to counteract code actively
836 // writing past the end of a chunk (like sqlite3) when the usable size allows
837 // for it, which then forces realloc to copy the usable size of a chunk as
838 // opposed to its actual size.
839 uptr getUsableSize(const void *Ptr) {
840 initThreadMaybe();
841 if (UNLIKELY(!Ptr))
842 return 0;
843
844 #ifdef GWP_ASAN_HOOKS
845 if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
846 return GuardedAlloc.getSize(Ptr);
847 #endif // GWP_ASAN_HOOKS
848
849 Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
850 Chunk::UnpackedHeader Header;
851 Chunk::loadHeader(Cookie, Ptr, &Header);
852 // Getting the usable size of a chunk only makes sense if it's allocated.
853 if (UNLIKELY(Header.State != Chunk::State::Allocated))
854 reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
855 return getSize(Ptr, &Header);
856 }
857
858 void getStats(StatCounters S) {
859 initThreadMaybe();
860 Stats.get(S);
861 }
862
863 // Returns true if the pointer provided was allocated by the current
864 // allocator instance, which is compliant with tcmalloc's ownership concept.
865 // A corrupted chunk will not be reported as owned, which is WAI.
866 bool isOwned(const void *Ptr) {
867 initThreadMaybe();
868 #ifdef GWP_ASAN_HOOKS
869 if (GuardedAlloc.pointerIsMine(Ptr))
870 return true;
871 #endif // GWP_ASAN_HOOKS
872 if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))
873 return false;
874 Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
875 Chunk::UnpackedHeader Header;
876 return Chunk::isValid(Cookie, Ptr, &Header) &&
877 Header.State == Chunk::State::Allocated;
878 }
879
880 void setRssLimitsTestOnly(int SoftRssLimitMb, int HardRssLimitMb,
881 bool MayReturnNull) {
882 RssChecker.init(SoftRssLimitMb, HardRssLimitMb);
883 if (MayReturnNull)
884 Primary.Options.set(OptionBit::MayReturnNull);
885 }
886
887 bool useMemoryTaggingTestOnly() const {
888 return useMemoryTagging<Params>(Primary.Options.load());
889 }
890 void disableMemoryTagging() {
891 // If we haven't been initialized yet, we need to initialize now in order to
892 // prevent a future call to initThreadMaybe() from enabling memory tagging
893 // based on feature detection. But don't call initThreadMaybe() because it
894 // may end up calling the allocator (via pthread_atfork, via the post-init
895 // callback), which may cause mappings to be created with memory tagging
896 // enabled.
897 TSDRegistry.initOnceMaybe(this);
898 if (allocatorSupportsMemoryTagging<Params>()) {
899 Secondary.disableMemoryTagging();
900 Primary.Options.clear(OptionBit::UseMemoryTagging);
901 }
902 }
903
904 void setTrackAllocationStacks(bool Track) {
905 initThreadMaybe();
906 if (getFlags()->allocation_ring_buffer_size == 0) {
907 DCHECK(!Primary.Options.load().get(OptionBit::TrackAllocationStacks));
908 return;
909 }
910 if (Track)
911 Primary.Options.set(OptionBit::TrackAllocationStacks);
912 else
913 Primary.Options.clear(OptionBit::TrackAllocationStacks);
914 }
915
916 void setFillContents(FillContentsMode FillContents) {
917 initThreadMaybe();
918 Primary.Options.setFillContentsMode(FillContents);
919 }
920
921 void setAddLargeAllocationSlack(bool AddSlack) {
922 initThreadMaybe();
923 if (AddSlack)
924 Primary.Options.set(OptionBit::AddLargeAllocationSlack);
925 else
926 Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
927 }
928
929 const char *getStackDepotAddress() const {
930 return reinterpret_cast<const char *>(&Depot);
931 }
932
933 const char *getRegionInfoArrayAddress() const {
934 return Primary.getRegionInfoArrayAddress();
935 }
936
937 static uptr getRegionInfoArraySize() {
938 return PrimaryT::getRegionInfoArraySize();
939 }
940
941 const char *getRingBufferAddress() {
942 initThreadMaybe();
943 return RawRingBuffer;
944 }
945
946 uptr getRingBufferSize() {
947 initThreadMaybe();
948 auto *RingBuffer = getRingBuffer();
949 return RingBuffer ? ringBufferSizeInBytes(RingBuffer->Size) : 0;
950 }
951
952 static bool setRingBufferSizeForBuffer(char *Buffer, size_t Size) {
953 // Need at least one entry.
954 if (Size < sizeof(AllocationRingBuffer) +
955 sizeof(typename AllocationRingBuffer::Entry)) {
956 return false;
957 }
958 AllocationRingBuffer *RingBuffer =
959 reinterpret_cast<AllocationRingBuffer *>(Buffer);
960 RingBuffer->Size = (Size - sizeof(AllocationRingBuffer)) /
961 sizeof(typename AllocationRingBuffer::Entry);
962 return true;
963 }
964
965 static const uptr MaxTraceSize = 64;
966
967 static void collectTraceMaybe(const StackDepot *Depot,
968 uintptr_t (&Trace)[MaxTraceSize], u32 Hash) {
969 uptr RingPos, Size;
970 if (!Depot->find(Hash, &RingPos, &Size))
971 return;
972 for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
973 Trace[I] = static_cast<uintptr_t>((*Depot)[RingPos + I]);
974 }
975
976 static void getErrorInfo(struct scudo_error_info *ErrorInfo,
977 uintptr_t FaultAddr, const char *DepotPtr,
978 const char *RegionInfoPtr, const char *RingBufferPtr,
979 const char *Memory, const char *MemoryTags,
980 uintptr_t MemoryAddr, size_t MemorySize) {
981 *ErrorInfo = {};
982 if (!allocatorSupportsMemoryTagging<Params>() ||
983 MemoryAddr + MemorySize < MemoryAddr)
984 return;
985
986 auto *Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
987 size_t NextErrorReport = 0;
988
989 // Check for OOB in the current block and the two surrounding blocks. Beyond
990 // that, UAF is more likely.
991 if (extractTag(FaultAddr) != 0)
992 getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
993 RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
994 MemorySize, 0, 2);
995
996 // Check the ring buffer. For primary allocations this will only find UAF;
997 // for secondary allocations we can find either UAF or OOB.
998 getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
999 RingBufferPtr);
1000
1001 // Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
1002 // Beyond that we are likely to hit false positives.
1003 if (extractTag(FaultAddr) != 0)
1004 getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
1005 RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
1006 MemorySize, 2, 16);
1007 }
1008
1009 private:
1010 using SecondaryT = MapAllocator<Params>;
1011 typedef typename PrimaryT::SizeClassMap SizeClassMap;
1012
1013 static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
1014 static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
1015 static const uptr MinAlignment = 1UL << MinAlignmentLog;
1016 static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
1017 static const uptr MaxAllowedMallocSize =
1018 FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
1019
1020 static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
1021 "Minimal alignment must at least cover a chunk header.");
1022 static_assert(!allocatorSupportsMemoryTagging<Params>() ||
1023 MinAlignment >= archMemoryTagGranuleSize(),
1024 "");
1025
1026 static const u32 BlockMarker = 0x44554353U;
1027
1028 // These are indexes into an "array" of 32-bit values that store information
1029 // inline with a chunk that is relevant to diagnosing memory tag faults, where
1030 // 0 corresponds to the address of the user memory. This means that only
1031 // negative indexes may be used. The smallest index that may be used is -2,
1032 // which corresponds to 8 bytes before the user memory, because the chunk
1033 // header size is 8 bytes and in allocators that support memory tagging the
1034 // minimum alignment is at least the tag granule size (16 on aarch64).
1035 static const sptr MemTagAllocationTraceIndex = -2;
1036 static const sptr MemTagAllocationTidIndex = -1;
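// Layout sketch (assuming a 16-byte header granule on a tagging-capable
// target), with Ptr32 = reinterpret_cast<u32 *>(user pointer):
//   Ptr32[-4..-3]  packed chunk header (8 bytes)
//   Ptr32[-2]      allocation trace hash
//   Ptr32[-1]      allocation thread ID
//   Ptr32[0]       first word of user data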
1037
1038 u32 Cookie = 0;
1039 u32 QuarantineMaxChunkSize = 0;
1040
1041 GlobalStats Stats;
1042 PrimaryT Primary;
1043 SecondaryT Secondary;
1044 QuarantineT Quarantine;
1045 TSDRegistryT TSDRegistry;
1046 pthread_once_t PostInitNonce = PTHREAD_ONCE_INIT;
1047 RssLimitChecker RssChecker;
1048
1049 #ifdef GWP_ASAN_HOOKS
1050 gwp_asan::GuardedPoolAllocator GuardedAlloc;
1051 uptr GuardedAllocSlotSize = 0;
1052 #endif // GWP_ASAN_HOOKS
1053
1054 StackDepot Depot;
1055
1056 struct AllocationRingBuffer {
1057 struct Entry {
1058 atomic_uptr Ptr;
1059 atomic_uptr AllocationSize;
1060 atomic_u32 AllocationTrace;
1061 atomic_u32 AllocationTid;
1062 atomic_u32 DeallocationTrace;
1063 atomic_u32 DeallocationTid;
1064 };
1065
1066 atomic_uptr Pos;
1067 u32 Size;
1068 // An array of Size (at least one) elements of type Entry immediately
1069 // follows this struct.
1070 };
1071 // Pointer to memory mapped area starting with AllocationRingBuffer struct,
1072 // and immediately followed by Size elements of type Entry.
1073 char *RawRingBuffer = {};
1074
1075 // The following might get optimized out by the compiler.
1076 NOINLINE void performSanityChecks() {
1077 // Verify that the header offset field can hold the maximum offset. In the
1078 // case of the Secondary allocator, it takes care of alignment and the
1079 // offset will always be small. In the case of the Primary, the worst case
1080 // scenario happens in the last size class, when the backend allocation
1081 // would already be aligned on the requested alignment, which would happen
1082 // to be the maximum alignment that would fit in that size class. As a
1083 // result, the maximum offset will be at most the maximum alignment for the
1084 // last size class minus the header size, in multiples of MinAlignment.
1085 Chunk::UnpackedHeader Header = {};
1086 const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
1087 SizeClassMap::MaxSize - MinAlignment);
1088 const uptr MaxOffset =
1089 (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
1090 Header.Offset = MaxOffset & Chunk::OffsetMask;
1091 if (UNLIKELY(Header.Offset != MaxOffset))
1092 reportSanityCheckError("offset");
1093
1094 // Verify that we can fit the maximum size or amount of unused bytes in the
1095 // header. Given that the Secondary fits the allocation to a page, the worst
1096 // case scenario happens in the Primary. It will depend on the second to
1097 // last and last class sizes, as well as the dynamic base for the Primary.
1098 // The following is an over-approximation that works for our needs.
1099 const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
1100 Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
1101 if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
1102 reportSanityCheckError("size (or unused bytes)");
1103
1104 const uptr LargestClassId = SizeClassMap::LargestClassId;
1105 Header.ClassId = LargestClassId;
1106 if (UNLIKELY(Header.ClassId != LargestClassId))
1107 reportSanityCheckError("class ID");
1108 }
1109
1110 static inline void *getBlockBegin(const void *Ptr,
1111 Chunk::UnpackedHeader *Header) {
1112 return reinterpret_cast<void *>(
1113 reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
1114 (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
1115 }
1116
1117 // Return the size of a chunk as requested during its allocation.
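// For Primary chunks the header stores the requested size directly; for
// Secondary chunks it stores the unused bytes at the end of the block, so
// the size is recomputed from the block end (see the allocation path).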
1118 inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
1119 const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
1120 if (LIKELY(Header->ClassId))
1121 return SizeOrUnusedBytes;
1122 if (allocatorSupportsMemoryTagging<Params>())
1123 Ptr = untagPointer(const_cast<void *>(Ptr));
1124 return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
1125 reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
1126 }
1127
1128 void quarantineOrDeallocateChunk(Options Options, void *TaggedPtr,
1129 Chunk::UnpackedHeader *Header, uptr Size) {
1130 void *Ptr = getHeaderTaggedPointer(TaggedPtr);
1131 Chunk::UnpackedHeader NewHeader = *Header;
1132 // If the quarantine is disabled, or if the actual size of a chunk is 0 or
1133 // larger than the maximum allowed, we return the chunk directly to the
1134 // backend. This purposefully underflows for Size == 0.
1135 const bool BypassQuarantine = !Quarantine.getCacheSize() ||
1136 ((Size - 1) >= QuarantineMaxChunkSize) ||
1137 !NewHeader.ClassId;
1138 if (BypassQuarantine)
1139 NewHeader.State = Chunk::State::Available;
1140 else
1141 NewHeader.State = Chunk::State::Quarantined;
1142 NewHeader.OriginOrWasZeroed = useMemoryTagging<Params>(Options) &&
1143 NewHeader.ClassId &&
1144 !TSDRegistry.getDisableMemInit();
1145 Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
1146
1147 if (UNLIKELY(useMemoryTagging<Params>(Options))) {
1148 u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
1149 storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
1150 if (NewHeader.ClassId) {
1151 if (!TSDRegistry.getDisableMemInit()) {
1152 uptr TaggedBegin, TaggedEnd;
1153 const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
1154 Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
1155 NewHeader.ClassId);
1156 // Exclude the previous tag so that immediate use after free is
1157 // detected 100% of the time.
1158 setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
1159 &TaggedEnd);
1160 }
1161 }
1162 }
1163 if (BypassQuarantine) {
1164 if (allocatorSupportsMemoryTagging<Params>())
1165 Ptr = untagPointer(Ptr);
1166 void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
1167 const uptr ClassId = NewHeader.ClassId;
1168 if (LIKELY(ClassId)) {
1169 bool UnlockRequired;
1170 auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
1171 TSD->Cache.deallocate(ClassId, BlockBegin);
1172 if (UnlockRequired)
1173 TSD->unlock();
1174 } else {
1175 if (UNLIKELY(useMemoryTagging<Params>(Options)))
1176 storeTags(reinterpret_cast<uptr>(BlockBegin),
1177 reinterpret_cast<uptr>(Ptr));
1178 Secondary.deallocate(Options, BlockBegin);
1179 }
1180 } else {
1181 bool UnlockRequired;
1182 auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
1183 Quarantine.put(&TSD->QuarantineCache,
1184 QuarantineCallback(*this, TSD->Cache), Ptr, Size);
1185 if (UnlockRequired)
1186 TSD->unlock();
1187 }
1188 }
1189
1190 bool getChunkFromBlock(uptr Block, uptr *Chunk,
1191 Chunk::UnpackedHeader *Header) {
1192 *Chunk =
1193 Block + getChunkOffsetFromBlock(reinterpret_cast<const char *>(Block));
1194 return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header);
1195 }
1196
1197 static uptr getChunkOffsetFromBlock(const char *Block) {
1198 u32 Offset = 0;
1199 if (reinterpret_cast<const u32 *>(Block)[0] == BlockMarker)
1200 Offset = reinterpret_cast<const u32 *>(Block)[1];
1201 return Offset + Chunk::getHeaderSize();
1202 }
1203
1204 // Set the tag of the granule past the end of the allocation to 0, to catch
1205 // linear overflows even if a previous larger allocation used the same block
1206 // and tag. Only do this if the granule past the end is in our block, because
1207 // this would otherwise lead to a SEGV if the allocation covers the entire
1208 // block and our block is at the end of a mapping. The tag of the next block's
1209 // header granule will be set to 0, so it will serve the purpose of catching
1210 // linear overflows in this case.
1211 //
1212 // For allocations of size 0 we do not end up storing the address tag to the
1213 // memory tag space, which getInlineErrorInfo() normally relies on to match
1214 // address tags against chunks. To allow matching in this case we store the
1215 // address tag in the first byte of the chunk.
1216 void storeEndMarker(uptr End, uptr Size, uptr BlockEnd) {
1217 DCHECK_EQ(BlockEnd, untagPointer(BlockEnd));
1218 uptr UntaggedEnd = untagPointer(End);
1219 if (UntaggedEnd != BlockEnd) {
1220 storeTag(UntaggedEnd);
1221 if (Size == 0)
1222 *reinterpret_cast<u8 *>(UntaggedEnd) = extractTag(End);
1223 }
1224 }
1225
1226 void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
1227 uptr BlockEnd) {
1228 // Prepare the granule before the chunk to store the chunk header by setting
1229 // its tag to 0. Normally its tag will already be 0, but in the case where a
1230 // chunk holding a low alignment allocation is reused for a higher alignment
1231 // allocation, the chunk may already have a non-zero tag from the previous
1232 // allocation.
1233 storeTag(reinterpret_cast<uptr>(Ptr) - archMemoryTagGranuleSize());
1234
1235 uptr TaggedBegin, TaggedEnd;
1236 setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);
1237
1238 storeEndMarker(TaggedEnd, Size, BlockEnd);
1239 return reinterpret_cast<void *>(TaggedBegin);
1240 }
1241
1242 void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
1243 uptr BlockEnd) {
1244 uptr RoundOldPtr = roundUpTo(OldPtr, archMemoryTagGranuleSize());
1245 uptr RoundNewPtr;
1246 if (RoundOldPtr >= NewPtr) {
1247 // If the allocation is shrinking we just need to set the tag past the end
1248 // of the allocation to 0. See explanation in storeEndMarker() above.
1249 RoundNewPtr = roundUpTo(NewPtr, archMemoryTagGranuleSize());
1250 } else {
1251 // Set the memory tag of the region
1252 // [RoundOldPtr, roundUpTo(NewPtr, archMemoryTagGranuleSize()))
1253 // to the pointer tag stored in OldPtr.
1254 RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
1255 }
1256 storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
1257 }
1258
1259 void storePrimaryAllocationStackMaybe(Options Options, void *Ptr) {
1260 if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
1261 return;
1262 auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
1263 Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
1264 Ptr32[MemTagAllocationTidIndex] = getThreadID();
1265 }
1266
1267 void storeRingBufferEntry(void *Ptr, u32 AllocationTrace, u32 AllocationTid,
1268 uptr AllocationSize, u32 DeallocationTrace,
1269 u32 DeallocationTid) {
1270 uptr Pos = atomic_fetch_add(&getRingBuffer()->Pos, 1, memory_order_relaxed);
1271 typename AllocationRingBuffer::Entry *Entry =
1272 getRingBufferEntry(RawRingBuffer, Pos % getRingBuffer()->Size);
1273
1274 // First invalidate our entry so that we don't attempt to interpret a
1275 // partially written state in getRingBufferErrorInfo(). The fences below
1276 // ensure that the compiler does not move the stores to Ptr in between the
1277 // stores to the other fields.
1278 atomic_store_relaxed(&Entry->Ptr, 0);
1279
1280 __atomic_signal_fence(__ATOMIC_SEQ_CST);
1281 atomic_store_relaxed(&Entry->AllocationTrace, AllocationTrace);
1282 atomic_store_relaxed(&Entry->AllocationTid, AllocationTid);
1283 atomic_store_relaxed(&Entry->AllocationSize, AllocationSize);
1284 atomic_store_relaxed(&Entry->DeallocationTrace, DeallocationTrace);
1285 atomic_store_relaxed(&Entry->DeallocationTid, DeallocationTid);
1286 __atomic_signal_fence(__ATOMIC_SEQ_CST);
1287
1288 atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
1289 }
1290
1291 void storeSecondaryAllocationStackMaybe(Options Options, void *Ptr,
1292 uptr Size) {
1293 if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
1294 return;
1295
1296 u32 Trace = collectStackTrace();
1297 u32 Tid = getThreadID();
1298
1299 auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
1300 Ptr32[MemTagAllocationTraceIndex] = Trace;
1301 Ptr32[MemTagAllocationTidIndex] = Tid;
1302
1303 storeRingBufferEntry(untagPointer(Ptr), Trace, Tid, Size, 0, 0);
1304 }
1305
1306 void storeDeallocationStackMaybe(Options Options, void *Ptr, u8 PrevTag,
1307 uptr Size) {
1308 if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
1309 return;
1310
1311 auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
1312 u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
1313 u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];
1314
1315 u32 DeallocationTrace = collectStackTrace();
1316 u32 DeallocationTid = getThreadID();
1317
1318 storeRingBufferEntry(addFixedTag(untagPointer(Ptr), PrevTag),
1319 AllocationTrace, AllocationTid, Size,
1320 DeallocationTrace, DeallocationTid);
1321 }
1322
1323 static const size_t NumErrorReports =
1324 sizeof(((scudo_error_info *)nullptr)->reports) /
1325 sizeof(((scudo_error_info *)nullptr)->reports[0]);
1326
1327 static void getInlineErrorInfo(struct scudo_error_info *ErrorInfo,
1328 size_t &NextErrorReport, uintptr_t FaultAddr,
1329 const StackDepot *Depot,
1330 const char *RegionInfoPtr, const char *Memory,
1331 const char *MemoryTags, uintptr_t MemoryAddr,
1332 size_t MemorySize, size_t MinDistance,
1333 size_t MaxDistance) {
1334 uptr UntaggedFaultAddr = untagPointer(FaultAddr);
1335 u8 FaultAddrTag = extractTag(FaultAddr);
1336 BlockInfo Info =
1337 PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedFaultAddr);
1338
1339 auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool {
1340 if (Addr < MemoryAddr || Addr + archMemoryTagGranuleSize() < Addr ||
1341 Addr + archMemoryTagGranuleSize() > MemoryAddr + MemorySize)
1342 return false;
1343 *Data = &Memory[Addr - MemoryAddr];
1344 *Tag = static_cast<u8>(
1345 MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]);
1346 return true;
1347 };
1348
1349 auto ReadBlock = [&](uptr Addr, uptr *ChunkAddr,
1350 Chunk::UnpackedHeader *Header, const u32 **Data,
1351 u8 *Tag) {
1352 const char *BlockBegin;
1353 u8 BlockBeginTag;
1354 if (!GetGranule(Addr, &BlockBegin, &BlockBeginTag))
1355 return false;
1356 uptr ChunkOffset = getChunkOffsetFromBlock(BlockBegin);
1357 *ChunkAddr = Addr + ChunkOffset;
1358
1359 const char *ChunkBegin;
1360 if (!GetGranule(*ChunkAddr, &ChunkBegin, Tag))
1361 return false;
1362 *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>(
1363 ChunkBegin - Chunk::getHeaderSize());
1364 *Data = reinterpret_cast<const u32 *>(ChunkBegin);
1365
1366 // Allocations of size 0 will have stashed the tag in the first byte of
1367 // the chunk, see storeEndMarker().
1368 if (Header->SizeOrUnusedBytes == 0)
1369 *Tag = static_cast<u8>(*ChunkBegin);
1370
1371 return true;
1372 };
1373
1374 if (NextErrorReport == NumErrorReports)
1375 return;
1376
1377 auto CheckOOB = [&](uptr BlockAddr) {
1378 if (BlockAddr < Info.RegionBegin || BlockAddr >= Info.RegionEnd)
1379 return false;
1380
1381 uptr ChunkAddr;
1382 Chunk::UnpackedHeader Header;
1383 const u32 *Data;
1384 uint8_t Tag;
1385 if (!ReadBlock(BlockAddr, &ChunkAddr, &Header, &Data, &Tag) ||
1386 Header.State != Chunk::State::Allocated || Tag != FaultAddrTag)
1387 return false;
1388
1389 auto *R = &ErrorInfo->reports[NextErrorReport++];
1390 R->error_type =
1391 UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
1392 R->allocation_address = ChunkAddr;
1393 R->allocation_size = Header.SizeOrUnusedBytes;
1394 collectTraceMaybe(Depot, R->allocation_trace,
1395 Data[MemTagAllocationTraceIndex]);
1396 R->allocation_tid = Data[MemTagAllocationTidIndex];
1397 return NextErrorReport == NumErrorReports;
1398 };
1399
1400 if (MinDistance == 0 && CheckOOB(Info.BlockBegin))
1401 return;
1402
1403 for (size_t I = Max<size_t>(MinDistance, 1); I != MaxDistance; ++I)
1404 if (CheckOOB(Info.BlockBegin + I * Info.BlockSize) ||
1405 CheckOOB(Info.BlockBegin - I * Info.BlockSize))
1406 return;
1407 }
1408
1409 static void getRingBufferErrorInfo(struct scudo_error_info *ErrorInfo,
1410 size_t &NextErrorReport,
1411 uintptr_t FaultAddr,
1412 const StackDepot *Depot,
1413 const char *RingBufferPtr) {
1414 auto *RingBuffer =
1415 reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
1416 if (!RingBuffer || RingBuffer->Size == 0)
1417 return;
1418 uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
1419
1420 for (uptr I = Pos - 1;
1421 I != Pos - 1 - RingBuffer->Size && NextErrorReport != NumErrorReports;
1422 --I) {
1423 auto *Entry = getRingBufferEntry(RingBufferPtr, I % RingBuffer->Size);
1424 uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
1425 if (!EntryPtr)
1426 continue;
1427
1428 uptr UntaggedEntryPtr = untagPointer(EntryPtr);
1429 uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize);
1430 u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace);
1431 u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid);
1432 u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace);
1433 u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid);
1434
1435 if (DeallocationTid) {
1436 // For UAF we only consider in-bounds fault addresses because
1437 // out-of-bounds UAF is rare and attempting to detect it is very likely
1438 // to result in false positives.
1439 if (FaultAddr < EntryPtr || FaultAddr >= EntryPtr + EntrySize)
1440 continue;
1441 } else {
1442 // Ring buffer OOB is only possible with secondary allocations. In this
1443 // case we are guaranteed a guard region of at least a page on either
1444 // side of the allocation (guard page on the right, guard page + tagged
1445 // region on the left), so ignore any faults outside of that range.
1446 if (FaultAddr < EntryPtr - getPageSizeCached() ||
1447 FaultAddr >= EntryPtr + EntrySize + getPageSizeCached())
1448 continue;
1449
1450 // For UAF the ring buffer will contain two entries, one for the
1451 // allocation and another for the deallocation. Don't report buffer
1452 // overflow/underflow using the allocation entry if we have already
1453 // collected a report from the deallocation entry.
1454 bool Found = false;
1455 for (uptr J = 0; J != NextErrorReport; ++J) {
1456 if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) {
1457 Found = true;
1458 break;
1459 }
1460 }
1461 if (Found)
1462 continue;
1463 }
1464
1465 auto *R = &ErrorInfo->reports[NextErrorReport++];
1466 if (DeallocationTid)
1467 R->error_type = USE_AFTER_FREE;
1468 else if (FaultAddr < EntryPtr)
1469 R->error_type = BUFFER_UNDERFLOW;
1470 else
1471 R->error_type = BUFFER_OVERFLOW;
1472
1473 R->allocation_address = UntaggedEntryPtr;
1474 R->allocation_size = EntrySize;
1475 collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace);
1476 R->allocation_tid = AllocationTid;
1477 collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace);
1478 R->deallocation_tid = DeallocationTid;
1479 }
1480 }
1481
1482 uptr getStats(ScopedString *Str) {
1483 Primary.getStats(Str);
1484 Secondary.getStats(Str);
1485 Quarantine.getStats(Str);
1486 return Str->length();
1487 }
1488
1489 static typename AllocationRingBuffer::Entry *
1490 getRingBufferEntry(char *RawRingBuffer, uptr N) {
1491 return &reinterpret_cast<typename AllocationRingBuffer::Entry *>(
1492 &RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
1493 }
1494 static const typename AllocationRingBuffer::Entry *
1495 getRingBufferEntry(const char *RawRingBuffer, uptr N) {
1496 return &reinterpret_cast<const typename AllocationRingBuffer::Entry *>(
1497 &RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
1498 }
1499
1500 void initRingBuffer() {
1501 u32 AllocationRingBufferSize =
1502 static_cast<u32>(getFlags()->allocation_ring_buffer_size);
1503 if (AllocationRingBufferSize < 1)
1504 return;
1505 MapPlatformData Data = {};
1506 RawRingBuffer = static_cast<char *>(
1507 map(/*Addr=*/nullptr,
1508 roundUpTo(ringBufferSizeInBytes(AllocationRingBufferSize), getPageSizeCached()),
1509 "AllocatorRingBuffer", /*Flags=*/0, &Data));
1510 auto *RingBuffer = reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
1511 RingBuffer->Size = AllocationRingBufferSize;
1512 static_assert(sizeof(AllocationRingBuffer) %
1513 alignof(typename AllocationRingBuffer::Entry) ==
1514 0,
1515 "invalid alignment");
1516 }
1517
1518 static constexpr size_t ringBufferSizeInBytes(u32 AllocationRingBufferSize) {
1519 return sizeof(AllocationRingBuffer) +
1520 AllocationRingBufferSize *
1521 sizeof(typename AllocationRingBuffer::Entry);
1522 }
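// (Rough illustration: on a 64-bit target each Entry is 32 bytes, so a
// 32768-entry ring buffer occupies about 1 MiB plus the struct header.)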
1523
1524 inline AllocationRingBuffer *getRingBuffer() {
1525 return reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
1526 }
1527 };
1528
1529 } // namespace scudo
1530
1531 #endif // SCUDO_COMBINED_H_
1532