1 //===-- combined.h ----------------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8
9 #ifndef SCUDO_COMBINED_H_
10 #define SCUDO_COMBINED_H_
11
12 #include "chunk.h"
13 #include "common.h"
14 #include "flags.h"
15 #include "flags_parser.h"
16 #include "local_cache.h"
17 #include "mem_map.h"
18 #include "memtag.h"
19 #include "options.h"
20 #include "quarantine.h"
21 #include "report.h"
22 #include "secondary.h"
23 #include "stack_depot.h"
24 #include "string_utils.h"
25 #include "tsd.h"
26
27 #include "scudo/interface.h"
28
29 #ifdef GWP_ASAN_HOOKS
30 #include "gwp_asan/guarded_pool_allocator.h"
31 #include "gwp_asan/optional/backtrace.h"
32 #include "gwp_asan/optional/segv_handler.h"
33 #endif // GWP_ASAN_HOOKS
34
extern "C" inline void EmptyCallback() {}
36
37 #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
38 // This function is not part of the NDK so it does not appear in any public
39 // header files. We only declare/use it when targeting the platform.
40 extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf,
41 size_t num_entries);
42 #endif
43
44 namespace scudo {
45
46 template <class Config, void (*PostInitCallback)(void) = EmptyCallback>
47 class Allocator {
48 public:
49 using PrimaryT = typename Config::template PrimaryT<Config>;
50 using SecondaryT = typename Config::template SecondaryT<Config>;
51 using CacheT = typename PrimaryT::CacheT;
52 typedef Allocator<Config, PostInitCallback> ThisT;
53 typedef typename Config::template TSDRegistryT<ThisT> TSDRegistryT;
54
  void callPostInitCallback() {
56 pthread_once(&PostInitNonce, PostInitCallback);
57 }
58
59 struct QuarantineCallback {
    explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
61 : Allocator(Instance), Cache(LocalCache) {}
62
63 // Chunk recycling function, returns a quarantined chunk to the backend,
64 // first making sure it hasn't been tampered with.
    void recycle(void *Ptr) {
66 Chunk::UnpackedHeader Header;
67 Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
68 if (UNLIKELY(Header.State != Chunk::State::Quarantined))
69 reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
70
71 Header.State = Chunk::State::Available;
72 Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
73
74 if (allocatorSupportsMemoryTagging<Config>())
75 Ptr = untagPointer(Ptr);
76 void *BlockBegin = Allocator::getBlockBegin(Ptr, &Header);
77 Cache.deallocate(Header.ClassId, BlockBegin);
78 }
79
80 // We take a shortcut when allocating a quarantine batch by working with the
81 // appropriate class ID instead of using Size. The compiler should optimize
82 // the class ID computation and work with the associated cache directly.
    void *allocate(UNUSED uptr Size) {
84 const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
85 sizeof(QuarantineBatch) + Chunk::getHeaderSize());
86 void *Ptr = Cache.allocate(QuarantineClassId);
87 // Quarantine batch allocation failure is fatal.
88 if (UNLIKELY(!Ptr))
89 reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));
90
91 Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
92 Chunk::getHeaderSize());
93 Chunk::UnpackedHeader Header = {};
94 Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
95 Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
96 Header.State = Chunk::State::Allocated;
97 Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
98
99 // Reset tag to 0 as this chunk may have been previously used for a tagged
100 // user allocation.
101 if (UNLIKELY(useMemoryTagging<Config>(Allocator.Primary.Options.load())))
102 storeTags(reinterpret_cast<uptr>(Ptr),
103 reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));
104
105 return Ptr;
106 }
107
    void deallocate(void *Ptr) {
109 const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
110 sizeof(QuarantineBatch) + Chunk::getHeaderSize());
111 Chunk::UnpackedHeader Header;
112 Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
113
114 if (UNLIKELY(Header.State != Chunk::State::Allocated))
115 reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
116 DCHECK_EQ(Header.ClassId, QuarantineClassId);
117 DCHECK_EQ(Header.Offset, 0);
118 DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
119
120 Header.State = Chunk::State::Available;
121 Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
122 Cache.deallocate(QuarantineClassId,
123 reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
124 Chunk::getHeaderSize()));
125 }
126
127 private:
128 ThisT &Allocator;
129 CacheT &Cache;
130 };
131
132 typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
133 typedef typename QuarantineT::CacheT QuarantineCacheT;
134
  void init() {
136 performSanityChecks();
137
138 // Check if hardware CRC32 is supported in the binary and by the platform,
139 // if so, opt for the CRC32 hardware version of the checksum.
140 if (&computeHardwareCRC32 && hasHardwareCRC32())
141 HashAlgorithm = Checksum::HardwareCRC32;
142
143 if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
144 Cookie = static_cast<u32>(getMonotonicTime() ^
145 (reinterpret_cast<uptr>(this) >> 4));
146
147 initFlags();
148 reportUnrecognizedFlags();
149
150 // Store some flags locally.
151 if (getFlags()->may_return_null)
152 Primary.Options.set(OptionBit::MayReturnNull);
153 if (getFlags()->zero_contents)
154 Primary.Options.setFillContentsMode(ZeroFill);
155 else if (getFlags()->pattern_fill_contents)
156 Primary.Options.setFillContentsMode(PatternOrZeroFill);
157 if (getFlags()->dealloc_type_mismatch)
158 Primary.Options.set(OptionBit::DeallocTypeMismatch);
159 if (getFlags()->delete_size_mismatch)
160 Primary.Options.set(OptionBit::DeleteSizeMismatch);
161 if (allocatorSupportsMemoryTagging<Config>() &&
162 systemSupportsMemoryTagging())
163 Primary.Options.set(OptionBit::UseMemoryTagging);
164
165 QuarantineMaxChunkSize =
166 static_cast<u32>(getFlags()->quarantine_max_chunk_size);
167
168 Stats.init();
169 const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
170 Primary.init(ReleaseToOsIntervalMs);
171 Secondary.init(&Stats, ReleaseToOsIntervalMs);
172 Quarantine.init(
173 static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
174 static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
175
176 mapAndInitializeRingBuffer();
177 }
178
179 // Initialize the embedded GWP-ASan instance. Requires the main allocator to
180 // be functional, best called from PostInitCallback.
  void initGwpAsan() {
182 #ifdef GWP_ASAN_HOOKS
183 gwp_asan::options::Options Opt;
184 Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
185 Opt.MaxSimultaneousAllocations =
186 getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
187 Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
188 Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
189 Opt.Recoverable = getFlags()->GWP_ASAN_Recoverable;
190 // Embedded GWP-ASan is locked through the Scudo atfork handler (via
191 // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
192 // handler.
193 Opt.InstallForkHandlers = false;
194 Opt.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
195 GuardedAlloc.init(Opt);
196
197 if (Opt.InstallSignalHandlers)
198 gwp_asan::segv_handler::installSignalHandlers(
199 &GuardedAlloc, Printf,
200 gwp_asan::backtrace::getPrintBacktraceFunction(),
201 gwp_asan::backtrace::getSegvBacktraceFunction(),
202 Opt.Recoverable);
203
204 GuardedAllocSlotSize =
205 GuardedAlloc.getAllocatorState()->maximumAllocationSize();
206 Stats.add(StatFree, static_cast<uptr>(Opt.MaxSimultaneousAllocations) *
207 GuardedAllocSlotSize);
208 #endif // GWP_ASAN_HOOKS
209 }
210
211 #ifdef GWP_ASAN_HOOKS
  const gwp_asan::AllocationMetadata *getGwpAsanAllocationMetadata() {
213 return GuardedAlloc.getMetadataRegion();
214 }
215
  const gwp_asan::AllocatorState *getGwpAsanAllocatorState() {
217 return GuardedAlloc.getAllocatorState();
218 }
219 #endif // GWP_ASAN_HOOKS
220
221 ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
222 TSDRegistry.initThreadMaybe(this, MinimalInit);
223 }
224
  void unmapTestOnly() {
226 unmapRingBuffer();
227 TSDRegistry.unmapTestOnly(this);
228 Primary.unmapTestOnly();
229 Secondary.unmapTestOnly();
230 #ifdef GWP_ASAN_HOOKS
231 if (getFlags()->GWP_ASAN_InstallSignalHandlers)
232 gwp_asan::segv_handler::uninstallSignalHandlers();
233 GuardedAlloc.uninitTestOnly();
234 #endif // GWP_ASAN_HOOKS
235 }
236
  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
  QuarantineT *getQuarantine() { return &Quarantine; }
239
240 // The Cache must be provided zero-initialized.
  void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
242
243 // Release the resources used by a TSD, which involves:
244 // - draining the local quarantine cache to the global quarantine;
245 // - releasing the cached pointers back to the Primary;
246 // - unlinking the local stats from the global ones (destroying the cache does
247 // the last two items).
  void commitBack(TSD<ThisT> *TSD) {
249 TSD->assertLocked(/*BypassCheck=*/true);
250 Quarantine.drain(&TSD->getQuarantineCache(),
251 QuarantineCallback(*this, TSD->getCache()));
252 TSD->getCache().destroy(&Stats);
253 }
254
  void drainCache(TSD<ThisT> *TSD) {
256 TSD->assertLocked(/*BypassCheck=*/true);
257 Quarantine.drainAndRecycle(&TSD->getQuarantineCache(),
258 QuarantineCallback(*this, TSD->getCache()));
259 TSD->getCache().drain();
260 }
  void drainCaches() { TSDRegistry.drainCaches(this); }
262
  ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
264 if (!allocatorSupportsMemoryTagging<Config>())
265 return Ptr;
266 auto UntaggedPtr = untagPointer(Ptr);
267 if (UntaggedPtr != Ptr)
268 return UntaggedPtr;
269 // Secondary, or pointer allocated while memory tagging is unsupported or
270 // disabled. The tag mismatch is okay in the latter case because tags will
271 // not be checked.
272 return addHeaderTag(Ptr);
273 }
274
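  // Chunk headers of Secondary-backed and untagged Primary allocations are
  // addressed through the fixed "header tag" (2 below), while tagged Primary
  // chunks keep a zero tag on their header granule; see the comment in
  // iterateOverChunks() where both possibilities are handled.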
  ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
276 if (!allocatorSupportsMemoryTagging<Config>())
277 return Ptr;
278 return addFixedTag(Ptr, 2);
279 }
280
  ALWAYS_INLINE void *addHeaderTag(void *Ptr) {
282 return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
283 }
284
  NOINLINE u32 collectStackTrace() {
286 #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
287 // Discard collectStackTrace() frame and allocator function frame.
288 constexpr uptr DiscardFrames = 2;
289 uptr Stack[MaxTraceSize + DiscardFrames];
290 uptr Size =
291 android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
292 Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
293 return Depot.insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
294 #else
295 return 0;
296 #endif
297 }
298
  uptr computeOddEvenMaskForPointerMaybe(const Options &Options, uptr Ptr,
300 uptr ClassId) {
301 if (!Options.get(OptionBit::UseOddEvenTags))
302 return 0;
303
304 // If a chunk's tag is odd, we want the tags of the surrounding blocks to be
305 // even, and vice versa. Blocks are laid out Size bytes apart, and adding
306 // Size to Ptr will flip the least significant set bit of Size in Ptr, so
307 // that bit will have the pattern 010101... for consecutive blocks, which we
308 // can use to determine which tag mask to use.
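    // For example, with a 32-byte size class the least significant set bit of
    // Size is bit 5, so (Ptr >> 5) & 1 alternates between 0 and 1 for
    // consecutive blocks, and the exclude mask returned below alternates
    // between 0x5555 (even tags excluded, an odd tag gets picked) and 0xAAAA
    // (odd tags excluded, an even tag gets picked).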
309 return 0x5555U << ((Ptr >> SizeClassMap::getSizeLSBByClassId(ClassId)) & 1);
310 }
311
312 NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
313 uptr Alignment = MinAlignment,
314 bool ZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
315 initThreadMaybe();
316
317 const Options Options = Primary.Options.load();
318 if (UNLIKELY(Alignment > MaxAlignment)) {
319 if (Options.get(OptionBit::MayReturnNull))
320 return nullptr;
321 reportAlignmentTooBig(Alignment, MaxAlignment);
322 }
323 if (Alignment < MinAlignment)
324 Alignment = MinAlignment;
325
326 #ifdef GWP_ASAN_HOOKS
327 if (UNLIKELY(GuardedAlloc.shouldSample())) {
328 if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
329 Stats.lock();
330 Stats.add(StatAllocated, GuardedAllocSlotSize);
331 Stats.sub(StatFree, GuardedAllocSlotSize);
332 Stats.unlock();
333 return Ptr;
334 }
335 }
336 #endif // GWP_ASAN_HOOKS
337
338 const FillContentsMode FillContents = ZeroContents ? ZeroFill
339 : TSDRegistry.getDisableMemInit()
340 ? NoFill
341 : Options.getFillContentsMode();
342
343 // If the requested size happens to be 0 (more common than you might think),
344 // allocate MinAlignment bytes on top of the header. Then add the extra
345 // bytes required to fulfill the alignment requirements: we allocate enough
346 // to be sure that there will be an address in the block that will satisfy
347 // the alignment.
348 const uptr NeededSize =
349 roundUp(Size, MinAlignment) +
350 ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
351
352 // Takes care of extravagantly large sizes as well as integer overflows.
353 static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
354 if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
355 if (Options.get(OptionBit::MayReturnNull))
356 return nullptr;
357 reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
358 }
359 DCHECK_LE(Size, NeededSize);
360
361 void *Block = nullptr;
362 uptr ClassId = 0;
363 uptr SecondaryBlockEnd = 0;
364 if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
365 ClassId = SizeClassMap::getClassIdBySize(NeededSize);
366 DCHECK_NE(ClassId, 0U);
367 bool UnlockRequired;
368 auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
369 TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
370 Block = TSD->getCache().allocate(ClassId);
      // If the allocation failed, retry in each successively larger class
      // until it fits. If it fails to fit in the largest class, fall back to
      // the Secondary.
374 if (UNLIKELY(!Block)) {
375 while (ClassId < SizeClassMap::LargestClassId && !Block)
376 Block = TSD->getCache().allocate(++ClassId);
377 if (!Block)
378 ClassId = 0;
379 }
380 if (UnlockRequired)
381 TSD->unlock();
382 }
383 if (UNLIKELY(ClassId == 0)) {
384 Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
385 FillContents);
386 }
387
388 if (UNLIKELY(!Block)) {
389 if (Options.get(OptionBit::MayReturnNull))
390 return nullptr;
391 printStats();
392 reportOutOfMemory(NeededSize);
393 }
394
395 const uptr BlockUptr = reinterpret_cast<uptr>(Block);
396 const uptr UnalignedUserPtr = BlockUptr + Chunk::getHeaderSize();
397 const uptr UserPtr = roundUp(UnalignedUserPtr, Alignment);
398
399 void *Ptr = reinterpret_cast<void *>(UserPtr);
400 void *TaggedPtr = Ptr;
401 if (LIKELY(ClassId)) {
402 // We only need to zero or tag the contents for Primary backed
403 // allocations. We only set tags for primary allocations in order to avoid
404 // faulting potentially large numbers of pages for large secondary
405 // allocations. We assume that guard pages are enough to protect these
406 // allocations.
407 //
408 // FIXME: When the kernel provides a way to set the background tag of a
409 // mapping, we should be able to tag secondary allocations as well.
410 //
411 // When memory tagging is enabled, zeroing the contents is done as part of
412 // setting the tag.
413 if (UNLIKELY(useMemoryTagging<Config>(Options))) {
414 uptr PrevUserPtr;
415 Chunk::UnpackedHeader Header;
416 const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
417 const uptr BlockEnd = BlockUptr + BlockSize;
418 // If possible, try to reuse the UAF tag that was set by deallocate().
419 // For simplicity, only reuse tags if we have the same start address as
420 // the previous allocation. This handles the majority of cases since
421 // most allocations will not be more aligned than the minimum alignment.
422 //
423 // We need to handle situations involving reclaimed chunks, and retag
424 // the reclaimed portions if necessary. In the case where the chunk is
425 // fully reclaimed, the chunk's header will be zero, which will trigger
426 // the code path for new mappings and invalid chunks that prepares the
427 // chunk from scratch. There are three possibilities for partial
428 // reclaiming:
429 //
430 // (1) Header was reclaimed, data was partially reclaimed.
431 // (2) Header was not reclaimed, all data was reclaimed (e.g. because
432 // data started on a page boundary).
433 // (3) Header was not reclaimed, data was partially reclaimed.
434 //
435 // Case (1) will be handled in the same way as for full reclaiming,
436 // since the header will be zero.
437 //
438 // We can detect case (2) by loading the tag from the start
439 // of the chunk. If it is zero, it means that either all data was
440 // reclaimed (since we never use zero as the chunk tag), or that the
441 // previous allocation was of size zero. Either way, we need to prepare
442 // a new chunk from scratch.
443 //
444 // We can detect case (3) by moving to the next page (if covered by the
445 // chunk) and loading the tag of its first granule. If it is zero, it
446 // means that all following pages may need to be retagged. On the other
447 // hand, if it is nonzero, we can assume that all following pages are
448 // still tagged, according to the logic that if any of the pages
449 // following the next page were reclaimed, the next page would have been
450 // reclaimed as well.
451 uptr TaggedUserPtr;
452 if (getChunkFromBlock(BlockUptr, &PrevUserPtr, &Header) &&
453 PrevUserPtr == UserPtr &&
454 (TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
455 uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
456 const uptr NextPage = roundUp(TaggedUserPtr, getPageSizeCached());
457 if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
458 PrevEnd = NextPage;
459 TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
460 resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, Size, BlockEnd);
461 if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
462 // If an allocation needs to be zeroed (i.e. calloc) we can normally
463 // avoid zeroing the memory now since we can rely on memory having
464 // been zeroed on free, as this is normally done while setting the
465 // UAF tag. But if tagging was disabled per-thread when the memory
466 // was freed, it would not have been retagged and thus zeroed, and
467 // therefore it needs to be zeroed now.
468 memset(TaggedPtr, 0,
469 Min(Size, roundUp(PrevEnd - TaggedUserPtr,
470 archMemoryTagGranuleSize())));
471 } else if (Size) {
472 // Clear any stack metadata that may have previously been stored in
473 // the chunk data.
474 memset(TaggedPtr, 0, archMemoryTagGranuleSize());
475 }
476 } else {
477 const uptr OddEvenMask =
478 computeOddEvenMaskForPointerMaybe(Options, BlockUptr, ClassId);
479 TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
480 }
481 storePrimaryAllocationStackMaybe(Options, Ptr);
482 } else {
483 Block = addHeaderTag(Block);
484 Ptr = addHeaderTag(Ptr);
485 if (UNLIKELY(FillContents != NoFill)) {
486 // This condition is not necessarily unlikely, but since memset is
487 // costly, we might as well mark it as such.
488 memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
489 PrimaryT::getSizeByClassId(ClassId));
490 }
491 }
492 } else {
493 Block = addHeaderTag(Block);
494 Ptr = addHeaderTag(Ptr);
495 if (UNLIKELY(useMemoryTagging<Config>(Options))) {
496 storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
497 storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
498 }
499 }
500
501 Chunk::UnpackedHeader Header = {};
502 if (UNLIKELY(UnalignedUserPtr != UserPtr)) {
503 const uptr Offset = UserPtr - UnalignedUserPtr;
504 DCHECK_GE(Offset, 2 * sizeof(u32));
505 // The BlockMarker has no security purpose, but is specifically meant for
506 // the chunk iteration function that can be used in debugging situations.
507 // It is the only situation where we have to locate the start of a chunk
508 // based on its block address.
509 reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
510 reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
511 Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
512 }
513 Header.ClassId = ClassId & Chunk::ClassIdMask;
514 Header.State = Chunk::State::Allocated;
515 Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
516 Header.SizeOrUnusedBytes =
517 (ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size)) &
518 Chunk::SizeOrUnusedBytesMask;
519 Chunk::storeHeader(Cookie, Ptr, &Header);
520
521 return TaggedPtr;
522 }
523
524 NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
525 UNUSED uptr Alignment = MinAlignment) {
526 if (UNLIKELY(!Ptr))
527 return;
528
    // For a deallocation, we only ensure minimal initialization, meaning that
    // thread-local data will be left uninitialized for now (when using ELF
    // TLS). The fallback cache will be used instead. This is a workaround for
    // a situation where the only heap operation performed in a thread would
    // be a free past the TLS destructors, which would otherwise leave
    // initialized thread-specific data that is never properly destroyed. Any
    // other heap operation will do a full init.
535 initThreadMaybe(/*MinimalInit=*/true);
536
537 #ifdef GWP_ASAN_HOOKS
538 if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
539 GuardedAlloc.deallocate(Ptr);
540 Stats.lock();
541 Stats.add(StatFree, GuardedAllocSlotSize);
542 Stats.sub(StatAllocated, GuardedAllocSlotSize);
543 Stats.unlock();
544 return;
545 }
546 #endif // GWP_ASAN_HOOKS
547
548 if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
549 reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
550
551 void *TaggedPtr = Ptr;
552 Ptr = getHeaderTaggedPointer(Ptr);
553
554 Chunk::UnpackedHeader Header;
555 Chunk::loadHeader(Cookie, Ptr, &Header);
556
557 if (UNLIKELY(Header.State != Chunk::State::Allocated))
558 reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
559
560 const Options Options = Primary.Options.load();
561 if (Options.get(OptionBit::DeallocTypeMismatch)) {
562 if (UNLIKELY(Header.OriginOrWasZeroed != Origin)) {
        // With the exception of memalign'd chunks, which can still be free'd.
564 if (Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
565 Origin != Chunk::Origin::Malloc)
566 reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
567 Header.OriginOrWasZeroed, Origin);
568 }
569 }
570
571 const uptr Size = getSize(Ptr, &Header);
572 if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
573 if (UNLIKELY(DeleteSize != Size))
574 reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
575 }
576
577 quarantineOrDeallocateChunk(Options, TaggedPtr, &Header, Size);
578 }
579
580 void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
581 initThreadMaybe();
582
583 const Options Options = Primary.Options.load();
584 if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) {
585 if (Options.get(OptionBit::MayReturnNull))
586 return nullptr;
587 reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize);
588 }
589
590 // The following cases are handled by the C wrappers.
591 DCHECK_NE(OldPtr, nullptr);
592 DCHECK_NE(NewSize, 0);
593
594 #ifdef GWP_ASAN_HOOKS
595 if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
596 uptr OldSize = GuardedAlloc.getSize(OldPtr);
597 void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
598 if (NewPtr)
599 memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
600 GuardedAlloc.deallocate(OldPtr);
601 Stats.lock();
602 Stats.add(StatFree, GuardedAllocSlotSize);
603 Stats.sub(StatAllocated, GuardedAllocSlotSize);
604 Stats.unlock();
605 return NewPtr;
606 }
607 #endif // GWP_ASAN_HOOKS
608
609 void *OldTaggedPtr = OldPtr;
610 OldPtr = getHeaderTaggedPointer(OldPtr);
611
612 if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
613 reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
614
615 Chunk::UnpackedHeader Header;
616 Chunk::loadHeader(Cookie, OldPtr, &Header);
617
618 if (UNLIKELY(Header.State != Chunk::State::Allocated))
619 reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
620
621 // Pointer has to be allocated with a malloc-type function. Some
622 // applications think that it is OK to realloc a memalign'ed pointer, which
623 // will trigger this check. It really isn't.
624 if (Options.get(OptionBit::DeallocTypeMismatch)) {
625 if (UNLIKELY(Header.OriginOrWasZeroed != Chunk::Origin::Malloc))
626 reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
627 Header.OriginOrWasZeroed,
628 Chunk::Origin::Malloc);
629 }
630
631 void *BlockBegin = getBlockBegin(OldTaggedPtr, &Header);
632 uptr BlockEnd;
633 uptr OldSize;
634 const uptr ClassId = Header.ClassId;
635 if (LIKELY(ClassId)) {
636 BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
637 SizeClassMap::getSizeByClassId(ClassId);
638 OldSize = Header.SizeOrUnusedBytes;
639 } else {
640 BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
641 OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
642 Header.SizeOrUnusedBytes);
643 }
644 // If the new chunk still fits in the previously allocated block (with a
645 // reasonable delta), we just keep the old block, and update the chunk
646 // header to reflect the size change.
647 if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
648 if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
649 Header.SizeOrUnusedBytes =
650 (ClassId ? NewSize
651 : BlockEnd -
652 (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
653 Chunk::SizeOrUnusedBytesMask;
654 Chunk::storeHeader(Cookie, OldPtr, &Header);
655 if (UNLIKELY(useMemoryTagging<Config>(Options))) {
656 if (ClassId) {
657 resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
658 reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
659 NewSize, untagPointer(BlockEnd));
660 storePrimaryAllocationStackMaybe(Options, OldPtr);
661 } else {
662 storeSecondaryAllocationStackMaybe(Options, OldPtr, NewSize);
663 }
664 }
665 return OldTaggedPtr;
666 }
667 }
668
669 // Otherwise we allocate a new one, and deallocate the old one. Some
670 // allocators will allocate an even larger chunk (by a fixed factor) to
671 // allow for potential further in-place realloc. The gains of such a trick
672 // are currently unclear.
673 void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
674 if (LIKELY(NewPtr)) {
675 memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
676 quarantineOrDeallocateChunk(Options, OldTaggedPtr, &Header, OldSize);
677 }
678 return NewPtr;
679 }
680
681 // TODO(kostyak): disable() is currently best-effort. There are some small
682 // windows of time when an allocation could still succeed after
683 // this function finishes. We will revisit that later.
  void disable() NO_THREAD_SAFETY_ANALYSIS {
685 initThreadMaybe();
686 #ifdef GWP_ASAN_HOOKS
687 GuardedAlloc.disable();
688 #endif
689 TSDRegistry.disable();
690 Stats.disable();
691 Quarantine.disable();
692 Primary.disable();
693 Secondary.disable();
694 }
695
  void enable() NO_THREAD_SAFETY_ANALYSIS {
697 initThreadMaybe();
698 Secondary.enable();
699 Primary.enable();
700 Quarantine.enable();
701 Stats.enable();
702 TSDRegistry.enable();
703 #ifdef GWP_ASAN_HOOKS
704 GuardedAlloc.enable();
705 #endif
706 }
707
  // The function returns the number of bytes required to store the
  // statistics, which might be larger than the number of bytes provided.
  // Note that the statistics buffer is not necessarily constant between
  // calls to this function. This can be called with a null buffer or zero
  // size for buffer sizing purposes.
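  // A hypothetical caller-side sketch of the resulting two-call pattern,
  // where Instance stands for some Allocator instance:
  //   const scudo::uptr Required = Instance.getStats(nullptr, 0);
  //   // ... obtain a buffer Buf of Required bytes ...
  //   Instance.getStats(Buf, Required);
  // The copy is truncated (and null-terminated) to the provided size if the
  // statistics grew in the meantime.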
  uptr getStats(char *Buffer, uptr Size) {
714 ScopedString Str;
715 const uptr Length = getStats(&Str) + 1;
716 if (Length < Size)
717 Size = Length;
718 if (Buffer && Size) {
719 memcpy(Buffer, Str.data(), Size);
720 Buffer[Size - 1] = '\0';
721 }
722 return Length;
723 }
724
  void printStats() {
726 ScopedString Str;
727 getStats(&Str);
728 Str.output();
729 }
730
  void printFragmentationInfo() {
732 ScopedString Str;
733 Primary.getFragmentationInfo(&Str);
734 // Secondary allocator dumps the fragmentation data in getStats().
735 Str.output();
736 }
737
  void releaseToOS(ReleaseToOS ReleaseType) {
739 initThreadMaybe();
740 if (ReleaseType == ReleaseToOS::ForceAll)
741 drainCaches();
742 Primary.releaseToOS(ReleaseType);
743 Secondary.releaseToOS();
744 }
745
746 // Iterate over all chunks and call a callback for all busy chunks located
747 // within the provided memory range. Said callback must not use this allocator
748 // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
  void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
750 void *Arg) {
751 initThreadMaybe();
752 if (archSupportsMemoryTagging())
753 Base = untagPointer(Base);
754 const uptr From = Base;
755 const uptr To = Base + Size;
756 bool MayHaveTaggedPrimary = allocatorSupportsMemoryTagging<Config>() &&
757 systemSupportsMemoryTagging();
758 auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
759 Arg](uptr Block) {
760 if (Block < From || Block >= To)
761 return;
762 uptr Chunk;
763 Chunk::UnpackedHeader Header;
764 if (MayHaveTaggedPrimary) {
765 // A chunk header can either have a zero tag (tagged primary) or the
766 // header tag (secondary, or untagged primary). We don't know which so
767 // try both.
768 ScopedDisableMemoryTagChecks x;
769 if (!getChunkFromBlock(Block, &Chunk, &Header) &&
770 !getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
771 return;
772 } else {
773 if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
774 return;
775 }
776 if (Header.State == Chunk::State::Allocated) {
777 uptr TaggedChunk = Chunk;
778 if (allocatorSupportsMemoryTagging<Config>())
779 TaggedChunk = untagPointer(TaggedChunk);
780 if (useMemoryTagging<Config>(Primary.Options.load()))
781 TaggedChunk = loadTag(Chunk);
782 Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
783 Arg);
784 }
785 };
786 Primary.iterateOverBlocks(Lambda);
787 Secondary.iterateOverBlocks(Lambda);
788 #ifdef GWP_ASAN_HOOKS
789 GuardedAlloc.iterate(reinterpret_cast<void *>(Base), Size, Callback, Arg);
790 #endif
791 }
792
  bool canReturnNull() {
794 initThreadMaybe();
795 return Primary.Options.load().get(OptionBit::MayReturnNull);
796 }
797
  bool setOption(Option O, sptr Value) {
799 initThreadMaybe();
800 if (O == Option::MemtagTuning) {
801 // Enabling odd/even tags involves a tradeoff between use-after-free
802 // detection and buffer overflow detection. Odd/even tags make it more
803 // likely for buffer overflows to be detected by increasing the size of
804 // the guaranteed "red zone" around the allocation, but on the other hand
805 // use-after-free is less likely to be detected because the tag space for
806 // any particular chunk is cut in half. Therefore we use this tuning
807 // setting to control whether odd/even tags are enabled.
808 if (Value == M_MEMTAG_TUNING_BUFFER_OVERFLOW)
809 Primary.Options.set(OptionBit::UseOddEvenTags);
810 else if (Value == M_MEMTAG_TUNING_UAF)
811 Primary.Options.clear(OptionBit::UseOddEvenTags);
812 return true;
813 } else {
      // We leave it to the various sub-components to decide whether or not
      // they want to handle the option, but we do not want to short-circuit
      // execution if one of the setOption calls were to return false.
817 const bool PrimaryResult = Primary.setOption(O, Value);
818 const bool SecondaryResult = Secondary.setOption(O, Value);
819 const bool RegistryResult = TSDRegistry.setOption(O, Value);
820 return PrimaryResult && SecondaryResult && RegistryResult;
821 }
822 return false;
823 }
824
825 // Return the usable size for a given chunk. Technically we lie, as we just
826 // report the actual size of a chunk. This is done to counteract code actively
827 // writing past the end of a chunk (like sqlite3) when the usable size allows
828 // for it, which then forces realloc to copy the usable size of a chunk as
829 // opposed to its actual size.
  uptr getUsableSize(const void *Ptr) {
831 if (UNLIKELY(!Ptr))
832 return 0;
833
834 return getAllocSize(Ptr);
835 }
836
  uptr getAllocSize(const void *Ptr) {
838 initThreadMaybe();
839
840 #ifdef GWP_ASAN_HOOKS
841 if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
842 return GuardedAlloc.getSize(Ptr);
843 #endif // GWP_ASAN_HOOKS
844
845 Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
846 Chunk::UnpackedHeader Header;
847 Chunk::loadHeader(Cookie, Ptr, &Header);
848
849 // Getting the alloc size of a chunk only makes sense if it's allocated.
850 if (UNLIKELY(Header.State != Chunk::State::Allocated))
851 reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
852
853 return getSize(Ptr, &Header);
854 }
855
  void getStats(StatCounters S) {
857 initThreadMaybe();
858 Stats.get(S);
859 }
860
861 // Returns true if the pointer provided was allocated by the current
862 // allocator instance, which is compliant with tcmalloc's ownership concept.
863 // A corrupted chunk will not be reported as owned, which is WAI.
  bool isOwned(const void *Ptr) {
865 initThreadMaybe();
866 #ifdef GWP_ASAN_HOOKS
867 if (GuardedAlloc.pointerIsMine(Ptr))
868 return true;
869 #endif // GWP_ASAN_HOOKS
870 if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))
871 return false;
872 Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
873 Chunk::UnpackedHeader Header;
874 return Chunk::isValid(Cookie, Ptr, &Header) &&
875 Header.State == Chunk::State::Allocated;
876 }
877
  bool useMemoryTaggingTestOnly() const {
879 return useMemoryTagging<Config>(Primary.Options.load());
880 }
  void disableMemoryTagging() {
882 // If we haven't been initialized yet, we need to initialize now in order to
883 // prevent a future call to initThreadMaybe() from enabling memory tagging
884 // based on feature detection. But don't call initThreadMaybe() because it
885 // may end up calling the allocator (via pthread_atfork, via the post-init
886 // callback), which may cause mappings to be created with memory tagging
887 // enabled.
888 TSDRegistry.initOnceMaybe(this);
889 if (allocatorSupportsMemoryTagging<Config>()) {
890 Secondary.disableMemoryTagging();
891 Primary.Options.clear(OptionBit::UseMemoryTagging);
892 }
893 }
894
  void setTrackAllocationStacks(bool Track) {
896 initThreadMaybe();
897 if (getFlags()->allocation_ring_buffer_size <= 0) {
898 DCHECK(!Primary.Options.load().get(OptionBit::TrackAllocationStacks));
899 return;
900 }
901 if (Track)
902 Primary.Options.set(OptionBit::TrackAllocationStacks);
903 else
904 Primary.Options.clear(OptionBit::TrackAllocationStacks);
905 }
906
  void setFillContents(FillContentsMode FillContents) {
908 initThreadMaybe();
909 Primary.Options.setFillContentsMode(FillContents);
910 }
911
  void setAddLargeAllocationSlack(bool AddSlack) {
913 initThreadMaybe();
914 if (AddSlack)
915 Primary.Options.set(OptionBit::AddLargeAllocationSlack);
916 else
917 Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
918 }
919
  const char *getStackDepotAddress() const {
921 return reinterpret_cast<const char *>(&Depot);
922 }
923
  const char *getRegionInfoArrayAddress() const {
925 return Primary.getRegionInfoArrayAddress();
926 }
927
  static uptr getRegionInfoArraySize() {
929 return PrimaryT::getRegionInfoArraySize();
930 }
931
  const char *getRingBufferAddress() {
933 initThreadMaybe();
934 return RawRingBuffer;
935 }
936
  uptr getRingBufferSize() {
938 initThreadMaybe();
939 return RingBufferElements ? ringBufferSizeInBytes(RingBufferElements) : 0;
940 }
941
942 static const uptr MaxTraceSize = 64;
943
  static void collectTraceMaybe(const StackDepot *Depot,
945 uintptr_t (&Trace)[MaxTraceSize], u32 Hash) {
946 uptr RingPos, Size;
947 if (!Depot->find(Hash, &RingPos, &Size))
948 return;
949 for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
950 Trace[I] = static_cast<uintptr_t>((*Depot)[RingPos + I]);
951 }
952
  static void getErrorInfo(struct scudo_error_info *ErrorInfo,
954 uintptr_t FaultAddr, const char *DepotPtr,
955 const char *RegionInfoPtr, const char *RingBufferPtr,
956 size_t RingBufferSize, const char *Memory,
957 const char *MemoryTags, uintptr_t MemoryAddr,
958 size_t MemorySize) {
959 *ErrorInfo = {};
960 if (!allocatorSupportsMemoryTagging<Config>() ||
961 MemoryAddr + MemorySize < MemoryAddr)
962 return;
963
964 auto *Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
965 size_t NextErrorReport = 0;
966
967 // Check for OOB in the current block and the two surrounding blocks. Beyond
968 // that, UAF is more likely.
969 if (extractTag(FaultAddr) != 0)
970 getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
971 RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
972 MemorySize, 0, 2);
973
974 // Check the ring buffer. For primary allocations this will only find UAF;
975 // for secondary allocations we can find either UAF or OOB.
976 getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
977 RingBufferPtr, RingBufferSize);
978
979 // Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
980 // Beyond that we are likely to hit false positives.
981 if (extractTag(FaultAddr) != 0)
982 getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
983 RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
984 MemorySize, 2, 16);
985 }
986
987 private:
988 typedef typename PrimaryT::SizeClassMap SizeClassMap;
989
990 static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
991 static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
992 static const uptr MinAlignment = 1UL << MinAlignmentLog;
993 static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
994 static const uptr MaxAllowedMallocSize =
995 FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
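  // That is 2^31 bytes (2 GB) on 32-bit platforms and 2^40 bytes (1 TB) on
  // 64-bit ones.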
996
997 static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
998 "Minimal alignment must at least cover a chunk header.");
999 static_assert(!allocatorSupportsMemoryTagging<Config>() ||
1000 MinAlignment >= archMemoryTagGranuleSize(),
1001 "");
1002
1003 static const u32 BlockMarker = 0x44554353U;
1004
1005 // These are indexes into an "array" of 32-bit values that store information
1006 // inline with a chunk that is relevant to diagnosing memory tag faults, where
1007 // 0 corresponds to the address of the user memory. This means that only
1008 // negative indexes may be used. The smallest index that may be used is -2,
1009 // which corresponds to 8 bytes before the user memory, because the chunk
1010 // header size is 8 bytes and in allocators that support memory tagging the
1011 // minimum alignment is at least the tag granule size (16 on aarch64).
1012 static const sptr MemTagAllocationTraceIndex = -2;
1013 static const sptr MemTagAllocationTidIndex = -1;
1014
1015 u32 Cookie = 0;
1016 u32 QuarantineMaxChunkSize = 0;
1017
1018 GlobalStats Stats;
1019 PrimaryT Primary;
1020 SecondaryT Secondary;
1021 QuarantineT Quarantine;
1022 TSDRegistryT TSDRegistry;
1023 pthread_once_t PostInitNonce = PTHREAD_ONCE_INIT;
1024
1025 #ifdef GWP_ASAN_HOOKS
1026 gwp_asan::GuardedPoolAllocator GuardedAlloc;
1027 uptr GuardedAllocSlotSize = 0;
1028 #endif // GWP_ASAN_HOOKS
1029
1030 StackDepot Depot;
1031
1032 struct AllocationRingBuffer {
1033 struct Entry {
1034 atomic_uptr Ptr;
1035 atomic_uptr AllocationSize;
1036 atomic_u32 AllocationTrace;
1037 atomic_u32 AllocationTid;
1038 atomic_u32 DeallocationTrace;
1039 atomic_u32 DeallocationTid;
1040 };
1041
1042 atomic_uptr Pos;
    // An array of Size (at least one) elements of type Entry immediately
    // follows this struct.
1045 };
1046 // Pointer to memory mapped area starting with AllocationRingBuffer struct,
1047 // and immediately followed by Size elements of type Entry.
1048 char *RawRingBuffer = {};
1049 u32 RingBufferElements = 0;
1050 MemMapT RawRingBufferMap;
1051
1052 // The following might get optimized out by the compiler.
  NOINLINE void performSanityChecks() {
1054 // Verify that the header offset field can hold the maximum offset. In the
1055 // case of the Secondary allocator, it takes care of alignment and the
1056 // offset will always be small. In the case of the Primary, the worst case
1057 // scenario happens in the last size class, when the backend allocation
1058 // would already be aligned on the requested alignment, which would happen
1059 // to be the maximum alignment that would fit in that size class. As a
1060 // result, the maximum offset will be at most the maximum alignment for the
1061 // last size class minus the header size, in multiples of MinAlignment.
1062 Chunk::UnpackedHeader Header = {};
1063 const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
1064 SizeClassMap::MaxSize - MinAlignment);
1065 const uptr MaxOffset =
1066 (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
1067 Header.Offset = MaxOffset & Chunk::OffsetMask;
1068 if (UNLIKELY(Header.Offset != MaxOffset))
1069 reportSanityCheckError("offset");
1070
1071 // Verify that we can fit the maximum size or amount of unused bytes in the
1072 // header. Given that the Secondary fits the allocation to a page, the worst
1073 // case scenario happens in the Primary. It will depend on the second to
1074 // last and last class sizes, as well as the dynamic base for the Primary.
1075 // The following is an over-approximation that works for our needs.
1076 const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
1077 Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
1078 if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
1079 reportSanityCheckError("size (or unused bytes)");
1080
1081 const uptr LargestClassId = SizeClassMap::LargestClassId;
1082 Header.ClassId = LargestClassId;
1083 if (UNLIKELY(Header.ClassId != LargestClassId))
1084 reportSanityCheckError("class ID");
1085 }
1086
  static inline void *getBlockBegin(const void *Ptr,
1088 Chunk::UnpackedHeader *Header) {
1089 return reinterpret_cast<void *>(
1090 reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
1091 (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
1092 }
1093
1094 // Return the size of a chunk as requested during its allocation.
  inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
1096 const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
1097 if (LIKELY(Header->ClassId))
1098 return SizeOrUnusedBytes;
1099 if (allocatorSupportsMemoryTagging<Config>())
1100 Ptr = untagPointer(const_cast<void *>(Ptr));
1101 return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
1102 reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
1103 }
1104
  void quarantineOrDeallocateChunk(const Options &Options, void *TaggedPtr,
1106 Chunk::UnpackedHeader *Header,
1107 uptr Size) NO_THREAD_SAFETY_ANALYSIS {
1108 void *Ptr = getHeaderTaggedPointer(TaggedPtr);
    // If the quarantine is disabled, or if the actual size of the chunk is 0
    // or larger than the maximum allowed, we return the chunk directly to the
    // backend. The (Size - 1) comparison purposefully underflows for
    // Size == 0.
1112 const bool BypassQuarantine = !Quarantine.getCacheSize() ||
1113 ((Size - 1) >= QuarantineMaxChunkSize) ||
1114 !Header->ClassId;
1115 if (BypassQuarantine)
1116 Header->State = Chunk::State::Available;
1117 else
1118 Header->State = Chunk::State::Quarantined;
1119 Header->OriginOrWasZeroed = useMemoryTagging<Config>(Options) &&
1120 Header->ClassId &&
1121 !TSDRegistry.getDisableMemInit();
1122 Chunk::storeHeader(Cookie, Ptr, Header);
1123
1124 if (UNLIKELY(useMemoryTagging<Config>(Options))) {
1125 u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
1126 storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
1127 if (Header->ClassId) {
1128 if (!TSDRegistry.getDisableMemInit()) {
1129 uptr TaggedBegin, TaggedEnd;
1130 const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
1131 Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, Header)),
1132 Header->ClassId);
1133 // Exclude the previous tag so that immediate use after free is
1134 // detected 100% of the time.
1135 setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
1136 &TaggedEnd);
1137 }
1138 }
1139 }
1140 if (BypassQuarantine) {
1141 if (allocatorSupportsMemoryTagging<Config>())
1142 Ptr = untagPointer(Ptr);
1143 void *BlockBegin = getBlockBegin(Ptr, Header);
1144 const uptr ClassId = Header->ClassId;
1145 if (LIKELY(ClassId)) {
1146 bool UnlockRequired;
1147 auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
1148 TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
1149 const bool CacheDrained =
1150 TSD->getCache().deallocate(ClassId, BlockBegin);
1151 if (UnlockRequired)
1152 TSD->unlock();
        // When we have drained some blocks back to the Primary from the TSD,
        // that implies that we may have the chance to release some pages as
        // well. Note that, in order not to block other threads from accessing
        // the TSD, we release the TSD first and then try the page release.
1157 if (CacheDrained)
1158 Primary.tryReleaseToOS(ClassId, ReleaseToOS::Normal);
1159 } else {
1160 if (UNLIKELY(useMemoryTagging<Config>(Options)))
1161 storeTags(reinterpret_cast<uptr>(BlockBegin),
1162 reinterpret_cast<uptr>(Ptr));
1163 Secondary.deallocate(Options, BlockBegin);
1164 }
1165 } else {
1166 bool UnlockRequired;
1167 auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
1168 TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
1169 Quarantine.put(&TSD->getQuarantineCache(),
1170 QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
1171 if (UnlockRequired)
1172 TSD->unlock();
1173 }
1174 }
1175
  bool getChunkFromBlock(uptr Block, uptr *Chunk,
1177 Chunk::UnpackedHeader *Header) {
1178 *Chunk =
1179 Block + getChunkOffsetFromBlock(reinterpret_cast<const char *>(Block));
1180 return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header);
1181 }
1182
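  // When allocate() had to align the user pointer past the header, the block
  // starts with [BlockMarker][Offset (u32)] and the chunk lives at
  // Block + Offset + getHeaderSize(); otherwise the chunk starts right after
  // the header. This mirrors the layout written in allocate().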
  static uptr getChunkOffsetFromBlock(const char *Block) {
1184 u32 Offset = 0;
1185 if (reinterpret_cast<const u32 *>(Block)[0] == BlockMarker)
1186 Offset = reinterpret_cast<const u32 *>(Block)[1];
1187 return Offset + Chunk::getHeaderSize();
1188 }
1189
1190 // Set the tag of the granule past the end of the allocation to 0, to catch
1191 // linear overflows even if a previous larger allocation used the same block
1192 // and tag. Only do this if the granule past the end is in our block, because
1193 // this would otherwise lead to a SEGV if the allocation covers the entire
1194 // block and our block is at the end of a mapping. The tag of the next block's
1195 // header granule will be set to 0, so it will serve the purpose of catching
1196 // linear overflows in this case.
1197 //
1198 // For allocations of size 0 we do not end up storing the address tag to the
1199 // memory tag space, which getInlineErrorInfo() normally relies on to match
1200 // address tags against chunks. To allow matching in this case we store the
1201 // address tag in the first byte of the chunk.
  void storeEndMarker(uptr End, uptr Size, uptr BlockEnd) {
1203 DCHECK_EQ(BlockEnd, untagPointer(BlockEnd));
1204 uptr UntaggedEnd = untagPointer(End);
1205 if (UntaggedEnd != BlockEnd) {
1206 storeTag(UntaggedEnd);
1207 if (Size == 0)
1208 *reinterpret_cast<u8 *>(UntaggedEnd) = extractTag(End);
1209 }
1210 }
1211
  void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
1213 uptr BlockEnd) {
1214 // Prepare the granule before the chunk to store the chunk header by setting
1215 // its tag to 0. Normally its tag will already be 0, but in the case where a
1216 // chunk holding a low alignment allocation is reused for a higher alignment
1217 // allocation, the chunk may already have a non-zero tag from the previous
1218 // allocation.
1219 storeTag(reinterpret_cast<uptr>(Ptr) - archMemoryTagGranuleSize());
1220
1221 uptr TaggedBegin, TaggedEnd;
1222 setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);
1223
1224 storeEndMarker(TaggedEnd, Size, BlockEnd);
1225 return reinterpret_cast<void *>(TaggedBegin);
1226 }
1227
  void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
1229 uptr BlockEnd) {
1230 uptr RoundOldPtr = roundUp(OldPtr, archMemoryTagGranuleSize());
1231 uptr RoundNewPtr;
1232 if (RoundOldPtr >= NewPtr) {
1233 // If the allocation is shrinking we just need to set the tag past the end
1234 // of the allocation to 0. See explanation in storeEndMarker() above.
1235 RoundNewPtr = roundUp(NewPtr, archMemoryTagGranuleSize());
1236 } else {
1237 // Set the memory tag of the region
1238 // [RoundOldPtr, roundUp(NewPtr, archMemoryTagGranuleSize()))
1239 // to the pointer tag stored in OldPtr.
1240 RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
1241 }
1242 storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
1243 }
1244
  void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
1246 if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
1247 return;
1248 auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
1249 Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
1250 Ptr32[MemTagAllocationTidIndex] = getThreadID();
1251 }
1252
  void storeRingBufferEntry(void *Ptr, u32 AllocationTrace, u32 AllocationTid,
1254 uptr AllocationSize, u32 DeallocationTrace,
1255 u32 DeallocationTid) {
1256 uptr Pos = atomic_fetch_add(&getRingBuffer()->Pos, 1, memory_order_relaxed);
1257 typename AllocationRingBuffer::Entry *Entry =
1258 getRingBufferEntry(RawRingBuffer, Pos % RingBufferElements);
1259
    // First invalidate our entry so that we don't attempt to interpret a
    // partially written state in getRingBufferErrorInfo(). The fences below
    // ensure that the compiler does not move the stores to Ptr in between the
    // stores to the other fields.
1264 atomic_store_relaxed(&Entry->Ptr, 0);
1265
1266 __atomic_signal_fence(__ATOMIC_SEQ_CST);
1267 atomic_store_relaxed(&Entry->AllocationTrace, AllocationTrace);
1268 atomic_store_relaxed(&Entry->AllocationTid, AllocationTid);
1269 atomic_store_relaxed(&Entry->AllocationSize, AllocationSize);
1270 atomic_store_relaxed(&Entry->DeallocationTrace, DeallocationTrace);
1271 atomic_store_relaxed(&Entry->DeallocationTid, DeallocationTid);
1272 __atomic_signal_fence(__ATOMIC_SEQ_CST);
1273
1274 atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
1275 }
1276
  void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
1278 uptr Size) {
1279 if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
1280 return;
1281
1282 u32 Trace = collectStackTrace();
1283 u32 Tid = getThreadID();
1284
1285 auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
1286 Ptr32[MemTagAllocationTraceIndex] = Trace;
1287 Ptr32[MemTagAllocationTidIndex] = Tid;
1288
1289 storeRingBufferEntry(untagPointer(Ptr), Trace, Tid, Size, 0, 0);
1290 }
1291
  void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
1293 u8 PrevTag, uptr Size) {
1294 if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
1295 return;
1296
1297 auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
1298 u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
1299 u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];
1300
1301 u32 DeallocationTrace = collectStackTrace();
1302 u32 DeallocationTid = getThreadID();
1303
1304 storeRingBufferEntry(addFixedTag(untagPointer(Ptr), PrevTag),
1305 AllocationTrace, AllocationTid, Size,
1306 DeallocationTrace, DeallocationTid);
1307 }
1308
1309 static const size_t NumErrorReports =
1310 sizeof(((scudo_error_info *)nullptr)->reports) /
1311 sizeof(((scudo_error_info *)nullptr)->reports[0]);
1312
  static void getInlineErrorInfo(struct scudo_error_info *ErrorInfo,
1314 size_t &NextErrorReport, uintptr_t FaultAddr,
1315 const StackDepot *Depot,
1316 const char *RegionInfoPtr, const char *Memory,
1317 const char *MemoryTags, uintptr_t MemoryAddr,
1318 size_t MemorySize, size_t MinDistance,
1319 size_t MaxDistance) {
1320 uptr UntaggedFaultAddr = untagPointer(FaultAddr);
1321 u8 FaultAddrTag = extractTag(FaultAddr);
1322 BlockInfo Info =
1323 PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedFaultAddr);
1324
1325 auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool {
1326 if (Addr < MemoryAddr || Addr + archMemoryTagGranuleSize() < Addr ||
1327 Addr + archMemoryTagGranuleSize() > MemoryAddr + MemorySize)
1328 return false;
1329 *Data = &Memory[Addr - MemoryAddr];
1330 *Tag = static_cast<u8>(
1331 MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]);
1332 return true;
1333 };
1334
1335 auto ReadBlock = [&](uptr Addr, uptr *ChunkAddr,
1336 Chunk::UnpackedHeader *Header, const u32 **Data,
1337 u8 *Tag) {
1338 const char *BlockBegin;
1339 u8 BlockBeginTag;
1340 if (!GetGranule(Addr, &BlockBegin, &BlockBeginTag))
1341 return false;
1342 uptr ChunkOffset = getChunkOffsetFromBlock(BlockBegin);
1343 *ChunkAddr = Addr + ChunkOffset;
1344
1345 const char *ChunkBegin;
1346 if (!GetGranule(*ChunkAddr, &ChunkBegin, Tag))
1347 return false;
1348 *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>(
1349 ChunkBegin - Chunk::getHeaderSize());
1350 *Data = reinterpret_cast<const u32 *>(ChunkBegin);
1351
1352 // Allocations of size 0 will have stashed the tag in the first byte of
1353 // the chunk, see storeEndMarker().
1354 if (Header->SizeOrUnusedBytes == 0)
1355 *Tag = static_cast<u8>(*ChunkBegin);
1356
1357 return true;
1358 };
1359
1360 if (NextErrorReport == NumErrorReports)
1361 return;
1362
1363 auto CheckOOB = [&](uptr BlockAddr) {
1364 if (BlockAddr < Info.RegionBegin || BlockAddr >= Info.RegionEnd)
1365 return false;
1366
1367 uptr ChunkAddr;
1368 Chunk::UnpackedHeader Header;
1369 const u32 *Data;
1370 uint8_t Tag;
1371 if (!ReadBlock(BlockAddr, &ChunkAddr, &Header, &Data, &Tag) ||
1372 Header.State != Chunk::State::Allocated || Tag != FaultAddrTag)
1373 return false;
1374
1375 auto *R = &ErrorInfo->reports[NextErrorReport++];
1376 R->error_type =
1377 UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
1378 R->allocation_address = ChunkAddr;
1379 R->allocation_size = Header.SizeOrUnusedBytes;
1380 collectTraceMaybe(Depot, R->allocation_trace,
1381 Data[MemTagAllocationTraceIndex]);
1382 R->allocation_tid = Data[MemTagAllocationTidIndex];
1383 return NextErrorReport == NumErrorReports;
1384 };
1385
1386 if (MinDistance == 0 && CheckOOB(Info.BlockBegin))
1387 return;
1388
1389 for (size_t I = Max<size_t>(MinDistance, 1); I != MaxDistance; ++I)
1390 if (CheckOOB(Info.BlockBegin + I * Info.BlockSize) ||
1391 CheckOOB(Info.BlockBegin - I * Info.BlockSize))
1392 return;
1393 }
1394
  static void getRingBufferErrorInfo(struct scudo_error_info *ErrorInfo,
1396 size_t &NextErrorReport,
1397 uintptr_t FaultAddr,
1398 const StackDepot *Depot,
1399 const char *RingBufferPtr,
1400 size_t RingBufferSize) {
1401 auto *RingBuffer =
1402 reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
1403 size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
1404 if (!RingBuffer || RingBufferElements == 0)
1405 return;
1406 uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
1407
1408 for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements &&
1409 NextErrorReport != NumErrorReports;
1410 --I) {
1411 auto *Entry = getRingBufferEntry(RingBufferPtr, I % RingBufferElements);
1412 uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
1413 if (!EntryPtr)
1414 continue;
1415
1416 uptr UntaggedEntryPtr = untagPointer(EntryPtr);
1417 uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize);
1418 u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace);
1419 u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid);
1420 u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace);
1421 u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid);
1422
1423 if (DeallocationTid) {
1424 // For UAF we only consider in-bounds fault addresses because
1425 // out-of-bounds UAF is rare and attempting to detect it is very likely
1426 // to result in false positives.
1427 if (FaultAddr < EntryPtr || FaultAddr >= EntryPtr + EntrySize)
1428 continue;
1429 } else {
1430 // Ring buffer OOB is only possible with secondary allocations. In this
1431 // case we are guaranteed a guard region of at least a page on either
1432 // side of the allocation (guard page on the right, guard page + tagged
1433 // region on the left), so ignore any faults outside of that range.
1434 if (FaultAddr < EntryPtr - getPageSizeCached() ||
1435 FaultAddr >= EntryPtr + EntrySize + getPageSizeCached())
1436 continue;
1437
1438 // For UAF the ring buffer will contain two entries, one for the
1439 // allocation and another for the deallocation. Don't report buffer
1440 // overflow/underflow using the allocation entry if we have already
1441 // collected a report from the deallocation entry.
1442 bool Found = false;
1443 for (uptr J = 0; J != NextErrorReport; ++J) {
1444 if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) {
1445 Found = true;
1446 break;
1447 }
1448 }
1449 if (Found)
1450 continue;
1451 }
1452
1453 auto *R = &ErrorInfo->reports[NextErrorReport++];
1454 if (DeallocationTid)
1455 R->error_type = USE_AFTER_FREE;
1456 else if (FaultAddr < EntryPtr)
1457 R->error_type = BUFFER_UNDERFLOW;
1458 else
1459 R->error_type = BUFFER_OVERFLOW;
1460
1461 R->allocation_address = UntaggedEntryPtr;
1462 R->allocation_size = EntrySize;
1463 collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace);
1464 R->allocation_tid = AllocationTid;
1465 collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace);
1466 R->deallocation_tid = DeallocationTid;
1467 }
1468 }
1469
  uptr getStats(ScopedString *Str) {
1471 Primary.getStats(Str);
1472 Secondary.getStats(Str);
1473 Quarantine.getStats(Str);
1474 TSDRegistry.getStats(Str);
1475 return Str->length();
1476 }
1477
1478 static typename AllocationRingBuffer::Entry *
  getRingBufferEntry(char *RawRingBuffer, uptr N) {
1480 return &reinterpret_cast<typename AllocationRingBuffer::Entry *>(
1481 &RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
1482 }
1483 static const typename AllocationRingBuffer::Entry *
  getRingBufferEntry(const char *RawRingBuffer, uptr N) {
1485 return &reinterpret_cast<const typename AllocationRingBuffer::Entry *>(
1486 &RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
1487 }
1488
  void mapAndInitializeRingBuffer() {
1490 if (getFlags()->allocation_ring_buffer_size <= 0)
1491 return;
1492 u32 AllocationRingBufferSize =
1493 static_cast<u32>(getFlags()->allocation_ring_buffer_size);
1494 MemMapT MemMap;
1495 MemMap.map(
1496 /*Addr=*/0U,
1497 roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
1498 getPageSizeCached()),
1499 "scudo:ring_buffer");
1500 RawRingBuffer = reinterpret_cast<char *>(MemMap.getBase());
1501 RawRingBufferMap = MemMap;
1502 RingBufferElements = AllocationRingBufferSize;
1503 static_assert(sizeof(AllocationRingBuffer) %
1504 alignof(typename AllocationRingBuffer::Entry) ==
1505 0,
1506 "invalid alignment");
1507 }
1508
  void unmapRingBuffer() {
1510 auto *RingBuffer = getRingBuffer();
1511 if (RingBuffer != nullptr) {
1512 RawRingBufferMap.unmap(RawRingBufferMap.getBase(),
1513 RawRingBufferMap.getCapacity());
1514 }
1515 RawRingBuffer = nullptr;
1516 }
1517
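  // As an illustration, on a typical 64-bit target where Entry is 32 bytes
  // (two uptr-sized and four u32-sized atomics) and the AllocationRingBuffer
  // header is 8 bytes, 1000 elements amount to 8 + 1000 * 32 = 32008 bytes,
  // which mapAndInitializeRingBuffer() then rounds up to a page multiple.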
  static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
1519 return sizeof(AllocationRingBuffer) +
1520 RingBufferElements * sizeof(typename AllocationRingBuffer::Entry);
1521 }
1522
  static constexpr size_t ringBufferElementsFromBytes(size_t Bytes) {
1524 if (Bytes < sizeof(AllocationRingBuffer)) {
1525 return 0;
1526 }
1527 return (Bytes - sizeof(AllocationRingBuffer)) /
1528 sizeof(typename AllocationRingBuffer::Entry);
1529 }
1530
  inline AllocationRingBuffer *getRingBuffer() {
1532 return reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
1533 }
1534 };
1535
1536 } // namespace scudo
1537
1538 #endif // SCUDO_COMBINED_H_
1539