//===-- combined_test.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "memtag.h"
#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "chunk.h"
#include "combined.h"

#include <algorithm>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <set>
#include <stdlib.h>
#include <thread>
#include <vector>

static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
static constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

// Fuchsia complains that the function is not used.
UNUSED static void disableDebuggerdMaybe() {
#if SCUDO_ANDROID
  // Disable the debuggerd signal handler on Android; without it we can end up
  // spending a significant amount of time creating tombstones.
  signal(SIGSEGV, SIG_DFL);
#endif
}

template <class AllocatorT>
bool isPrimaryAllocation(scudo::uptr Size, scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  if (Alignment < MinAlignment)
    Alignment = MinAlignment;
  const scudo::uptr NeededSize =
      scudo::roundUpTo(Size, MinAlignment) +
      ((Alignment > MinAlignment) ? Alignment : scudo::Chunk::getHeaderSize());
  return AllocatorT::PrimaryT::canAllocate(NeededSize);
}
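
// The computation above mirrors the size the allocator itself requests on its
// allocate() path: minimally aligned chunks prepend only the header, while
// larger alignments instead reserve Alignment bytes of slack.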

template <class AllocatorT>
void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
                             scudo::uptr Alignment) {
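  // With memory tagging enabled, the byte right before P overlaps the
  // (differently tagged) chunk header, and the byte at Size is past the end of
  // the chunk, so both accesses below are expected to fault.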
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Size = scudo::roundUpTo(Size, MinAlignment);
  if (Allocator->useMemoryTaggingTestOnly())
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[-1] = 0xaa;
        },
        "");
  if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
          ? Allocator->useMemoryTaggingTestOnly()
          : Alignment == MinAlignment) {
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[Size] = 0xaa;
        },
        "");
  }
}

template <typename Config> struct TestAllocator : scudo::Allocator<Config> {
  TestAllocator() {
    this->initThreadMaybe();
    if (scudo::archSupportsMemoryTagging() &&
        !scudo::systemDetectsMemoryTagFaultsTestOnly())
      this->disableMemoryTagging();
  }
  ~TestAllocator() { this->unmapTestOnly(); }

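  // The allocator object may require an alignment stricter than what plain
  // operator new guarantees, hence the posix_memalign-based override below.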
  void *operator new(size_t size) {
    void *p = nullptr;
    EXPECT_EQ(0, posix_memalign(&p, alignof(TestAllocator), size));
    return p;
  }

  void operator delete(void *ptr) { free(ptr); }
};

template <class TypeParam> struct ScudoCombinedTest : public Test {
  ScudoCombinedTest() {
    UseQuarantine = std::is_same<TypeParam, scudo::AndroidConfig>::value;
    Allocator = std::make_unique<AllocatorT>();
  }
  ~ScudoCombinedTest() {
    Allocator->releaseToOS();
    UseQuarantine = true;
  }

  void RunTest();

  void BasicTest(scudo::uptr SizeLog);

  using AllocatorT = TestAllocator<TypeParam>;
  std::unique_ptr<AllocatorT> Allocator;
};

template <typename T> using ScudoCombinedDeathTest = ScudoCombinedTest<T>;

#if SCUDO_FUCHSIA
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidSvelteConfig)                    \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, FuchsiaConfig)
#else
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidSvelteConfig)                    \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, DefaultConfig)                          \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig)
#endif

#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
  using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<scudo::TYPE>;                   \
  TEST_F(FIXTURE##NAME##_##TYPE, NAME) { FIXTURE##NAME<scudo::TYPE>::Run(); }

#define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
  template <class TypeParam>                                                   \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
    void Run();                                                                \
  };                                                                           \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()
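
// For instance, SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) below defines
// struct ScudoCombinedTestIsOwned<TypeParam> plus, for each configuration, a
// TEST_F such as ScudoCombinedTestIsOwned_DefaultConfig that calls Run().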

SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) {
  auto *Allocator = this->Allocator.get();
  static scudo::u8 StaticBuffer[scudo::Chunk::getHeaderSize() + 1];
  EXPECT_FALSE(
      Allocator->isOwned(&StaticBuffer[scudo::Chunk::getHeaderSize()]));

  scudo::u8 StackBuffer[scudo::Chunk::getHeaderSize() + 1];
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    StackBuffer[I] = 0x42U;
  EXPECT_FALSE(Allocator->isOwned(&StackBuffer[scudo::Chunk::getHeaderSize()]));
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    EXPECT_EQ(StackBuffer[I], 0x42U);
}

template <class Config>
void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLog) {
  auto *Allocator = this->Allocator.get();

  // This allocates and deallocates a bunch of chunks, with a wide range of
  // sizes and alignments, focusing on sizes that could trigger weird
  // behaviors (e.g., plus or minus a small delta of a power of two).
  for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
    const scudo::uptr Align = 1U << AlignLog;
    for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
      if (static_cast<scudo::sptr>(1U << SizeLog) + Delta < 0)
        continue;
      const scudo::uptr Size = (1U << SizeLog) + Delta;
      void *P = Allocator->allocate(Size, Origin, Align);
      EXPECT_NE(P, nullptr);
      EXPECT_TRUE(Allocator->isOwned(P));
      EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align));
      EXPECT_LE(Size, Allocator->getUsableSize(P));
      memset(P, 0xaa, Size);
      checkMemoryTaggingMaybe(Allocator, P, Size, Align);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

#define SCUDO_MAKE_BASIC_TEST(SizeLog)                                         \
  SCUDO_TYPED_TEST(ScudoCombinedDeathTest, BasicCombined##SizeLog) {           \
    this->BasicTest(SizeLog);                                                  \
  }

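// Instantiate the basic test for every power-of-two size from 2^0 to 2^20,
// which covers the primary's size classes and, for the largest sizes, may be
// fulfilled by the secondary allocator.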
SCUDO_MAKE_BASIC_TEST(0)
SCUDO_MAKE_BASIC_TEST(1)
SCUDO_MAKE_BASIC_TEST(2)
SCUDO_MAKE_BASIC_TEST(3)
SCUDO_MAKE_BASIC_TEST(4)
SCUDO_MAKE_BASIC_TEST(5)
SCUDO_MAKE_BASIC_TEST(6)
SCUDO_MAKE_BASIC_TEST(7)
SCUDO_MAKE_BASIC_TEST(8)
SCUDO_MAKE_BASIC_TEST(9)
SCUDO_MAKE_BASIC_TEST(10)
SCUDO_MAKE_BASIC_TEST(11)
SCUDO_MAKE_BASIC_TEST(12)
SCUDO_MAKE_BASIC_TEST(13)
SCUDO_MAKE_BASIC_TEST(14)
SCUDO_MAKE_BASIC_TEST(15)
SCUDO_MAKE_BASIC_TEST(16)
SCUDO_MAKE_BASIC_TEST(17)
SCUDO_MAKE_BASIC_TEST(18)
SCUDO_MAKE_BASIC_TEST(19)
SCUDO_MAKE_BASIC_TEST(20)

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroContents) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroContents returns a zero'd out block.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroFill returns a zero'd out block.
  Allocator->setFillContents(scudo::ZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, PatternOrZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying PatternOrZeroFill returns a pattern or zero filled
  // block. The primary allocator only produces pattern filled blocks if MTE
  // is disabled, so we only require pattern filled blocks in that case.
  Allocator->setFillContents(scudo::PatternOrZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++) {
        unsigned char V = (reinterpret_cast<unsigned char *>(P))[I];
        if (isPrimaryAllocation<TestAllocator<TypeParam>>(Size,
                                                          1U << MinAlignLog) &&
            !Allocator->useMemoryTaggingTestOnly())
          ASSERT_EQ(V, scudo::PatternFillByte);
        else
          ASSERT_TRUE(V == scudo::PatternFillByte || V == 0);
      }
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, BlockReuse) {
  auto *Allocator = this->Allocator.get();

  // Verify that a chunk will end up being reused, at some point.
  const scudo::uptr NeedleSize = 1024U;
  void *NeedleP = Allocator->allocate(NeedleSize, Origin);
  Allocator->deallocate(NeedleP, Origin);
  bool Found = false;
  for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
    void *P = Allocator->allocate(NeedleSize, Origin);
    if (Allocator->getHeaderTaggedPointer(P) ==
        Allocator->getHeaderTaggedPointer(NeedleP))
      Found = true;
    Allocator->deallocate(P, Origin);
  }
  EXPECT_TRUE(Found);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeIncreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a chunk all the way up to a secondary allocation, verifying
  // that we preserve the data in the process.
  scudo::uptr Size = 16;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 0xab;
  memset(P, Marker, Size);
  while (Size < TypeParam::Primary::SizeClassMap::MaxSize * 4) {
    void *NewP = Allocator->reallocate(P, Size * 2);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < Size; J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    memset(reinterpret_cast<char *>(NewP) + Size, Marker, Size);
    Size *= 2U;
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeDecreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a large chunk all the way down to a byte, verifying that we
  // preserve the data in the process.
  scudo::uptr Size = TypeParam::Primary::SizeClassMap::MaxSize * 2;
  const scudo::uptr DataSize = 2048U;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 0xab;
  memset(P, Marker, scudo::Min(Size, DataSize));
  while (Size > 1U) {
    Size /= 2U;
    void *NewP = Allocator->reallocate(P, Size);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < scudo::Min(Size, DataSize); J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, ReallocateSame) {
  auto *Allocator = this->Allocator.get();

  // Check that reallocating a chunk to a slightly smaller or larger size
  // returns the same chunk. This requires that all the sizes we iterate on use
  // the same block size, but that should be the case for MaxSize - 64 with our
  // default size class maps.
  constexpr scudo::uptr ReallocSize =
      TypeParam::Primary::SizeClassMap::MaxSize - 64;
  void *P = Allocator->allocate(ReallocSize, Origin);
  const char Marker = 0xab;
  memset(P, Marker, ReallocSize);
  for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
    const scudo::uptr NewSize = ReallocSize + Delta;
    void *NewP = Allocator->reallocate(P, NewSize);
    EXPECT_EQ(NewP, P);
    for (scudo::uptr I = 0; I < ReallocSize - 32; I++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker);
    checkMemoryTaggingMaybe(Allocator, NewP, NewSize, 0);
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, IterateOverChunks) {
  auto *Allocator = this->Allocator.get();
  // Allocate a bunch of chunks, then iterate over all of them, ensuring they
  // are the ones we allocated. This requires that the allocator have no other
  // allocated chunk at this point (e.g., it won't work with the Quarantine).
  // FIXME: Make it work with UseQuarantine and tagging enabled. The internals
  // of iterateOverChunks read the header through both tagged and untagged
  // pointers, so one of the two accesses will fail.
  if (!UseQuarantine) {
    std::vector<void *> V;
    for (scudo::uptr I = 0; I < 64U; I++)
      V.push_back(Allocator->allocate(
          rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
    Allocator->disable();
    Allocator->iterateOverChunks(
        0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
        [](uintptr_t Base, size_t Size, void *Arg) {
          std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
          void *P = reinterpret_cast<void *>(Base);
          EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
        },
        reinterpret_cast<void *>(&V));
    Allocator->enable();
    for (auto P : V)
      Allocator->deallocate(P, Origin);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, UseAfterFree) {
  auto *Allocator = this->Allocator.get();

  // Check that use-after-free is detected.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    const scudo::uptr Size = 1U << SizeLog;
    if (!Allocator->useMemoryTaggingTestOnly())
      continue;
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[0] = 0xaa;
        },
        "");
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[Size - 1] = 0xaa;
        },
        "");
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DisableMemoryTagging) {
  auto *Allocator = this->Allocator.get();

  if (Allocator->useMemoryTaggingTestOnly()) {
    // Check that disabling memory tagging works correctly.
    void *P = Allocator->allocate(2048, Origin);
    EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 0xaa, "");
    scudo::ScopedDisableMemoryTagChecks NoTagChecks;
    Allocator->disableMemoryTagging();
    reinterpret_cast<char *>(P)[2048] = 0xaa;
    Allocator->deallocate(P, Origin);

    P = Allocator->allocate(2048, Origin);
    EXPECT_EQ(scudo::untagPointer(P), P);
    reinterpret_cast<char *>(P)[2048] = 0xaa;
    Allocator->deallocate(P, Origin);

    Allocator->releaseToOS();
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, Stats) {
  auto *Allocator = this->Allocator.get();

  scudo::uptr BufferSize = 8192;
  std::vector<char> Buffer(BufferSize);
  scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
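  // getStats() reports the size the full output requires; if our buffer was
  // too small, grow it and retry until everything fits.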
  while (ActualSize > BufferSize) {
    BufferSize = ActualSize + 1024;
    Buffer.resize(BufferSize);
    ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  }
  std::string Stats(Buffer.begin(), Buffer.end());
  // Basic checks on the contents of the statistics output, which also allow
  // us to verify that we got it all.
  EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
  for (auto P : V)
    Allocator->deallocate(P, Origin);

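  // The allocations above go through the thread's local cache; drain() should
  // return any cached blocks to the primary and leave the cache empty.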
  bool UnlockRequired;
  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
  EXPECT_TRUE(!TSD->Cache.isEmpty());
  TSD->Cache.drain();
  EXPECT_TRUE(TSD->Cache.isEmpty());
  if (UnlockRequired)
    TSD->unlock();
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  auto *Allocator = this->Allocator.get();
  std::thread Threads[32];
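  // Every thread blocks on Cv until Ready is set, so they all start
  // allocating at roughly the same time and actually contend on the allocator.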
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread([&]() {
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      std::vector<std::pair<void *, scudo::uptr>> V;
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size = std::rand() % 4096U;
        void *P = Allocator->allocate(Size, Origin);
        // A region could have run out of memory, resulting in a null P.
        if (P)
          V.push_back(std::make_pair(P, Size));
      }
      while (!V.empty()) {
        auto Pair = V.back();
        Allocator->deallocate(Pair.first, Origin, Pair.second);
        V.pop_back();
      }
    });
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS();
}

// Test that multiple instantiations of the allocator have not messed up the
// process's signal handlers (GWP-ASan used to do this).
TEST(ScudoCombinedDeathTest, SKIP_ON_FUCHSIA(testSEGV)) {
  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
  scudo::MapPlatformData Data = {};
  void *P = scudo::map(nullptr, Size, "testSEGV", MAP_NOACCESS, &Data);
  EXPECT_NE(P, nullptr);
  EXPECT_DEATH(memset(P, 0xaa, Size), "");
  scudo::unmap(P, Size, UNMAP_ALL, &Data);
}

struct DeathSizeClassConfig {
  static const scudo::uptr NumBits = 1;
  static const scudo::uptr MinSizeLog = 10;
  static const scudo::uptr MidSizeLog = 10;
  static const scudo::uptr MaxSizeLog = 13;
  static const scudo::u16 MaxNumCachedHint = 8;
  static const scudo::uptr MaxBytesCachedLog = 12;
  static const scudo::uptr SizeDelta = 0;
};
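
// With NumBits == 1 and Min/Mid/MaxSizeLog of 10/10/13, this map should yield
// exactly four size classes: 1024, 2048, 4096 and 8192 bytes.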

static const scudo::uptr DeathRegionSizeLog = 21U;
struct DeathConfig {
  static const bool MaySupportMemoryTagging = false;

  // Tiny allocator, whose Primary only serves chunks of four sizes.
  using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
  typedef scudo::SizeClassAllocator64<DeathConfig> Primary;
  static const scudo::uptr PrimaryRegionSizeLog = DeathRegionSizeLog;
  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
  typedef scudo::uptr PrimaryCompactPtrT;
  static const scudo::uptr PrimaryCompactPtrScale = 0;
  static const bool PrimaryEnableRandomOffset = true;
  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
  static const scudo::uptr PrimaryGroupSizeLog = 18;

  typedef scudo::MapAllocatorNoCache SecondaryCache;
  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
};

TEST(ScudoCombinedDeathTest, DeathCombined) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  const scudo::uptr Size = 1000U;
  void *P = Allocator->allocate(Size, Origin);
  EXPECT_NE(P, nullptr);

  // Invalid sized deallocation.
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size + 8U), "");

  // Misaligned pointer. Potentially unused if EXPECT_DEATH isn't available.
  UNUSED void *MisalignedP =
      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) | 1U);
  EXPECT_DEATH(Allocator->deallocate(MisalignedP, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(MisalignedP, Size * 2U), "");

  // Header corruption.
  scudo::u64 *H =
      reinterpret_cast<scudo::u64 *>(scudo::Chunk::getAtomicHeader(P));
  *H ^= 0x42U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420042U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420000U;

  // Invalid chunk state.
  Allocator->deallocate(P, Origin, Size);
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(P, Size * 2U), "");
  EXPECT_DEATH(Allocator->getUsableSize(P), "");
}

// Verify that when a region gets full, the allocator will still manage to
// fulfill the allocation through a larger size class.
TEST(ScudoCombinedTest, FullRegion) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  std::vector<void *> V;
  scudo::uptr FailedAllocationsCount = 0;
  for (scudo::uptr ClassId = 1U;
       ClassId <= DeathConfig::SizeClassMap::LargestClassId; ClassId++) {
    const scudo::uptr Size =
        DeathConfig::SizeClassMap::getSizeByClassId(ClassId);
    // Allocate enough to fill all of the regions above this one.
    const scudo::uptr MaxNumberOfChunks =
        ((1U << DeathRegionSizeLog) / Size) *
        (DeathConfig::SizeClassMap::LargestClassId - ClassId + 1);
    void *P;
    for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) {
      P = Allocator->allocate(Size - 64U, Origin);
      if (!P)
        FailedAllocationsCount++;
      else
        V.push_back(P);
    }
    while (!V.empty()) {
      Allocator->deallocate(V.back(), Origin);
      V.pop_back();
    }
  }
  EXPECT_EQ(FailedAllocationsCount, 0U);
}

// Ensure that releaseToOS can be called prior to any other allocator
// operation without issue.
SCUDO_TYPED_TEST(ScudoCombinedTest, ReleaseToOS) {
  auto *Allocator = this->Allocator.get();
  Allocator->releaseToOS();
}

SCUDO_TYPED_TEST(ScudoCombinedTest, OddEven) {
  auto *Allocator = this->Allocator.get();

  if (!Allocator->useMemoryTaggingTestOnly())
    return;

  auto CheckOddEven = [](scudo::uptr P1, scudo::uptr P2) {
    scudo::uptr Tag1 = scudo::extractTag(scudo::loadTag(P1));
    scudo::uptr Tag2 = scudo::extractTag(scudo::loadTag(P2));
    EXPECT_NE(Tag1 % 2, Tag2 % 2);
  };
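
  // Scudo assigns tags of opposite parity to adjacent chunks, so a linear
  // overflow into a neighboring allocation always changes tag parity and is
  // caught deterministically; the loop below hunts for two adjacent chunks.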

  using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
  for (scudo::uptr ClassId = 1U; ClassId <= SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size = SizeClassMap::getSizeByClassId(ClassId);

    std::set<scudo::uptr> Ptrs;
    bool Found = false;
    for (unsigned I = 0; I != 65536; ++I) {
      scudo::uptr P = scudo::untagPointer(reinterpret_cast<scudo::uptr>(
          Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin)));
      if (Ptrs.count(P - Size)) {
        Found = true;
        CheckOddEven(P, P - Size);
        break;
      }
      if (Ptrs.count(P + Size)) {
        Found = true;
        CheckOddEven(P, P + Size);
        break;
      }
      Ptrs.insert(P);
    }
    EXPECT_TRUE(Found);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, DisableMemInit) {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> Ptrs(65536);

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 1);

  constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

  // Test that even when mem-init is disabled on a thread, calloc still works
  // as expected. This is tricky to ensure when MTE is enabled, so this test
  // tries to exercise the relevant code on our MTE path.
  for (scudo::uptr ClassId = 1U; ClassId <= 8; ClassId++) {
    using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
    const scudo::uptr Size =
        SizeClassMap::getSizeByClassId(ClassId) - scudo::Chunk::getHeaderSize();
    if (Size < 8)
      continue;
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin);
      memset(Ptrs[I], 0xaa, Size);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size - 8, Origin);
      memset(Ptrs[I], 0xbb, Size - 8);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size - 8);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      for (scudo::uptr J = 0; J < Size; ++J)
        ASSERT_EQ((reinterpret_cast<char *>(Ptrs[I]))[J], 0);
    }
  }

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 0);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateInPlaceStress) {
  auto *Allocator = this->Allocator.get();

  // Regression test: make realloc-in-place happen at the very right end of a
  // mapped region.
  constexpr int nPtrs = 10000;
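  // Presumably, growing each allocation by a single byte can be serviced in
  // place within the same block; sizes of the form 16 * i - 1 exercise
  // different offsets relative to the block boundaries.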
  for (int i = 1; i < 32; ++i) {
    scudo::uptr Size = 16 * i - 1;
    std::vector<void *> Ptrs;
    for (int j = 0; j < nPtrs; ++j) {
      void *P = Allocator->allocate(Size, Origin);
      P = Allocator->reallocate(P, Size + 1);
      Ptrs.push_back(P);
    }

    for (int j = 0; j < nPtrs; ++j)
      Allocator->deallocate(Ptrs[j], Origin);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferSize) {
  auto *Allocator = this->Allocator.get();
  auto Size = Allocator->getRingBufferSize();
  ASSERT_GT(Size, 0u);
  EXPECT_EQ(Allocator->getRingBufferAddress()[Size - 1], '\0');
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferAddress) {
  auto *Allocator = this->Allocator.get();
  auto *Addr = Allocator->getRingBufferAddress();
  EXPECT_NE(Addr, nullptr);
  EXPECT_EQ(Addr, Allocator->getRingBufferAddress());
}

#if SCUDO_CAN_USE_PRIMARY64
#if SCUDO_TRUSTY

// TrustyConfig is designed for a domain-specific allocator. Add a basic test
// that covers only simple operations and ensures the configuration is able to
// compile.
TEST(ScudoCombinedTest, BasicTrustyConfig) {
  using AllocatorT = scudo::Allocator<scudo::TrustyConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  for (scudo::uptr ClassId = 1U;
       ClassId <= scudo::TrustyConfig::SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size =
        scudo::TrustyConfig::SizeClassMap::getSizeByClassId(ClassId);
    void *p = Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin);
    ASSERT_NE(p, nullptr);
    Allocator->deallocate(p, Origin);
  }

  bool UnlockRequired;
  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
  TSD->Cache.drain();

  Allocator->releaseToOS();
}

#endif
#endif

#if SCUDO_LINUX

SCUDO_TYPED_TEST(ScudoCombinedTest, SoftRssLimit) {
  auto *Allocator = this->Allocator.get();
  Allocator->setRssLimitsTestOnly(1, 0, true);
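  // A soft RSS limit should make allocations beyond the limit fail gracefully
  // by returning nullptr (checked below) rather than terminating the process,
  // in contrast with the HardRssLimit test that follows.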

  size_t Megabyte = 1024 * 1024;
  size_t ChunkSize = 16;
  size_t Error = 256;

  std::vector<void *> Ptrs;
  for (size_t index = 0; index < Megabyte + Error; index += ChunkSize) {
    void *Ptr = Allocator->allocate(ChunkSize, Origin);
    Ptrs.push_back(Ptr);
  }

  EXPECT_EQ(nullptr, Allocator->allocate(ChunkSize, Origin));

  for (void *Ptr : Ptrs)
    Allocator->deallocate(Ptr, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, HardRssLimit) {
  auto *Allocator = this->Allocator.get();
  Allocator->setRssLimitsTestOnly(0, 1, false);

  size_t Megabyte = 1024 * 1024;

  EXPECT_DEATH(
      {
        disableDebuggerdMaybe();
        Allocator->allocate(Megabyte, Origin);
      },
      "");
}

#endif