//===-- combined_test.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "memtag.h"
#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "combined.h"

#include <condition_variable>
#include <memory>
#include <mutex>
#include <set>
#include <stdlib.h>
#include <thread>
#include <vector>

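// All chunks in these tests are allocated with a Malloc origin. The minimum
// alignment log is 3 (8 bytes) on 32-bit platforms and 4 (16 bytes) on 64-bit
// ones.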
static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
static constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

// Fuchsia complains that the function is not used.
UNUSED static void disableDebuggerdMaybe() {
#if SCUDO_ANDROID
  // Disable the debuggerd signal handler on Android; without it, we can end
  // up spending a significant amount of time creating tombstones.
  signal(SIGSEGV, SIG_DFL);
#endif
}

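// Mirrors the size computation done in combined.h to predict whether an
// allocation will be serviced by the primary: the header is accounted for
// directly, or folded into the alignment padding for over-aligned requests.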
template <class AllocatorT>
bool isPrimaryAllocation(scudo::uptr Size, scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  if (Alignment < MinAlignment)
    Alignment = MinAlignment;
  const scudo::uptr NeededSize =
      scudo::roundUpTo(Size, MinAlignment) +
      ((Alignment > MinAlignment) ? Alignment : scudo::Chunk::getHeaderSize());
  return AllocatorT::PrimaryT::canAllocate(NeededSize);
}

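// Checks that accesses just outside a chunk fault when they should: one byte
// before the user pointer (the tagged header) whenever tagging is enabled,
// and one byte past the rounded-up end when the chunk's bounds are exact,
// i.e. for tagged primary allocations or for minimally aligned secondary
// allocations, which end flush against a guard page.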
template <class AllocatorT>
void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
                             scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Size = scudo::roundUpTo(Size, MinAlignment);
  if (Allocator->useMemoryTaggingTestOnly())
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[-1] = 0xaa;
        },
        "");
  if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
          ? Allocator->useMemoryTaggingTestOnly()
          : Alignment == MinAlignment) {
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[Size] = 0xaa;
        },
        "");
  }
}

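// Wraps the allocator for tests: initializes the TSD eagerly, disables memory
// tagging when the system cannot actually deliver tag faults, and unmaps all
// allocator memory on destruction so tests do not leak mappings.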
template <typename Config> struct TestAllocator : scudo::Allocator<Config> {
  TestAllocator() {
    this->initThreadMaybe();
    if (scudo::archSupportsMemoryTagging() &&
        !scudo::systemDetectsMemoryTagFaultsTestOnly())
      this->disableMemoryTagging();
  }
  ~TestAllocator() { this->unmapTestOnly(); }

  void *operator new(size_t size) {
    void *p = nullptr;
    EXPECT_EQ(0, posix_memalign(&p, alignof(TestAllocator), size));
    return p;
  }

  void operator delete(void *ptr) { free(ptr); }
};

template <class TypeParam> struct ScudoCombinedTest : public Test {
  ScudoCombinedTest() {
    UseQuarantine = std::is_same<TypeParam, scudo::AndroidConfig>::value;
    Allocator = std::make_unique<AllocatorT>();
  }
  ~ScudoCombinedTest() {
    Allocator->releaseToOS();
    UseQuarantine = true;
  }

  void RunTest();

  void BasicTest(scudo::uptr SizeLog);

  using AllocatorT = TestAllocator<TypeParam>;
  std::unique_ptr<AllocatorT> Allocator;
};

template <typename T> using ScudoCombinedDeathTest = ScudoCombinedTest<T>;

#if SCUDO_FUCHSIA
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidSvelteConfig)                    \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, FuchsiaConfig)
#else
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidSvelteConfig)                    \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, DefaultConfig)                          \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig)
#endif

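// SCUDO_TYPED_TEST(FIXTURE, NAME) emulates gtest's TYPED_TEST over the scudo
// configurations above: it declares a FIXTURE##NAME<TypeParam> fixture with a
// Run() method, stamps out one TEST_F per configuration, and then opens the
// definition of Run(). For example, SCUDO_TYPED_TEST(ScudoCombinedTest,
// IsOwned) produces tests such as ScudoCombinedTestIsOwned_DefaultConfig.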
#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
  using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<scudo::TYPE>;                   \
  TEST_F(FIXTURE##NAME##_##TYPE, NAME) { Run(); }

#define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
  template <class TypeParam>                                                   \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
    void Run();                                                                \
  };                                                                           \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()

SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) {
  auto *Allocator = this->Allocator.get();
  static scudo::u8 StaticBuffer[scudo::Chunk::getHeaderSize() + 1];
  EXPECT_FALSE(
      Allocator->isOwned(&StaticBuffer[scudo::Chunk::getHeaderSize()]));

  scudo::u8 StackBuffer[scudo::Chunk::getHeaderSize() + 1];
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    StackBuffer[I] = 0x42U;
  EXPECT_FALSE(Allocator->isOwned(&StackBuffer[scudo::Chunk::getHeaderSize()]));
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    EXPECT_EQ(StackBuffer[I], 0x42U);
}

template <class Config>
void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLog) {
  auto *Allocator = this->Allocator.get();

  // This allocates and deallocates a bunch of chunks, with a wide range of
  // sizes and alignments, focusing on sizes that could trigger weird
  // behaviors (a power of two plus or minus a small delta, for example).
  for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
    const scudo::uptr Align = 1U << AlignLog;
    for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
      if (static_cast<scudo::sptr>(1U << SizeLog) + Delta <= 0)
        continue;
      const scudo::uptr Size = (1U << SizeLog) + Delta;
      void *P = Allocator->allocate(Size, Origin, Align);
      EXPECT_NE(P, nullptr);
      EXPECT_TRUE(Allocator->isOwned(P));
      EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align));
      EXPECT_LE(Size, Allocator->getUsableSize(P));
      memset(P, 0xaa, Size);
      checkMemoryTaggingMaybe(Allocator, P, Size, Align);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

#define SCUDO_MAKE_BASIC_TEST(SizeLog)                                         \
  SCUDO_TYPED_TEST(ScudoCombinedDeathTest, BasicCombined##SizeLog) {           \
    this->BasicTest(SizeLog);                                                  \
  }

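// Instantiate the basic test for every power of two from 2^0 up to 2^20,
// which spans the primary size classes and, for the larger sizes, reaches
// into the secondary allocator.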
SCUDO_MAKE_BASIC_TEST(0)
SCUDO_MAKE_BASIC_TEST(1)
SCUDO_MAKE_BASIC_TEST(2)
SCUDO_MAKE_BASIC_TEST(3)
SCUDO_MAKE_BASIC_TEST(4)
SCUDO_MAKE_BASIC_TEST(5)
SCUDO_MAKE_BASIC_TEST(6)
SCUDO_MAKE_BASIC_TEST(7)
SCUDO_MAKE_BASIC_TEST(8)
SCUDO_MAKE_BASIC_TEST(9)
SCUDO_MAKE_BASIC_TEST(10)
SCUDO_MAKE_BASIC_TEST(11)
SCUDO_MAKE_BASIC_TEST(12)
SCUDO_MAKE_BASIC_TEST(13)
SCUDO_MAKE_BASIC_TEST(14)
SCUDO_MAKE_BASIC_TEST(15)
SCUDO_MAKE_BASIC_TEST(16)
SCUDO_MAKE_BASIC_TEST(17)
SCUDO_MAKE_BASIC_TEST(18)
SCUDO_MAKE_BASIC_TEST(19)
SCUDO_MAKE_BASIC_TEST(20)

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroContents) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroContents returns a zeroed-out block.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that setting ZeroFill as the fill policy returns a zeroed-out
  // block.
  Allocator->setFillContents(scudo::ZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, PatternOrZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying PatternOrZeroFill returns a pattern-filled or
  // zero-filled block. The primary allocator only produces pattern-filled
  // blocks when MTE is disabled, so we only require them in that case.
  Allocator->setFillContents(scudo::PatternOrZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++) {
        unsigned char V = (reinterpret_cast<unsigned char *>(P))[I];
        if (isPrimaryAllocation<TestAllocator<TypeParam>>(Size,
                                                          1U << MinAlignLog) &&
            !Allocator->useMemoryTaggingTestOnly())
          ASSERT_EQ(V, scudo::PatternFillByte);
        else
          ASSERT_TRUE(V == scudo::PatternFillByte || V == 0);
      }
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, BlockReuse) {
  auto *Allocator = this->Allocator.get();

  // Verify that a chunk will end up being reused at some point.
  const scudo::uptr NeedleSize = 1024U;
  void *NeedleP = Allocator->allocate(NeedleSize, Origin);
  Allocator->deallocate(NeedleP, Origin);
  bool Found = false;
  for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
    void *P = Allocator->allocate(NeedleSize, Origin);
    if (Allocator->getHeaderTaggedPointer(P) ==
        Allocator->getHeaderTaggedPointer(NeedleP))
      Found = true;
    Allocator->deallocate(P, Origin);
  }
  EXPECT_TRUE(Found);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeIncreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a chunk all the way up to a secondary allocation, verifying
  // that we preserve the data in the process.
  scudo::uptr Size = 16;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 0xab;
  memset(P, Marker, Size);
  while (Size < TypeParam::Primary::SizeClassMap::MaxSize * 4) {
    void *NewP = Allocator->reallocate(P, Size * 2);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < Size; J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    memset(reinterpret_cast<char *>(NewP) + Size, Marker, Size);
    Size *= 2U;
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeDecreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a large chunk all the way down to a byte, verifying that we
  // preserve the data in the process.
  scudo::uptr Size = TypeParam::Primary::SizeClassMap::MaxSize * 2;
  const scudo::uptr DataSize = 2048U;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 0xab;
  memset(P, Marker, scudo::Min(Size, DataSize));
  while (Size > 1U) {
    Size /= 2U;
    void *NewP = Allocator->reallocate(P, Size);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < scudo::Min(Size, DataSize); J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, ReallocateSame) {
  auto *Allocator = this->Allocator.get();

  // Check that reallocating a chunk to a slightly smaller or larger size
  // returns the same chunk. This requires that all the sizes we iterate on use
  // the same block size, but that should be the case for MaxSize - 64 with our
  // default size class maps.
  constexpr scudo::uptr ReallocSize =
      TypeParam::Primary::SizeClassMap::MaxSize - 64;
  void *P = Allocator->allocate(ReallocSize, Origin);
  const char Marker = 0xab;
  memset(P, Marker, ReallocSize);
  for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
    const scudo::uptr NewSize = ReallocSize + Delta;
    void *NewP = Allocator->reallocate(P, NewSize);
    EXPECT_EQ(NewP, P);
    for (scudo::uptr I = 0; I < ReallocSize - 32; I++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker);
    checkMemoryTaggingMaybe(Allocator, NewP, NewSize, 0);
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, IterateOverChunks) {
  auto *Allocator = this->Allocator.get();
  // Allocate a bunch of chunks, then iterate over all of them, ensuring they
  // are the ones we allocated. This requires the allocator to not have any
  // other allocated chunk at this point (e.g., it won't work with the
  // Quarantine).
  // FIXME: Make it work with UseQuarantine and tagging enabled. The internals
  // of iterateOverChunks read headers through both tagged and untagged
  // pointers, so one of the two will fail.
  if (!UseQuarantine) {
    std::vector<void *> V;
    for (scudo::uptr I = 0; I < 64U; I++)
      V.push_back(Allocator->allocate(
          rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
    Allocator->disable();
    Allocator->iterateOverChunks(
        0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
        [](uintptr_t Base, size_t Size, void *Arg) {
          std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
          void *P = reinterpret_cast<void *>(Base);
          EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
        },
        reinterpret_cast<void *>(&V));
    Allocator->enable();
    for (auto P : V)
      Allocator->deallocate(P, Origin);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, UseAfterFree) {
  auto *Allocator = this->Allocator.get();

  // Check that use-after-free is detected.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    const scudo::uptr Size = 1U << SizeLog;
    if (!Allocator->useMemoryTaggingTestOnly())
      continue;
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[0] = 0xaa;
        },
        "");
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[Size - 1] = 0xaa;
        },
        "");
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DisableMemoryTagging) {
  auto *Allocator = this->Allocator.get();

  if (Allocator->useMemoryTaggingTestOnly()) {
    // Check that disabling memory tagging works correctly.
    void *P = Allocator->allocate(2048, Origin);
    EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 0xaa, "");
    scudo::ScopedDisableMemoryTagChecks NoTagChecks;
    Allocator->disableMemoryTagging();
    reinterpret_cast<char *>(P)[2048] = 0xaa;
    Allocator->deallocate(P, Origin);

    P = Allocator->allocate(2048, Origin);
    EXPECT_EQ(scudo::untagPointer(P), P);
    reinterpret_cast<char *>(P)[2048] = 0xaa;
    Allocator->deallocate(P, Origin);

    Allocator->releaseToOS();
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, Stats) {
  auto *Allocator = this->Allocator.get();

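  // getStats() reports the size it needs; keep growing the buffer until the
  // full report fits.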
  scudo::uptr BufferSize = 8192;
  std::vector<char> Buffer(BufferSize);
  scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  while (ActualSize > BufferSize) {
    BufferSize = ActualSize + 1024;
    Buffer.resize(BufferSize);
    ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  }
  std::string Stats(Buffer.begin(), Buffer.end());
  // Basic checks on the contents of the statistics output, which also allows
  // us to verify that we got it all.
  EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
  for (auto P : V)
    Allocator->deallocate(P, Origin);

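  // Depending on the TSD registry, the TSD may be returned locked (shared
  // registries), in which case it must be unlocked once we are done with the
  // cache.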
  bool UnlockRequired;
  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
  EXPECT_TRUE(!TSD->Cache.isEmpty());
  TSD->Cache.drain();
  EXPECT_TRUE(TSD->Cache.isEmpty());
  if (UnlockRequired)
    TSD->unlock();
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  auto *Allocator = this->Allocator.get();
  std::thread Threads[32];
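  // Park every worker on the condition variable, then release them all at
  // once to maximize contention on the allocator.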
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread([&]() {
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      std::vector<std::pair<void *, scudo::uptr>> V;
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size = std::rand() % 4096U;
        void *P = Allocator->allocate(Size, Origin);
        // A region could have run out of memory, resulting in a null P.
        if (P)
          V.push_back(std::make_pair(P, Size));
      }
      while (!V.empty()) {
        auto Pair = V.back();
        Allocator->deallocate(Pair.first, Origin, Pair.second);
        V.pop_back();
      }
    });
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS();
}

// Test that multiple instantiations of the allocator have not messed up the
// process's signal handlers (GWP-ASan used to do this).
TEST(ScudoCombinedDeathTest, SKIP_ON_FUCHSIA(testSEGV)) {
  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
  scudo::MapPlatformData Data = {};
  void *P = scudo::map(nullptr, Size, "testSEGV", MAP_NOACCESS, &Data);
  EXPECT_NE(P, nullptr);
  EXPECT_DEATH(memset(P, 0xaa, Size), "");
  scudo::unmap(P, Size, UNMAP_ALL, &Data);
}

struct DeathSizeClassConfig {
  static const scudo::uptr NumBits = 1;
  static const scudo::uptr MinSizeLog = 10;
  static const scudo::uptr MidSizeLog = 10;
  static const scudo::uptr MaxSizeLog = 13;
  static const scudo::u32 MaxNumCachedHint = 4;
  static const scudo::uptr MaxBytesCachedLog = 12;
  static const scudo::uptr SizeDelta = 0;
};

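// DeathSizeClassConfig above yields exactly four size classes: 1024, 2048,
// 4096 and 8192 bytes.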
static const scudo::uptr DeathRegionSizeLog = 20U;
struct DeathConfig {
  static const bool MaySupportMemoryTagging = false;

  // Tiny allocator: its Primary only serves chunks of four sizes.
  using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
  typedef scudo::SizeClassAllocator64<DeathConfig> Primary;
  static const scudo::uptr PrimaryRegionSizeLog = DeathRegionSizeLog;
  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
  typedef scudo::uptr PrimaryCompactPtrT;
  static const scudo::uptr PrimaryCompactPtrScale = 0;
  static const bool PrimaryEnableRandomOffset = true;
  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;

  typedef scudo::MapAllocatorNoCache SecondaryCache;
  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
};

TEST(ScudoCombinedDeathTest, DeathCombined) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  const scudo::uptr Size = 1000U;
  void *P = Allocator->allocate(Size, Origin);
  EXPECT_NE(P, nullptr);

  // Invalid sized deallocation.
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size + 8U), "");

  // Misaligned pointer. Potentially unused if EXPECT_DEATH isn't available.
  UNUSED void *MisalignedP =
      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) | 1U);
  EXPECT_DEATH(Allocator->deallocate(MisalignedP, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(MisalignedP, Size * 2U), "");

  // Header corruption.
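  // Each XOR below leaves the checksummed header invalid in a different way;
  // combined, the three XORs cancel out, so the final deallocation sees the
  // original header and succeeds.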
  scudo::u64 *H =
      reinterpret_cast<scudo::u64 *>(scudo::Chunk::getAtomicHeader(P));
  *H ^= 0x42U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420042U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420000U;

  // Invalid chunk state.
  Allocator->deallocate(P, Origin, Size);
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(P, Size * 2U), "");
  EXPECT_DEATH(Allocator->getUsableSize(P), "");
}

// Verify that when a region gets full, the allocator will still manage to
// fulfill the allocation through a larger size class.
TEST(ScudoCombinedTest, FullRegion) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  std::vector<void *> V;
  scudo::uptr FailedAllocationsCount = 0;
  for (scudo::uptr ClassId = 1U;
       ClassId <= DeathConfig::SizeClassMap::LargestClassId; ClassId++) {
    const scudo::uptr Size =
        DeathConfig::SizeClassMap::getSizeByClassId(ClassId);
    // Allocate enough to fill all of the regions above this one.
    const scudo::uptr MaxNumberOfChunks =
        ((1U << DeathRegionSizeLog) / Size) *
        (DeathConfig::SizeClassMap::LargestClassId - ClassId + 1);
    void *P;
    for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) {
      P = Allocator->allocate(Size - 64U, Origin);
      if (!P)
        FailedAllocationsCount++;
      else
        V.push_back(P);
    }
    while (!V.empty()) {
      Allocator->deallocate(V.back(), Origin);
      V.pop_back();
    }
  }
  EXPECT_EQ(FailedAllocationsCount, 0U);
}

// Ensure that releaseToOS can be called prior to any other allocator
// operation without issue.
SCUDO_TYPED_TEST(ScudoCombinedTest, ReleaseToOS) {
  auto *Allocator = this->Allocator.get();
  Allocator->releaseToOS();
}

SCUDO_TYPED_TEST(ScudoCombinedTest, OddEven) {
  auto *Allocator = this->Allocator.get();

  if (!Allocator->useMemoryTaggingTestOnly())
    return;

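  // With tagging enabled, chunks that are adjacent in memory are expected to
  // carry tags of opposite parity, so that a linear overflow from one chunk
  // into its neighbor is always caught. Find two neighboring allocations of
  // the same size class and check that their tags differ modulo 2.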
  auto CheckOddEven = [](scudo::uptr P1, scudo::uptr P2) {
    scudo::uptr Tag1 = scudo::extractTag(scudo::loadTag(P1));
    scudo::uptr Tag2 = scudo::extractTag(scudo::loadTag(P2));
    EXPECT_NE(Tag1 % 2, Tag2 % 2);
  };

  using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
  for (scudo::uptr ClassId = 1U; ClassId <= SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size = SizeClassMap::getSizeByClassId(ClassId);

    std::set<scudo::uptr> Ptrs;
    bool Found = false;
    for (unsigned I = 0; I != 65536; ++I) {
      scudo::uptr P = scudo::untagPointer(reinterpret_cast<scudo::uptr>(
          Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin)));
      if (Ptrs.count(P - Size)) {
        Found = true;
        CheckOddEven(P, P - Size);
        break;
      }
      if (Ptrs.count(P + Size)) {
        Found = true;
        CheckOddEven(P, P + Size);
        break;
      }
      Ptrs.insert(P);
    }
    EXPECT_TRUE(Found);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, DisableMemInit) {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> Ptrs(65536, nullptr);

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 1);

  constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

  // Test that, even if mem-init is disabled on a thread, calloc still works
  // as expected. This is tricky to ensure when MTE is enabled, so this test
  // tries to exercise the relevant code on our MTE path.
  for (scudo::uptr ClassId = 1U; ClassId <= 8; ClassId++) {
    using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
    const scudo::uptr Size =
        SizeClassMap::getSizeByClassId(ClassId) - scudo::Chunk::getHeaderSize();
    if (Size < 8)
      continue;
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin);
      memset(Ptrs[I], 0xaa, Size);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size - 8, Origin);
      memset(Ptrs[I], 0xbb, Size - 8);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size - 8);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      for (scudo::uptr J = 0; J < Size; ++J)
        ASSERT_EQ((reinterpret_cast<char *>(Ptrs[I]))[J], 0);
    }
  }

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 0);
}