//===-- combined_test.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "memtag.h"
#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "chunk.h"
#include "combined.h"

#include <condition_variable>
#include <memory>
#include <mutex>
#include <set>
#include <stdlib.h>
#include <thread>
#include <vector>

static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
static constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

// Fuchsia complains that the function is not used.
UNUSED static void disableDebuggerdMaybe() {
#if SCUDO_ANDROID
  // Disable the debuggerd signal handler on Android; without it we can end up
  // spending a significant amount of time creating tombstones.
  signal(SIGSEGV, SIG_DFL);
#endif
}

template <class AllocatorT>
bool isPrimaryAllocation(scudo::uptr Size, scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  if (Alignment < MinAlignment)
    Alignment = MinAlignment;
  const scudo::uptr NeededSize =
      scudo::roundUpTo(Size, MinAlignment) +
      ((Alignment > MinAlignment) ? Alignment : scudo::Chunk::getHeaderSize());
  return AllocatorT::PrimaryT::canAllocate(NeededSize);
}
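// Illustrative sketch of the computation above (not exercised by the tests):
// with the 64-bit minimum alignment of 16 bytes, a request of Size = 100 with
// default alignment needs roundUpTo(100, 16) + Chunk::getHeaderSize() bytes
// from the Primary, i.e. 112 plus the header (typically 16 bytes on 64-bit);
// an over-aligned request (Alignment > 16) instead reserves the full
// Alignment on top of the rounded-up size.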

// Verifies the memory tagging behavior around a freshly allocated chunk:
// writing to the byte just before the user pointer (inside the header) must
// trap whenever tagging is enabled, and writing one byte past the rounded-up
// size must trap either when the chunk comes from the Primary with tagging
// enabled, or when it comes from the Secondary with the minimum alignment (in
// which case the access is expected to land past the mapped block).
template <class AllocatorT>
void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
                             scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Size = scudo::roundUpTo(Size, MinAlignment);
  if (Allocator->useMemoryTaggingTestOnly())
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[-1] = 0xaa;
        },
        "");
  if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
          ? Allocator->useMemoryTaggingTestOnly()
          : Alignment == MinAlignment) {
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[Size] = 0xaa;
        },
        "");
  }
}

// A wrapper allocator used by the tests. operator new is overridden because
// the allocator object may require an alignment larger than what the default
// operator new guarantees, so it is obtained via posix_memalign and released
// with free().
template <typename Config> struct TestAllocator : scudo::Allocator<Config> {
  TestAllocator() {
    this->initThreadMaybe();
    if (scudo::archSupportsMemoryTagging() &&
        !scudo::systemDetectsMemoryTagFaultsTestOnly())
      this->disableMemoryTagging();
  }
  ~TestAllocator() { this->unmapTestOnly(); }

  void *operator new(size_t size) {
    void *p = nullptr;
    EXPECT_EQ(0, posix_memalign(&p, alignof(TestAllocator), size));
    return p;
  }

  void operator delete(void *ptr) { free(ptr); }
};

template <class TypeParam> struct ScudoCombinedTest : public Test {
  ScudoCombinedTest() {
    UseQuarantine = std::is_same<TypeParam, scudo::AndroidConfig>::value;
    Allocator = std::make_unique<AllocatorT>();
  }
  ~ScudoCombinedTest() {
    Allocator->releaseToOS();
    UseQuarantine = true;
  }

  void RunTest();

  void BasicTest(scudo::uptr SizeLog);

  using AllocatorT = TestAllocator<TypeParam>;
  std::unique_ptr<AllocatorT> Allocator;
};

// Fixture alias for death tests: gtest treats test suites whose name ends in
// "DeathTest" specially (they are run before all other tests).
template <typename T> using ScudoCombinedDeathTest = ScudoCombinedTest<T>;

#if SCUDO_FUCHSIA
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidSvelteConfig)                    \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, FuchsiaConfig)
#else
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidSvelteConfig)                    \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, DefaultConfig)                          \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig)
#endif

#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
  using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<scudo::TYPE>;                   \
  TEST_F(FIXTURE##NAME##_##TYPE, NAME) { FIXTURE##NAME<scudo::TYPE>::Run(); }

#define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
  template <class TypeParam>                                                   \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
    void Run();                                                                \
  };                                                                           \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()
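// As a rough sketch of what the machinery above generates, the invocation
// SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) { ... } expands, for one of
// the configs (here DefaultConfig), to approximately:
//
//   template <class TypeParam>
//   struct ScudoCombinedTestIsOwned : public ScudoCombinedTest<TypeParam> {
//     void Run();
//   };
//   using ScudoCombinedTestIsOwned_DefaultConfig =
//       ScudoCombinedTestIsOwned<scudo::DefaultConfig>;
//   TEST_F(ScudoCombinedTestIsOwned_DefaultConfig, IsOwned) {
//     ScudoCombinedTestIsOwned<scudo::DefaultConfig>::Run();
//   }
//   // ... one such alias/TEST_F pair per config listed in
//   // SCUDO_TYPED_TEST_ALL_TYPES ...
//   template <class TypeParam> void ScudoCombinedTestIsOwned<TypeParam>::Run()
//   { /* the test body */ }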

SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) {
  auto *Allocator = this->Allocator.get();
  static scudo::u8 StaticBuffer[scudo::Chunk::getHeaderSize() + 1];
  EXPECT_FALSE(
      Allocator->isOwned(&StaticBuffer[scudo::Chunk::getHeaderSize()]));

  scudo::u8 StackBuffer[scudo::Chunk::getHeaderSize() + 1];
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    StackBuffer[I] = 0x42U;
  EXPECT_FALSE(Allocator->isOwned(&StackBuffer[scudo::Chunk::getHeaderSize()]));
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    EXPECT_EQ(StackBuffer[I], 0x42U);
}

template <class Config>
void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLog) {
  auto *Allocator = this->Allocator.get();

  // This allocates and deallocates a bunch of chunks, with a wide range of
  // sizes and alignments, with a focus on sizes that could trigger weird
  // behaviors (for example, a power of two plus or minus a small delta).
  for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
    const scudo::uptr Align = 1U << AlignLog;
    for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
      if (static_cast<scudo::sptr>(1U << SizeLog) + Delta < 0)
        continue;
      const scudo::uptr Size = (1U << SizeLog) + Delta;
      void *P = Allocator->allocate(Size, Origin, Align);
      EXPECT_NE(P, nullptr);
      EXPECT_TRUE(Allocator->isOwned(P));
      EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align));
      EXPECT_LE(Size, Allocator->getUsableSize(P));
      memset(P, 0xaa, Size);
      checkMemoryTaggingMaybe(Allocator, P, Size, Align);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

#define SCUDO_MAKE_BASIC_TEST(SizeLog)                                         \
  SCUDO_TYPED_TEST(ScudoCombinedDeathTest, BasicCombined##SizeLog) {           \
    this->BasicTest(SizeLog);                                                  \
  }
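// The invocations below stamp out one test per SizeLog from 0 to 20, so the
// allocate/deallocate loop above is exercised for base sizes from a single
// byte up to 1 MiB, each probed with a +/-32 byte delta and a range of
// alignments.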

SCUDO_MAKE_BASIC_TEST(0)
SCUDO_MAKE_BASIC_TEST(1)
SCUDO_MAKE_BASIC_TEST(2)
SCUDO_MAKE_BASIC_TEST(3)
SCUDO_MAKE_BASIC_TEST(4)
SCUDO_MAKE_BASIC_TEST(5)
SCUDO_MAKE_BASIC_TEST(6)
SCUDO_MAKE_BASIC_TEST(7)
SCUDO_MAKE_BASIC_TEST(8)
SCUDO_MAKE_BASIC_TEST(9)
SCUDO_MAKE_BASIC_TEST(10)
SCUDO_MAKE_BASIC_TEST(11)
SCUDO_MAKE_BASIC_TEST(12)
SCUDO_MAKE_BASIC_TEST(13)
SCUDO_MAKE_BASIC_TEST(14)
SCUDO_MAKE_BASIC_TEST(15)
SCUDO_MAKE_BASIC_TEST(16)
SCUDO_MAKE_BASIC_TEST(17)
SCUDO_MAKE_BASIC_TEST(18)
SCUDO_MAKE_BASIC_TEST(19)
SCUDO_MAKE_BASIC_TEST(20)

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroContents) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroContents returns a zero'd out block.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroFill returns a zero'd out block.
  Allocator->setFillContents(scudo::ZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, PatternOrZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying PatternOrZeroFill returns a pattern or zero filled
  // block. The primary allocator only produces pattern filled blocks if MTE
  // is disabled, so we only require pattern filled blocks in that case.
  Allocator->setFillContents(scudo::PatternOrZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++) {
        unsigned char V = (reinterpret_cast<unsigned char *>(P))[I];
        if (isPrimaryAllocation<TestAllocator<TypeParam>>(Size,
                                                          1U << MinAlignLog) &&
            !Allocator->useMemoryTaggingTestOnly())
          ASSERT_EQ(V, scudo::PatternFillByte);
        else
          ASSERT_TRUE(V == scudo::PatternFillByte || V == 0);
      }
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, BlockReuse) {
  auto *Allocator = this->Allocator.get();

  // Verify that a chunk will end up being reused, at some point.
  const scudo::uptr NeedleSize = 1024U;
  void *NeedleP = Allocator->allocate(NeedleSize, Origin);
  Allocator->deallocate(NeedleP, Origin);
  bool Found = false;
  for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
    void *P = Allocator->allocate(NeedleSize, Origin);
    if (Allocator->getHeaderTaggedPointer(P) ==
        Allocator->getHeaderTaggedPointer(NeedleP))
      Found = true;
    Allocator->deallocate(P, Origin);
  }
  EXPECT_TRUE(Found);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeIncreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a chunk all the way up to a secondary allocation, verifying
  // that we preserve the data in the process.
  scudo::uptr Size = 16;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 0xab;
  memset(P, Marker, Size);
  while (Size < TypeParam::Primary::SizeClassMap::MaxSize * 4) {
    void *NewP = Allocator->reallocate(P, Size * 2);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < Size; J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    memset(reinterpret_cast<char *>(NewP) + Size, Marker, Size);
    Size *= 2U;
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeDecreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a large chunk all the way down to a byte, verifying that we
  // preserve the data in the process.
  scudo::uptr Size = TypeParam::Primary::SizeClassMap::MaxSize * 2;
  const scudo::uptr DataSize = 2048U;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 0xab;
  memset(P, Marker, scudo::Min(Size, DataSize));
  while (Size > 1U) {
    Size /= 2U;
    void *NewP = Allocator->reallocate(P, Size);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < scudo::Min(Size, DataSize); J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, ReallocateSame) {
  auto *Allocator = this->Allocator.get();

  // Check that reallocating a chunk to a slightly smaller or larger size
  // returns the same chunk. This requires that all the sizes we iterate on use
  // the same block size, but that should be the case for MaxSize - 64 with our
  // default class size maps.
  constexpr scudo::uptr ReallocSize =
      TypeParam::Primary::SizeClassMap::MaxSize - 64;
  void *P = Allocator->allocate(ReallocSize, Origin);
  const char Marker = 0xab;
  memset(P, Marker, ReallocSize);
  for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
    const scudo::uptr NewSize = ReallocSize + Delta;
    void *NewP = Allocator->reallocate(P, NewSize);
    EXPECT_EQ(NewP, P);
    for (scudo::uptr I = 0; I < ReallocSize - 32; I++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker);
    checkMemoryTaggingMaybe(Allocator, NewP, NewSize, 0);
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, IterateOverChunks) {
  auto *Allocator = this->Allocator.get();
  // Allocates a bunch of chunks, then iterates over all of them, ensuring
  // they are the ones we allocated. This requires the allocator to not have
  // any other allocated chunk at this point (eg: won't work with the
  // Quarantine).
  // FIXME: Make it work with UseQuarantine and tagging enabled. The internals
  // of iterateOverChunks read the header through both tagged and untagged
  // pointers, so one of the two will fail.
  if (!UseQuarantine) {
    std::vector<void *> V;
    for (scudo::uptr I = 0; I < 64U; I++)
      V.push_back(Allocator->allocate(
          rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
    Allocator->disable();
    Allocator->iterateOverChunks(
        0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
        [](uintptr_t Base, size_t Size, void *Arg) {
          std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
          void *P = reinterpret_cast<void *>(Base);
          EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
        },
        reinterpret_cast<void *>(&V));
    Allocator->enable();
    for (auto P : V)
      Allocator->deallocate(P, Origin);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, UseAfterFree) {
  auto *Allocator = this->Allocator.get();

  // Check that use-after-free is detected.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    const scudo::uptr Size = 1U << SizeLog;
    if (!Allocator->useMemoryTaggingTestOnly())
      continue;
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[0] = 0xaa;
        },
        "");
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[Size - 1] = 0xaa;
        },
        "");
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DisableMemoryTagging) {
  auto *Allocator = this->Allocator.get();

  if (Allocator->useMemoryTaggingTestOnly()) {
    // Check that disabling memory tagging works correctly.
    void *P = Allocator->allocate(2048, Origin);
    EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 0xaa, "");
    scudo::ScopedDisableMemoryTagChecks NoTagChecks;
    Allocator->disableMemoryTagging();
    reinterpret_cast<char *>(P)[2048] = 0xaa;
    Allocator->deallocate(P, Origin);

    P = Allocator->allocate(2048, Origin);
    EXPECT_EQ(scudo::untagPointer(P), P);
    reinterpret_cast<char *>(P)[2048] = 0xaa;
    Allocator->deallocate(P, Origin);

    Allocator->releaseToOS();
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, Stats) {
  auto *Allocator = this->Allocator.get();

  scudo::uptr BufferSize = 8192;
  std::vector<char> Buffer(BufferSize);
  scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  while (ActualSize > BufferSize) {
    BufferSize = ActualSize + 1024;
    Buffer.resize(BufferSize);
    ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  }
  std::string Stats(Buffer.begin(), Buffer.end());
  // Basic checks on the contents of the statistics output, which also allows
  // us to verify that we got it all.
  EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
  for (auto P : V)
    Allocator->deallocate(P, Origin);

  bool UnlockRequired;
  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
  EXPECT_TRUE(!TSD->Cache.isEmpty());
  TSD->Cache.drain();
  EXPECT_TRUE(TSD->Cache.isEmpty());
  if (UnlockRequired)
    TSD->unlock();
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  auto *Allocator = this->Allocator.get();
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread([&]() {
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      std::vector<std::pair<void *, scudo::uptr>> V;
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size = std::rand() % 4096U;
        void *P = Allocator->allocate(Size, Origin);
        // A region could have run out of memory, resulting in a null P.
        if (P)
          V.push_back(std::make_pair(P, Size));
      }
      while (!V.empty()) {
        auto Pair = V.back();
        Allocator->deallocate(Pair.first, Origin, Pair.second);
        V.pop_back();
      }
    });
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS();
}

// Test that multiple instantiations of the allocator have not messed up the
// process's signal handlers (GWP-ASan used to do this).
TEST(ScudoCombinedDeathTest, SKIP_ON_FUCHSIA(testSEGV)) {
  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
  scudo::MapPlatformData Data = {};
  void *P = scudo::map(nullptr, Size, "testSEGV", MAP_NOACCESS, &Data);
  EXPECT_NE(P, nullptr);
  EXPECT_DEATH(memset(P, 0xaa, Size), "");
  scudo::unmap(P, Size, UNMAP_ALL, &Data);
}

struct DeathSizeClassConfig {
  static const scudo::uptr NumBits = 1;
  static const scudo::uptr MinSizeLog = 10;
  static const scudo::uptr MidSizeLog = 10;
  static const scudo::uptr MaxSizeLog = 13;
  static const scudo::u16 MaxNumCachedHint = 8;
  static const scudo::uptr MaxBytesCachedLog = 12;
  static const scudo::uptr SizeDelta = 0;
};
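// With NumBits == 1, MinSizeLog == MidSizeLog == 10, MaxSizeLog == 13 and
// SizeDelta == 0, this map should yield exactly four size classes: 1024,
// 2048, 4096 and 8192 bytes.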

static const scudo::uptr DeathRegionSizeLog = 21U;
struct DeathConfig {
  static const bool MaySupportMemoryTagging = false;

  // Tiny allocator: its Primary only serves chunks of four sizes.
  using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
  typedef scudo::SizeClassAllocator64<DeathConfig> Primary;
  static const scudo::uptr PrimaryRegionSizeLog = DeathRegionSizeLog;
  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
  typedef scudo::uptr PrimaryCompactPtrT;
  static const scudo::uptr PrimaryCompactPtrScale = 0;
  static const bool PrimaryEnableRandomOffset = true;
  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
  static const scudo::uptr PrimaryGroupSizeLog = 18;

  typedef scudo::MapAllocatorNoCache SecondaryCache;
  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
};

TEST(ScudoCombinedDeathTest, DeathCombined) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  const scudo::uptr Size = 1000U;
  void *P = Allocator->allocate(Size, Origin);
  EXPECT_NE(P, nullptr);

  // Invalid sized deallocation.
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size + 8U), "");

  // Misaligned pointer. Potentially unused if EXPECT_DEATH isn't available.
  UNUSED void *MisalignedP =
      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) | 1U);
  EXPECT_DEATH(Allocator->deallocate(MisalignedP, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(MisalignedP, Size * 2U), "");

  // Header corruption.
  scudo::u64 *H =
      reinterpret_cast<scudo::u64 *>(scudo::Chunk::getAtomicHeader(P));
  *H ^= 0x42U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420042U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420000U;

  // Invalid chunk state.
  Allocator->deallocate(P, Origin, Size);
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(P, Size * 2U), "");
  EXPECT_DEATH(Allocator->getUsableSize(P), "");
}

// Verify that when a region gets full, the allocator will still manage to
// fulfill the allocation through a larger size class.
TEST(ScudoCombinedTest, FullRegion) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  std::vector<void *> V;
  scudo::uptr FailedAllocationsCount = 0;
  for (scudo::uptr ClassId = 1U;
       ClassId <= DeathConfig::SizeClassMap::LargestClassId; ClassId++) {
    const scudo::uptr Size =
        DeathConfig::SizeClassMap::getSizeByClassId(ClassId);
    // Allocate enough to fill all of the regions above this one.
    const scudo::uptr MaxNumberOfChunks =
        ((1U << DeathRegionSizeLog) / Size) *
        (DeathConfig::SizeClassMap::LargestClassId - ClassId + 1);
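    // For instance, assuming the four classes of DeathSizeClassConfig, the
    // first class (1024 bytes) gives (2^21 / 1024) * 4 = 8192 chunks, which
    // should be enough to exhaust its own 2 MB region and spill over into the
    // larger classes.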
    void *P;
    for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) {
      P = Allocator->allocate(Size - 64U, Origin);
      if (!P)
        FailedAllocationsCount++;
      else
        V.push_back(P);
    }
    while (!V.empty()) {
      Allocator->deallocate(V.back(), Origin);
      V.pop_back();
    }
  }
  EXPECT_EQ(FailedAllocationsCount, 0U);
}

// Ensure that releaseToOS can be called prior to any other allocator
// operation without issue.
SCUDO_TYPED_TEST(ScudoCombinedTest, ReleaseToOS) {
  auto *Allocator = this->Allocator.get();
  Allocator->releaseToOS();
}

SCUDO_TYPED_TEST(ScudoCombinedTest, OddEven) {
  auto *Allocator = this->Allocator.get();

  if (!Allocator->useMemoryTaggingTestOnly())
    return;

  // Adjacent chunks of the same size class are expected to carry memory tags
  // of different parity, so that a linear overflow into a neighboring chunk
  // is caught by the tag check.
  auto CheckOddEven = [](scudo::uptr P1, scudo::uptr P2) {
    scudo::uptr Tag1 = scudo::extractTag(scudo::loadTag(P1));
    scudo::uptr Tag2 = scudo::extractTag(scudo::loadTag(P2));
    EXPECT_NE(Tag1 % 2, Tag2 % 2);
  };

  using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
  for (scudo::uptr ClassId = 1U; ClassId <= SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size = SizeClassMap::getSizeByClassId(ClassId);

    std::set<scudo::uptr> Ptrs;
    bool Found = false;
    for (unsigned I = 0; I != 65536; ++I) {
      scudo::uptr P = scudo::untagPointer(reinterpret_cast<scudo::uptr>(
          Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin)));
      if (Ptrs.count(P - Size)) {
        Found = true;
        CheckOddEven(P, P - Size);
        break;
      }
      if (Ptrs.count(P + Size)) {
        Found = true;
        CheckOddEven(P, P + Size);
        break;
      }
      Ptrs.insert(P);
    }
    EXPECT_TRUE(Found);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, DisableMemInit) {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> Ptrs(65536);

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 1);

  constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

  // Test that, even if mem-init is disabled on a thread, calloc still works as
  // expected. This is tricky to ensure when MTE is enabled, so this test tries
  // to exercise the relevant code on our MTE path.
  for (scudo::uptr ClassId = 1U; ClassId <= 8; ClassId++) {
    using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
    const scudo::uptr Size =
        SizeClassMap::getSizeByClassId(ClassId) - scudo::Chunk::getHeaderSize();
    if (Size < 8)
      continue;
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin);
      memset(Ptrs[I], 0xaa, Size);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size - 8, Origin);
      memset(Ptrs[I], 0xbb, Size - 8);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size - 8);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      for (scudo::uptr J = 0; J < Size; ++J)
        ASSERT_EQ((reinterpret_cast<char *>(Ptrs[I]))[J], 0);
    }
  }

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 0);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateInPlaceStress) {
  auto *Allocator = this->Allocator.get();

  // Regression test: make realloc-in-place happen at the very right end of a
  // mapped region.
  constexpr int nPtrs = 10000;
  for (int i = 1; i < 32; ++i) {
    scudo::uptr Size = 16 * i - 1;
    std::vector<void *> Ptrs;
    for (int i = 0; i < nPtrs; ++i) {
      void *P = Allocator->allocate(Size, Origin);
      P = Allocator->reallocate(P, Size + 1);
      Ptrs.push_back(P);
    }

    for (int i = 0; i < nPtrs; ++i)
      Allocator->deallocate(Ptrs[i], Origin);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferSize) {
  auto *Allocator = this->Allocator.get();
  auto Size = Allocator->getRingBufferSize();
  ASSERT_GT(Size, 0u);
  EXPECT_EQ(Allocator->getRingBufferAddress()[Size - 1], '\0');
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferAddress) {
  auto *Allocator = this->Allocator.get();
  auto *Addr = Allocator->getRingBufferAddress();
  EXPECT_NE(Addr, nullptr);
  EXPECT_EQ(Addr, Allocator->getRingBufferAddress());
}

#if SCUDO_CAN_USE_PRIMARY64
#if SCUDO_TRUSTY

// TrustyConfig is designed for a domain-specific allocator. Add a basic test
// which covers only simple operations and ensures the configuration compiles.
TEST(ScudoCombinedTest, BasicTrustyConfig) {
  using AllocatorT = scudo::Allocator<scudo::TrustyConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  for (scudo::uptr ClassId = 1U;
       ClassId <= scudo::TrustyConfig::SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size =
        scudo::TrustyConfig::SizeClassMap::getSizeByClassId(ClassId);
    void *p = Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin);
    ASSERT_NE(p, nullptr);
    free(p);
  }

  bool UnlockRequired;
  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
  TSD->Cache.drain();

  Allocator->releaseToOS();
}

#endif
#endif

#if SCUDO_LINUX

SCUDO_TYPED_TEST(ScudoCombinedTest, SoftRssLimit) {
  auto *Allocator = this->Allocator.get();
  Allocator->setRssLimitsTestOnly(1, 0, true);

  size_t Megabyte = 1024 * 1024;
  size_t ChunkSize = 16;
  size_t Error = 256;
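  // The loop below makes roughly (1 MiB + 256) / 16, i.e. about 65K,
  // allocations of 16 bytes each, so even before counting per-chunk overhead
  // it should push the process past the 1 MB soft RSS limit set above, after
  // which the follow-up allocation is expected to return null.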

  std::vector<void *> Ptrs;
  for (size_t index = 0; index < Megabyte + Error; index += ChunkSize) {
    void *Ptr = Allocator->allocate(ChunkSize, Origin);
    Ptrs.push_back(Ptr);
  }

  EXPECT_EQ(nullptr, Allocator->allocate(ChunkSize, Origin));

  for (void *Ptr : Ptrs)
    Allocator->deallocate(Ptr, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, HardRssLimit) {
  auto *Allocator = this->Allocator.get();
  Allocator->setRssLimitsTestOnly(0, 1, false);

  size_t Megabyte = 1024 * 1024;

  EXPECT_DEATH(
      {
        disableDebuggerdMaybe();
        Allocator->allocate(Megabyte, Origin);
      },
      "");
}

#endif