//===-- combined_test.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "combined.h"

#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

static std::mutex Mutex;
static std::condition_variable Cv;
static bool Ready = false;

static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;

static void disableDebuggerdMaybe() {
#if SCUDO_ANDROID
  // Disable the debuggerd signal handler on Android. Without this we can end
  // up spending a significant amount of time creating tombstones.
  signal(SIGSEGV, SIG_DFL);
#endif
}

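// Returns whether an allocation of Size bytes with the given Alignment is
// expected to be tagged, ie: memory tagging is enabled and supported, and the
// resulting chunk is small enough to be serviced by the Primary.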
template <class AllocatorT>
bool isTaggedAllocation(AllocatorT *Allocator, scudo::uptr Size,
                        scudo::uptr Alignment) {
  if (!Allocator->useMemoryTagging() ||
      !scudo::systemDetectsMemoryTagFaultsTestOnly())
    return false;

  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  if (Alignment < MinAlignment)
    Alignment = MinAlignment;
  const scudo::uptr NeededSize =
      scudo::roundUpTo(Size, MinAlignment) +
      ((Alignment > MinAlignment) ? Alignment : scudo::Chunk::getHeaderSize());
  return AllocatorT::PrimaryT::canAllocate(NeededSize);
}

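// If the allocation is expected to be tagged, check that accessing one byte
// before the chunk and one byte past the requested size (rounded up to the tag
// granule) triggers a tag fault.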
template <class AllocatorT>
void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
                             scudo::uptr Alignment) {
  if (!isTaggedAllocation(Allocator, Size, Alignment))
    return;

  Size = scudo::roundUpTo(Size, scudo::archMemoryTagGranuleSize());
  EXPECT_DEATH(
      {
        disableDebuggerdMaybe();
        reinterpret_cast<char *>(P)[-1] = 0xaa;
      },
      "");
  EXPECT_DEATH(
      {
        disableDebuggerdMaybe();
        reinterpret_cast<char *>(P)[Size] = 0xaa;
      },
      "");
}

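// Exercises the basic functionality of the combined allocator for a given
// configuration: allocation, deallocation, reallocation, fill contents, chunk
// iteration, memory tagging (where supported) and statistics.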
template <class Config> static void testAllocator() {
  using AllocatorT = scudo::Allocator<Config>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  Allocator->reset();

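  // Pointers that were not allocated by this allocator must not be reported as
  // owned, and the ownership check should leave the memory untouched.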
  EXPECT_FALSE(Allocator->isOwned(&Mutex));
  EXPECT_FALSE(Allocator->isOwned(&Allocator));
  scudo::u64 StackVariable = 0x42424242U;
  EXPECT_FALSE(Allocator->isOwned(&StackVariable));
  EXPECT_EQ(StackVariable, 0x42424242U);

  constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

  // This allocates and deallocates a bunch of chunks, with a wide range of
  // sizes and alignments, with a focus on sizes that could trigger weird
  // behaviors (eg: a power of two plus or minus a small delta).
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
      const scudo::uptr Align = 1U << AlignLog;
      for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
        if (static_cast<scudo::sptr>(1U << SizeLog) + Delta <= 0)
          continue;
        const scudo::uptr Size = (1U << SizeLog) + Delta;
        void *P = Allocator->allocate(Size, Origin, Align);
        EXPECT_NE(P, nullptr);
        EXPECT_TRUE(Allocator->isOwned(P));
        EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align));
        EXPECT_LE(Size, Allocator->getUsableSize(P));
        memset(P, 0xaa, Size);
        checkMemoryTaggingMaybe(Allocator.get(), P, Size, Align);
        Allocator->deallocate(P, Origin, Size);
      }
    }
  }
  Allocator->releaseToOS();

  // Ensure that specifying ZeroContents returns a zero'd out block.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
  Allocator->releaseToOS();

  // Ensure that specifying ZeroFill as the fill mode returns a zero'd out
  // block, even without requesting ZeroContents at the call site.
  Allocator->setFillContents(scudo::ZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
  Allocator->releaseToOS();

  // Ensure that specifying PatternOrZeroFill returns a pattern-filled block in
  // the primary allocator, and either a pattern-filled or zero-filled block in
  // the secondary.
  Allocator->setFillContents(scudo::PatternOrZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++) {
        unsigned char V = (reinterpret_cast<unsigned char *>(P))[I];
        if (AllocatorT::PrimaryT::canAllocate(Size))
          ASSERT_EQ(V, scudo::PatternFillByte);
        else
          ASSERT_TRUE(V == scudo::PatternFillByte || V == 0);
      }
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
  Allocator->releaseToOS();

  // Verify that a chunk will end up being reused at some point.
  const scudo::uptr NeedleSize = 1024U;
  void *NeedleP = Allocator->allocate(NeedleSize, Origin);
  Allocator->deallocate(NeedleP, Origin);
  bool Found = false;
  for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
    void *P = Allocator->allocate(NeedleSize, Origin);
    if (Allocator->untagPointerMaybe(P) ==
        Allocator->untagPointerMaybe(NeedleP))
      Found = true;
    Allocator->deallocate(P, Origin);
  }
  EXPECT_TRUE(Found);

  constexpr scudo::uptr MaxSize = Config::Primary::SizeClassMap::MaxSize;

  // Reallocate a large chunk all the way down to a byte, verifying that we
  // preserve the data in the process.
  scudo::uptr Size = MaxSize * 2;
  const scudo::uptr DataSize = 2048U;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 0xab;
  memset(P, Marker, scudo::Min(Size, DataSize));
  while (Size > 1U) {
    Size /= 2U;
    void *NewP = Allocator->reallocate(P, Size);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < scudo::Min(Size, DataSize); J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    P = NewP;
  }
  Allocator->deallocate(P, Origin);

  // Check that reallocating a chunk to a slightly smaller or larger size
  // returns the same chunk. This requires that all the sizes we iterate on use
  // the same block size, but that should be the case for MaxSize - 64 with our
  // default class size maps.
  constexpr scudo::uptr ReallocSize = MaxSize - 64;
  P = Allocator->allocate(ReallocSize, Origin);
  memset(P, Marker, ReallocSize);
  for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
    const scudo::uptr NewSize = ReallocSize + Delta;
    void *NewP = Allocator->reallocate(P, NewSize);
    EXPECT_EQ(NewP, P);
    for (scudo::uptr I = 0; I < ReallocSize - 32; I++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker);
    checkMemoryTaggingMaybe(Allocator.get(), NewP, NewSize, 0);
  }
  Allocator->deallocate(P, Origin);

  // Allocate a bunch of chunks, then iterate over all the chunks, ensuring
  // they are the ones we allocated. This requires the allocator to not have
  // any other allocated chunk at this point (eg: won't work with the
  // Quarantine).
  if (!UseQuarantine) {
    std::vector<void *> V;
    for (scudo::uptr I = 0; I < 64U; I++)
      V.push_back(Allocator->allocate(rand() % (MaxSize / 2U), Origin));
    Allocator->disable();
    Allocator->iterateOverChunks(
        0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
        [](uintptr_t Base, size_t Size, void *Arg) {
          std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
          void *P = reinterpret_cast<void *>(Base);
          EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
        },
        reinterpret_cast<void *>(&V));
    Allocator->enable();
    while (!V.empty()) {
      Allocator->deallocate(V.back(), Origin);
      V.pop_back();
    }
  }

  Allocator->releaseToOS();

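  // The memory tagging checks below only run if the allocator config enables
  // tagging and the system actually reports tag faults.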
  if (Allocator->useMemoryTagging() &&
      scudo::systemDetectsMemoryTagFaultsTestOnly()) {
    // Check that use-after-free is detected.
    for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
      const scudo::uptr Size = 1U << SizeLog;
      if (!isTaggedAllocation(Allocator.get(), Size, 1))
        continue;
      // UAF detection is probabilistic, so we repeat the test up to 256 times
      // if necessary. With 15 possible tags this means a 1 in 15^256 chance of
      // a false positive.
      EXPECT_DEATH(
          {
            disableDebuggerdMaybe();
            for (unsigned I = 0; I != 256; ++I) {
              void *P = Allocator->allocate(Size, Origin);
              Allocator->deallocate(P, Origin);
              reinterpret_cast<char *>(P)[0] = 0xaa;
            }
          },
          "");
      EXPECT_DEATH(
          {
            disableDebuggerdMaybe();
            for (unsigned I = 0; I != 256; ++I) {
              void *P = Allocator->allocate(Size, Origin);
              Allocator->deallocate(P, Origin);
              reinterpret_cast<char *>(P)[Size - 1] = 0xaa;
            }
          },
          "");
    }

    // Check that disabling memory tagging works correctly.
    void *P = Allocator->allocate(2048, Origin);
    EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 0xaa, "");
    scudo::disableMemoryTagChecksTestOnly();
    Allocator->disableMemoryTagging();
    reinterpret_cast<char *>(P)[2048] = 0xaa;
    Allocator->deallocate(P, Origin);

    P = Allocator->allocate(2048, Origin);
    EXPECT_EQ(Allocator->untagPointerMaybe(P), P);
    reinterpret_cast<char *>(P)[2048] = 0xaa;
    Allocator->deallocate(P, Origin);

    Allocator->releaseToOS();

    // Disabling memory tag checks may interfere with subsequent tests.
    // Re-enable them now.
    scudo::enableMemoryTagChecksTestOnly();
  }

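  // Collect the statistics string, growing the buffer until the whole output
  // fits.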
  scudo::uptr BufferSize = 8192;
  std::vector<char> Buffer(BufferSize);
  scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  while (ActualSize > BufferSize) {
    BufferSize = ActualSize + 1024;
    Buffer.resize(BufferSize);
    ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  }
  std::string Stats(Buffer.begin(), Buffer.end());
  // Basic checks on the contents of the statistics output, which also allow us
  // to verify that we got it all.
  EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
}

// Test that multiple instantiations of the allocator have not messed up the
// process's signal handlers (GWP-ASan used to do this).
void testSEGV() {
  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
  scudo::MapPlatformData Data = {};
  void *P = scudo::map(nullptr, Size, "testSEGV", MAP_NOACCESS, &Data);
  EXPECT_NE(P, nullptr);
  EXPECT_DEATH(memset(P, 0xaa, Size), "");
  scudo::unmap(P, Size, UNMAP_ALL, &Data);
}

TEST(ScudoCombinedTest, BasicCombined) {
  UseQuarantine = false;
  testAllocator<scudo::AndroidSvelteConfig>();
#if SCUDO_FUCHSIA
  testAllocator<scudo::FuchsiaConfig>();
#else
  testAllocator<scudo::DefaultConfig>();
  UseQuarantine = true;
  testAllocator<scudo::AndroidConfig>();
  testSEGV();
#endif
}

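// Worker routine for the threaded test: waits for the start signal, then
// allocates a batch of randomly sized chunks and frees them all.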
template <typename AllocatorT> static void stressAllocator(AllocatorT *A) {
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    while (!Ready)
      Cv.wait(Lock);
  }
  std::vector<std::pair<void *, scudo::uptr>> V;
  for (scudo::uptr I = 0; I < 256U; I++) {
    const scudo::uptr Size = std::rand() % 4096U;
    void *P = A->allocate(Size, Origin);
    // A region could have run out of memory, resulting in a null P.
    if (P)
      V.push_back(std::make_pair(P, Size));
  }
  while (!V.empty()) {
    auto Pair = V.back();
    A->deallocate(Pair.first, Origin, Pair.second);
    V.pop_back();
  }
}

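// Runs 32 instances of stressAllocator concurrently against a single allocator
// instance, then releases memory back to the OS.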
template <class Config> static void testAllocatorThreaded() {
  using AllocatorT = scudo::Allocator<Config>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  Allocator->reset();
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressAllocator<AllocatorT>, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS();
}

TEST(ScudoCombinedTest, ThreadedCombined) {
  UseQuarantine = false;
  testAllocatorThreaded<scudo::AndroidSvelteConfig>();
#if SCUDO_FUCHSIA
  testAllocatorThreaded<scudo::FuchsiaConfig>();
#else
  testAllocatorThreaded<scudo::DefaultConfig>();
  UseQuarantine = true;
  testAllocatorThreaded<scudo::AndroidConfig>();
#endif
}

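// Minimal size class configuration used by the death tests below.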
struct DeathSizeClassConfig {
  static const scudo::uptr NumBits = 1;
  static const scudo::uptr MinSizeLog = 10;
  static const scudo::uptr MidSizeLog = 10;
  static const scudo::uptr MaxSizeLog = 13;
  static const scudo::u32 MaxNumCachedHint = 4;
  static const scudo::uptr MaxBytesCachedLog = 12;
};

static const scudo::uptr DeathRegionSizeLog = 20U;
struct DeathConfig {
  // Tiny allocator, whose Primary only serves chunks of four sizes.
  using DeathSizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
  typedef scudo::SizeClassAllocator64<DeathSizeClassMap, DeathRegionSizeLog>
      Primary;
  typedef scudo::MapAllocator<scudo::MapAllocatorNoCache> Secondary;
  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U>;
};

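// Verify that various invalid operations (wrong sized deallocation, misaligned
// pointers, header corruption, double free) are caught and abort the process.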
TEST(ScudoCombinedTest, DeathCombined) {
  using AllocatorT = scudo::Allocator<DeathConfig>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  Allocator->reset();

  const scudo::uptr Size = 1000U;
  void *P = Allocator->allocate(Size, Origin);
  EXPECT_NE(P, nullptr);

  // Sized deallocation with an invalid (mismatched) size.
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size + 8U), "");

  // Misaligned pointer. Potentially unused if EXPECT_DEATH isn't available.
  UNUSED void *MisalignedP =
      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) | 1U);
  EXPECT_DEATH(Allocator->deallocate(MisalignedP, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(MisalignedP, Size * 2U), "");

  // Header corruption.
  scudo::u64 *H =
      reinterpret_cast<scudo::u64 *>(scudo::Chunk::getAtomicHeader(P));
  *H ^= 0x42U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420042U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420000U;

  // Invalid chunk state.
  Allocator->deallocate(P, Origin, Size);
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(P, Size * 2U), "");
  EXPECT_DEATH(Allocator->getUsableSize(P), "");
}

// Ensure that releaseToOS can be called prior to any other allocator
// operation without issue.
TEST(ScudoCombinedTest, ReleaseToOS) {
  using AllocatorT = scudo::Allocator<DeathConfig>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  Allocator->reset();

  Allocator->releaseToOS();
}

// Verify that when a region gets full, the allocator will still manage to
// fulfill the allocation through a larger size class.
TEST(ScudoCombinedTest, FullRegion) {
  using AllocatorT = scudo::Allocator<DeathConfig>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  Allocator->reset();

  std::vector<void *> V;
  scudo::uptr FailedAllocationsCount = 0;
  for (scudo::uptr ClassId = 1U;
       ClassId <= DeathConfig::DeathSizeClassMap::LargestClassId; ClassId++) {
    const scudo::uptr Size =
        DeathConfig::DeathSizeClassMap::getSizeByClassId(ClassId);
    // Allocate enough to fill all of the regions above this one.
    const scudo::uptr MaxNumberOfChunks =
        ((1U << DeathRegionSizeLog) / Size) *
        (DeathConfig::DeathSizeClassMap::LargestClassId - ClassId + 1);
    void *P;
    for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) {
      P = Allocator->allocate(Size - 64U, Origin);
      if (!P)
        FailedAllocationsCount++;
      else
        V.push_back(P);
    }
    while (!V.empty()) {
      Allocator->deallocate(V.back(), Origin);
      V.pop_back();
    }
  }
  EXPECT_EQ(FailedAllocationsCount, 0U);
}