//===-- primary_test.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "primary32.h"
#include "primary64.h"
#include "size_class_map.h"

#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <random>
#include <stdlib.h>
#include <thread>
#include <vector>

// Note that with small enough regions, the SizeClassAllocator64 also works on
// 32-bit architectures. It's not something we want to encourage, but we should
// still ensure the tests pass.

struct TestConfig1 {
  static const scudo::uptr PrimaryRegionSizeLog = 18U;
  static const scudo::uptr PrimaryGroupSizeLog = 18U;
  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
  static const bool MaySupportMemoryTagging = false;
  typedef scudo::uptr PrimaryCompactPtrT;
  static const scudo::uptr PrimaryCompactPtrScale = 0;
  static const bool PrimaryEnableRandomOffset = true;
  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
};
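
// TestConfig1's PrimaryRegionSizeLog of 18 yields 1 << 18 = 256 KiB regions,
// small enough to run on 32-bit targets as per the note above. A compile-time
// restatement of that arithmetic:
static_assert((1UL << TestConfig1::PrimaryRegionSizeLog) == 262144UL,
              "TestConfig1 is expected to use 256 KiB regions");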

struct TestConfig2 {
#if defined(__mips__)
  // Unable to allocate a larger size on QEMU-user.
  static const scudo::uptr PrimaryRegionSizeLog = 23U;
#else
  static const scudo::uptr PrimaryRegionSizeLog = 24U;
#endif
  static const scudo::uptr PrimaryGroupSizeLog = 20U;
  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
  static const bool MaySupportMemoryTagging = false;
  typedef scudo::uptr PrimaryCompactPtrT;
  static const scudo::uptr PrimaryCompactPtrScale = 0;
  static const bool PrimaryEnableRandomOffset = true;
  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
};

struct TestConfig3 {
#if defined(__mips__)
  // Unable to allocate a larger size on QEMU-user.
  static const scudo::uptr PrimaryRegionSizeLog = 23U;
#else
  static const scudo::uptr PrimaryRegionSizeLog = 24U;
#endif
  static const scudo::uptr PrimaryGroupSizeLog = 20U;
  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
  static const bool MaySupportMemoryTagging = true;
  typedef scudo::uptr PrimaryCompactPtrT;
  static const scudo::uptr PrimaryCompactPtrScale = 0;
  static const bool PrimaryEnableRandomOffset = true;
  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
};

struct TestConfig4 {
#if defined(__mips__)
  // Unable to allocate a larger size on QEMU-user.
  static const scudo::uptr PrimaryRegionSizeLog = 23U;
#else
  static const scudo::uptr PrimaryRegionSizeLog = 24U;
#endif
  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
  static const bool MaySupportMemoryTagging = true;
  static const scudo::uptr PrimaryCompactPtrScale = 3U;
  static const scudo::uptr PrimaryGroupSizeLog = 20U;
  typedef scudo::u32 PrimaryCompactPtrT;
  static const bool PrimaryEnableRandomOffset = true;
  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
};

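// Merge a base config with a size class map; the typed tests below always
// instantiate the allocators over one of these combined configs.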
template <typename BaseConfig, typename SizeClassMapT>
struct Config : public BaseConfig {
  using SizeClassMap = SizeClassMapT;
};

template <typename BaseConfig, typename SizeClassMapT>
struct SizeClassAllocator
    : public scudo::SizeClassAllocator64<Config<BaseConfig, SizeClassMapT>> {};
template <typename SizeClassMapT>
struct SizeClassAllocator<TestConfig1, SizeClassMapT>
    : public scudo::SizeClassAllocator32<Config<TestConfig1, SizeClassMapT>> {};

template <typename BaseConfig, typename SizeClassMapT>
struct TestAllocator : public SizeClassAllocator<BaseConfig, SizeClassMapT> {
  ~TestAllocator() { this->unmapTestOnly(); }

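  // The allocator type may carry an alignment stricter than what plain
  // operator new guarantees (alignof(TestAllocator) can exceed
  // __STDCPP_DEFAULT_NEW_ALIGNMENT__), so heap allocations go through
  // posix_memalign instead.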
  void *operator new(size_t size) {
    void *p = nullptr;
    EXPECT_EQ(0, posix_memalign(&p, alignof(TestAllocator), size));
    return p;
  }

  void operator delete(void *ptr) { free(ptr); }
};

template <class BaseConfig> struct ScudoPrimaryTest : public Test {};

#if SCUDO_FUCHSIA
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig2)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig3)
#else
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig1)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig2)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig3)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig4)
#endif

#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
  using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<TYPE>;                          \
  TEST_F(FIXTURE##NAME##_##TYPE, NAME) { FIXTURE##NAME<TYPE>::Run(); }

#define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
  template <class TypeParam>                                                   \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
    void Run();                                                                \
  };                                                                           \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()
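
// These macros emulate gtest's TYPED_TEST machinery, running each test once
// per config. For reference, SCUDO_TYPED_TEST(ScudoPrimaryTest, BasicPrimary)
// expands roughly to the following (hand-expanded sketch):
//   template <class TypeParam>
//   struct ScudoPrimaryTestBasicPrimary : public ScudoPrimaryTest<TypeParam> {
//     void Run();
//   };
//   using ScudoPrimaryTestBasicPrimary_TestConfig2 =
//       ScudoPrimaryTestBasicPrimary<TestConfig2>;
//   TEST_F(ScudoPrimaryTestBasicPrimary_TestConfig2, BasicPrimary) {
//     ScudoPrimaryTestBasicPrimary<TestConfig2>::Run();
//   }
//   // ... one TEST_F per test config ...
//   template <class TypeParam>
//   void ScudoPrimaryTestBasicPrimary<TypeParam>::Run()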

SCUDO_TYPED_TEST(ScudoPrimaryTest, BasicPrimary) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  const scudo::uptr NumberOfAllocations = 32U;
  for (scudo::uptr I = 0; I <= 16U; I++) {
    const scudo::uptr Size = 1UL << I;
    if (!Primary::canAllocate(Size))
      continue;
    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
    void *Pointers[NumberOfAllocations];
    for (scudo::uptr J = 0; J < NumberOfAllocations; J++) {
      void *P = Cache.allocate(ClassId);
      memset(P, 'B', Size);
      Pointers[J] = P;
    }
    for (scudo::uptr J = 0; J < NumberOfAllocations; J++)
      Cache.deallocate(ClassId, Pointers[J]);
  }
  Cache.destroy(nullptr);
  Allocator->releaseToOS();
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}

struct SmallRegionsConfig {
  using SizeClassMap = scudo::DefaultSizeClassMap;
  static const scudo::uptr PrimaryRegionSizeLog = 21U;
  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
  static const bool MaySupportMemoryTagging = false;
  typedef scudo::uptr PrimaryCompactPtrT;
  static const scudo::uptr PrimaryCompactPtrScale = 0;
  static const bool PrimaryEnableRandomOffset = true;
  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
  static const scudo::uptr PrimaryGroupSizeLog = 20U;
};

// The 64-bit SizeClassAllocator can be easily OOM'd with small region sizes.
// For the 32-bit one, it requires actually exhausting memory, so we skip it.
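// With PrimaryRegionSizeLog = 21, each region spans only 1 << 21 = 2 MiB, so
// repeatedly popping batches of the largest size class is expected to exhaust
// its region well before the 10000-iteration bound below.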
TEST(ScudoPrimaryTest, Primary64OOM) {
  using Primary = scudo::SizeClassAllocator64<SmallRegionsConfig>;
  using TransferBatch = Primary::CacheT::TransferBatch;
  Primary Allocator;
  Allocator.init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  scudo::GlobalStats Stats;
  Stats.init();
  Cache.init(&Stats, &Allocator);
  bool AllocationFailed = false;
  std::vector<TransferBatch *> Batches;
  const scudo::uptr ClassId = Primary::SizeClassMap::LargestClassId;
  const scudo::uptr Size = Primary::getSizeByClassId(ClassId);
  typename Primary::CacheT::CompactPtrT Blocks[TransferBatch::MaxNumCached];

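  // Pop batches until the (intentionally small) region runs out: popBatch
  // returning null is the OOM condition this test is after.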
  for (scudo::uptr I = 0; I < 10000U; I++) {
    TransferBatch *B = Allocator.popBatch(&Cache, ClassId);
    if (!B) {
      AllocationFailed = true;
      break;
    }
    for (scudo::u16 J = 0; J < B->getCount(); J++)
      memset(Allocator.decompactPtr(ClassId, B->get(J)), 'B', Size);
    Batches.push_back(B);
  }
  while (!Batches.empty()) {
    TransferBatch *B = Batches.back();
    Batches.pop_back();
    B->copyToArray(Blocks);
    Allocator.pushBlocks(&Cache, ClassId, Blocks, B->getCount());
    Cache.deallocate(Primary::SizeClassMap::BatchClassId, B);
  }
  Cache.destroy(nullptr);
  Allocator.releaseToOS();
  scudo::ScopedString Str;
  Allocator.getStats(&Str);
  Str.output();
  EXPECT_TRUE(AllocationFailed);
  Allocator.unmapTestOnly();
}

SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryIterate) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  std::vector<std::pair<scudo::uptr, void *>> V;
  for (scudo::uptr I = 0; I < 64U; I++) {
    const scudo::uptr Size = std::rand() % Primary::SizeClassMap::MaxSize;
    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
    void *P = Cache.allocate(ClassId);
    V.push_back(std::make_pair(ClassId, P));
  }
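  // Count how many of the pointers allocated above show up in the block
  // iteration; every one of them should be visited, so Found must end up
  // equal to V.size().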
  scudo::uptr Found = 0;
  auto Lambda = [&V, &Found](scudo::uptr Block) {
    for (const auto &Pair : V) {
      if (Pair.second == reinterpret_cast<void *>(Block))
        Found++;
    }
  };
  Allocator->disable();
  Allocator->iterateOverBlocks(Lambda);
  Allocator->enable();
  EXPECT_EQ(Found, V.size());
  while (!V.empty()) {
    auto Pair = V.back();
    Cache.deallocate(Pair.first, Pair.second);
    V.pop_back();
  }
  Cache.destroy(nullptr);
  Allocator->releaseToOS();
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}

SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryThreaded) {
  using Primary = TestAllocator<TypeParam, scudo::SvelteSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
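  // Gate all the worker threads on a condition variable so they start
  // allocating at roughly the same time, maximizing contention on the
  // primary.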
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread([&]() {
      static thread_local typename Primary::CacheT Cache;
      Cache.init(nullptr, Allocator.get());
      std::vector<std::pair<scudo::uptr, void *>> V;
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size =
            std::rand() % Primary::SizeClassMap::MaxSize / 4;
        const scudo::uptr ClassId =
            Primary::SizeClassMap::getClassIdBySize(Size);
        void *P = Cache.allocate(ClassId);
        if (P)
          V.push_back(std::make_pair(ClassId, P));
      }
      while (!V.empty()) {
        auto Pair = V.back();
        Cache.deallocate(Pair.first, Pair.second);
        V.pop_back();
      }
      Cache.destroy(nullptr);
    });
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS();
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}

// Through a simple allocation that spans two pages, verify that releaseToOS
// actually releases some bytes (at least one page's worth). This is a
// regression test for an error in how the release criteria were computed.
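// A block of two pages' size fully covers at least one aligned page no matter
// where it starts within the region, so a successful release must report a
// non-zero byte count.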
SCUDO_TYPED_TEST(ScudoPrimaryTest, ReleaseToOS) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  const scudo::uptr Size = scudo::getPageSizeCached() * 2;
  EXPECT_TRUE(Primary::canAllocate(Size));
  const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
  void *P = Cache.allocate(ClassId);
  EXPECT_NE(P, nullptr);
  Cache.deallocate(ClassId, P);
  Cache.destroy(nullptr);
  EXPECT_GT(Allocator->releaseToOS(), 0U);
}

SCUDO_TYPED_TEST(ScudoPrimaryTest, MemoryGroup) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  const scudo::uptr Size = 32U;
  const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);

  // Allocate 4 times the group size worth of memory, then release all of it.
  // We expect the freed blocks to be classified into groups. Next, allocate
  // one group size worth of memory and expect the maximum address difference
  // among the returned blocks to be at most 2 times the group size. The blocks
  // don't need to fall within a single group: the group id is derived by
  // shifting the compact pointer, and depending on the configuration the
  // compact pointer may not be aligned to the group size. As a result, the
  // blocks can span at most two groups.
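  // As a worked example, assuming PrimaryGroupSizeLog = 20 (1 MiB groups):
  // the peak usage below is 4 MiB spread over 4 MiB / 32 = 131072 blocks, and
  // the final round allocates 1 MiB / 32 = 32768 blocks whose addresses must
  // then fall within a 2 MiB window.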
  const scudo::uptr GroupSizeMem = (1ULL << Primary::GroupSizeLog);
  const scudo::uptr PeakAllocationMem = 4 * GroupSizeMem;
  const scudo::uptr PeakNumberOfAllocations = PeakAllocationMem / Size;
  const scudo::uptr FinalNumberOfAllocations = GroupSizeMem / Size;
  std::vector<scudo::uptr> Blocks;
  std::mt19937 R;

  for (scudo::uptr I = 0; I < PeakNumberOfAllocations; ++I)
    Blocks.push_back(reinterpret_cast<scudo::uptr>(Cache.allocate(ClassId)));

  std::shuffle(Blocks.begin(), Blocks.end(), R);

  // Release all the allocated blocks, including those held by the local cache.
  while (!Blocks.empty()) {
    Cache.deallocate(ClassId, reinterpret_cast<void *>(Blocks.back()));
    Blocks.pop_back();
  }
  Cache.drain();

  for (scudo::uptr I = 0; I < FinalNumberOfAllocations; ++I)
    Blocks.push_back(reinterpret_cast<scudo::uptr>(Cache.allocate(ClassId)));

  EXPECT_LE(*std::max_element(Blocks.begin(), Blocks.end()) -
                *std::min_element(Blocks.begin(), Blocks.end()),
            GroupSizeMem * 2);
}