//===-- tsd_test.cpp --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "tsd_exclusive.h"
#include "tsd_shared.h"

#include <condition_variable>
#include <cstring>
#include <mutex>
#include <thread>

// We mock out an allocator with a TSD registry, mostly using empty stubs. The
// cache contains a single volatile uptr, which lets us check that concurrent
// threads never access or modify the same cache at the same time.
template <class Config> class MockAllocator {
public:
  using ThisT = MockAllocator<Config>;
  using TSDRegistryT = typename Config::template TSDRegistryT<ThisT>;
  using CacheT = struct MockCache { volatile scudo::uptr Canary; };
  using QuarantineCacheT = struct MockQuarantine {};

  void initLinkerInitialized() {
    // This should only be called once by the registry.
    EXPECT_FALSE(Initialized);
    Initialized = true;
  }
  void reset() { memset(this, 0, sizeof(*this)); }

  void unmapTestOnly() { TSDRegistry.unmapTestOnly(); }
  void initCache(CacheT *Cache) { memset(Cache, 0, sizeof(*Cache)); }
  void commitBack(scudo::TSD<MockAllocator> *TSD) {}
  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
  void callPostInitCallback() {}

  bool isInitialized() { return Initialized; }

private:
  bool Initialized;
  TSDRegistryT TSDRegistry;
};

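// Configurations wiring the mock allocator to the various TSD registry
// flavors: a shared registry with a single TSD, a shared registry with up to
// 16 TSDs, and an exclusive (per-thread) registry.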
struct OneCache {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 1U>;
};

struct SharedCaches {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 16U>;
};

struct ExclusiveCaches {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistryExT<Allocator>;
};

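// Ensure that initializing the TSD registry initializes the allocator exactly
// once: the mock's initLinkerInitialized() EXPECTs not to be called twice.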
TEST(ScudoTSDTest, TSDRegistryInit) {
  using AllocatorT = MockAllocator<OneCache>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  Allocator->reset();
  EXPECT_FALSE(Allocator->isInitialized());

  auto Registry = Allocator->getTSDRegistry();
  Registry->initLinkerInitialized(Allocator.get());
  EXPECT_TRUE(Allocator->isInitialized());
}

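// Exercise the basic registry flow: initialize the current thread (minimally,
// then fully) and check that getTSDAndLock() hands back a non-null TSD with a
// freshly zeroed cache.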
template <class AllocatorT> static void testRegistry() {
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  Allocator->reset();
  EXPECT_FALSE(Allocator->isInitialized());

  auto Registry = Allocator->getTSDRegistry();
  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/true);
  EXPECT_TRUE(Allocator->isInitialized());

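  // getTSDAndLock() reports through UnlockRequired whether the TSD it returns
  // is shared (and has to be unlocked once we are done with it) or exclusive
  // to the current thread.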
  bool UnlockRequired;
  auto TSD = Registry->getTSDAndLock(&UnlockRequired);
  EXPECT_NE(TSD, nullptr);
  EXPECT_EQ(TSD->Cache.Canary, 0U);
  if (UnlockRequired)
    TSD->unlock();

  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false);
  TSD = Registry->getTSDAndLock(&UnlockRequired);
  EXPECT_NE(TSD, nullptr);
  EXPECT_EQ(TSD->Cache.Canary, 0U);
  memset(&TSD->Cache, 0x42, sizeof(TSD->Cache));
  if (UnlockRequired)
    TSD->unlock();
}

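// Run the basic test for each registry flavor; the exclusive registry is not
// exercised on Fuchsia.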
TEST(ScudoTSDTest, TSDRegistryBasic) {
  testRegistry<MockAllocator<OneCache>>();
  testRegistry<MockAllocator<SharedCaches>>();
#if !SCUDO_FUCHSIA
  testRegistry<MockAllocator<ExclusiveCaches>>();
#endif
}

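// Starting gate: the stress threads wait on the condition variable so that
// they all start hammering their TSD at roughly the same time.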
static std::mutex Mutex;
static std::condition_variable Cv;
static bool Ready = false;

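// Each stress thread grabs a TSD, writes a thread-unique canary into its cache
// and repeatedly checks that no concurrent thread has clobbered it.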
template <typename AllocatorT> static void stressCache(AllocatorT *Allocator) {
  auto Registry = Allocator->getTSDRegistry();
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    while (!Ready)
      Cv.wait(Lock);
  }
  Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
  bool UnlockRequired;
  auto TSD = Registry->getTSDAndLock(&UnlockRequired);
  EXPECT_NE(TSD, nullptr);
  // For an exclusive TSD, the cache should be empty. We cannot guarantee the
  // same for a shared TSD.
  if (!UnlockRequired)
    EXPECT_EQ(TSD->Cache.Canary, 0U);
  // Transform the thread id into a uptr to use it as a canary.
  const scudo::uptr Canary = static_cast<scudo::uptr>(
      std::hash<std::thread::id>{}(std::this_thread::get_id()));
  TSD->Cache.Canary = Canary;
  // Loop a few times to make sure that a concurrent thread isn't modifying it.
  for (scudo::uptr I = 0; I < 4096U; I++)
    EXPECT_EQ(TSD->Cache.Canary, Canary);
  if (UnlockRequired)
    TSD->unlock();
}

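// Spawn a bunch of threads running stressCache() and release them all at once
// through the condition variable, then wait for them to finish.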
template <class AllocatorT> static void testRegistryThreaded() {
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  Allocator->reset();
  // Re-arm the starting gate so that each run of this function (one per
  // registry flavor) actually makes its threads wait for the notification.
  Ready = false;
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressCache<AllocatorT>, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
}

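// Run the threaded stress test for each registry flavor; as above, the
// exclusive registry is skipped on Fuchsia.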
TEST(ScudoTSDTest, TSDRegistryThreaded) {
  testRegistryThreaded<MockAllocator<OneCache>>();
  testRegistryThreaded<MockAllocator<SharedCaches>>();
#if !SCUDO_FUCHSIA
  testRegistryThreaded<MockAllocator<ExclusiveCaches>>();
#endif
}