/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <folly/IndexedMemPool.h>

#include <string>
#include <thread>

#include <folly/portability/GMock.h>
#include <folly/portability/GTest.h>
#include <folly/portability/Unistd.h>
#include <folly/test/DeterministicSchedule.h>

using namespace folly;
using namespace folly::test;
using namespace testing;

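// Allocate elements repeatedly through the RAII handle, then hold handles
// until the pool runs out and allocElem() returns a null pointer.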
TEST(IndexedMemPool, unique_ptr) {
  typedef IndexedMemPool<size_t> Pool;
  Pool pool(100);

  for (size_t i = 0; i < 100000; ++i) {
    auto ptr = pool.allocElem();
    EXPECT_TRUE(!!ptr);
    *ptr = i;
  }

  std::vector<Pool::UniquePtr> leak;
  while (true) {
    auto ptr = pool.allocElem();
    if (!ptr) {
      // good, we finally ran out
      break;
    }
    leak.emplace_back(std::move(ptr));
    EXPECT_LT(leak.size(), 10000u);
  }
}

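// Producer/consumer pair sharing a small pool under DeterministicSchedule:
// as long as the consumer keeps recycling indices, the producer's allocIndex
// calls must never fail (i.e. no starvation).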
TEST(IndexedMemPool, no_starvation) {
  const int count = 1000;
  const uint32_t poolSize = 100;

  typedef DeterministicSchedule Sched;
  Sched sched(Sched::uniform(0));

  typedef IndexedMemPool<int, 8, 8, DeterministicAtomic> Pool;
  Pool pool(poolSize);

  for (auto pass = 0; pass < 10; ++pass) {
    int fd[2];
    EXPECT_EQ(pipe(fd), 0);

    // makes sure we wait for available nodes, rather than fail allocIndex
    DeterministicSchedule::Sem allocSem(poolSize);

    // this semaphore is only needed for deterministic replay, so that we
    // always block in an Sched:: operation rather than in a read() syscall
    DeterministicSchedule::Sem readSem(0);

    std::thread produce = Sched::thread([&]() {
      for (auto i = 0; i < count; ++i) {
        Sched::wait(&allocSem);
        uint32_t idx = pool.allocIndex();
        EXPECT_NE(idx, 0u);
        EXPECT_LE(
            idx, poolSize + (pool.NumLocalLists - 1) * pool.LocalListLimit);
        pool[idx] = i;
        EXPECT_EQ(write(fd[1], &idx, sizeof(idx)), sizeof(idx));
        Sched::post(&readSem);
      }
    });

    std::thread consume = Sched::thread([&]() {
      for (auto i = 0; i < count; ++i) {
        uint32_t idx;
        Sched::wait(&readSem);
        EXPECT_EQ(read(fd[0], &idx, sizeof(idx)), sizeof(idx));
        EXPECT_NE(idx, 0);
        EXPECT_GE(idx, 1u);
        EXPECT_LE(
            idx, poolSize + (Pool::NumLocalLists - 1) * Pool::LocalListLimit);
        EXPECT_EQ(pool[idx], i);
        pool.recycleIndex(idx);
        Sched::post(&allocSem);
      }
    });

    Sched::join(produce);
    Sched::join(consume);
    close(fd[0]);
    close(fd[1]);
  }
}

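// Single-threaded capacity check: exactly capacity() allocations succeed
// before allocIndex() starts returning the invalid index 0.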
TEST(IndexedMemPool, st_capacity) {
  // only one local list => capacity is exact
  typedef IndexedMemPool<int, 1, 32> Pool;
  Pool pool(10);

  EXPECT_EQ(pool.capacity(), 10u);
  EXPECT_EQ(Pool::maxIndexForCapacity(10), 10u);
  for (auto i = 0; i < 10; ++i) {
    EXPECT_NE(pool.allocIndex(), 0u);
  }
  EXPECT_EQ(pool.allocIndex(), 0u);
}

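// Multi-threaded capacity check: ten threads drain a pool of 1000 slots, then
// any slack left in the local lists is consumed before allocIndex() must fail.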
TEST(IndexedMemPool, mt_capacity) {
  typedef IndexedMemPool<int, 16, 32> Pool;
  Pool pool(1000);

  std::thread threads[10];
  for (auto i = 0; i < 10; ++i) {
    threads[i] = std::thread([&]() {
      for (auto j = 0; j < 100; ++j) {
        uint32_t idx = pool.allocIndex();
        EXPECT_NE(idx, 0u);
      }
    });
  }

  for (auto i = 0; i < 10; ++i) {
    threads[i].join();
  }

  for (auto i = 0; i < 16 * 32; ++i) {
    pool.allocIndex();
  }
  EXPECT_EQ(pool.allocIndex(), 0u);
}

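// locateElem() must invert operator[]: an element pointer maps back to the
// index it was allocated under, and nullptr maps to the invalid index 0.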
TEST(IndexedMemPool, locate_elem) {
  IndexedMemPool<int> pool(1000);

  for (auto i = 0; i < 1000; ++i) {
    auto idx = pool.allocIndex();
    EXPECT_FALSE(idx == 0);
    int* elem = &pool[idx];
    EXPECT_TRUE(idx == pool.locateElem(elem));
  }

  EXPECT_EQ(pool.locateElem(nullptr), 0);
}

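// Element type that tracks the number of live instances per thread, used to
// observe exactly when the pool constructs and destroys its elements.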
struct NonTrivialStruct {
  static thread_local size_t count;

  size_t elem_;

  NonTrivialStruct() {
    elem_ = 0;
    ++count;
  }

  NonTrivialStruct(std::unique_ptr<std::string>&& arg1, size_t arg2) {
    elem_ = arg1->length() + arg2;
    ++count;
  }

  ~NonTrivialStruct() { --count; }
};

thread_local size_t NonTrivialStruct::count;

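// With the default (eager-recycle) traits the element is constructed inside
// allocElem() with forwarded arguments and destroyed as soon as the returned
// handle goes out of scope.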
TEST(IndexedMemPool, eager_recycle) {
  typedef IndexedMemPool<NonTrivialStruct> Pool;
  Pool pool(100);

  EXPECT_EQ(NonTrivialStruct::count, 0);

  for (size_t i = 0; i < 10; ++i) {
    {
      std::unique_ptr<std::string> arg{new std::string{"abc"}};
      auto ptr = pool.allocElem(std::move(arg), 100);
      EXPECT_EQ(NonTrivialStruct::count, 1);
      EXPECT_EQ(ptr->elem_, 103);
      EXPECT_TRUE(!!ptr);
    }
    EXPECT_EQ(NonTrivialStruct::count, 0);
  }
}

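// With IndexedMemPoolTraitsLazyRecycle the element outlives its handle: it is
// constructed when the slot is first used and destroyed only when the pool
// itself is destroyed.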
TEST(IndexedMemPool, late_recycle) {
  {
    using Pool = IndexedMemPool<
        NonTrivialStruct,
        8,
        8,
        std::atomic,
        IndexedMemPoolTraitsLazyRecycle<NonTrivialStruct>>;
    Pool pool(100);

    EXPECT_EQ(NonTrivialStruct::count, 0);

    for (size_t i = 0; i < 10; ++i) {
      {
        auto ptr = pool.allocElem();
        EXPECT_TRUE(NonTrivialStruct::count > 0);
        EXPECT_TRUE(!!ptr);
        ptr->elem_ = i;
      }
      EXPECT_TRUE(NonTrivialStruct::count > 0);
    }
  }
  EXPECT_EQ(NonTrivialStruct::count, 0);
}

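// Hammer allocIndex()/recycleIndex() from several threads at once; mainly
// useful when run under a race detector such as TSAN.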
TEST(IndexedMemPool, no_data_races) {
  const int count = 1000;
  const uint32_t poolSize = 100;
  const int nthreads = 10;

  using Pool = IndexedMemPool<int, 8, 8>;
  Pool pool(poolSize);

  std::vector<std::thread> thr(nthreads);
  for (auto i = 0; i < nthreads; ++i) {
    thr[i] = std::thread([&]() {
      for (auto j = 0; j < count; ++j) {
        uint32_t idx = pool.allocIndex();
        EXPECT_NE(idx, 0u);
        EXPECT_LE(
            idx, poolSize + (pool.NumLocalLists - 1) * pool.LocalListLimit);
        pool[idx] = j;
        pool.recycleIndex(idx);
      }
    });
  }
  for (auto& t : thr) {
    t.join();
  }
}

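// Counters shared by the construction_destruction test below: every Foo the
// pool constructs must eventually be destroyed, even when many threads race
// on a deliberately tiny pool.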
std::atomic<int> cnum{0};
std::atomic<int> dnum{0};

TEST(IndexedMemPool, construction_destruction) {
  struct Foo {
    Foo() { cnum.fetch_add(1); }
    ~Foo() { dnum.fetch_add(1); }
  };

  std::atomic<bool> start{false};
  std::atomic<int> started{0};

  using Pool = IndexedMemPool<
      Foo,
      1,
      1,
      std::atomic,
      IndexedMemPoolTraitsLazyRecycle<Foo>>;
  int nthreads = 20;
  int count = 1000;

  {
    Pool pool(2);
    std::vector<std::thread> thr(nthreads);
    for (auto i = 0; i < nthreads; ++i) {
      thr[i] = std::thread([&]() {
        started.fetch_add(1);
        while (!start.load()) {
          ;
        }
        for (auto j = 0; j < count; ++j) {
          uint32_t idx = pool.allocIndex();
          if (idx != 0) {
            pool.recycleIndex(idx);
          }
        }
      });
    }

    while (started.load() < nthreads) {
      ;
    }
    start.store(true);

    for (auto& t : thr) {
      t.join();
    }
  }

  CHECK_EQ(cnum.load(), dnum.load());
}

/// Global Traits mock. It can't be a regular (non-global) mock because we
/// don't have access to the instance.
struct MockTraits {
  static MockTraits* instance;

  MockTraits() { instance = this; }

  ~MockTraits() { instance = nullptr; }

  MOCK_METHOD2(onAllocate, void(std::string*, std::string));
  MOCK_METHOD1(onRecycle, void(std::string*));

  struct Forwarder {
    static void initialize(std::string* ptr) { new (ptr) std::string(); }

    static void cleanup(std::string* ptr) {
      using std::string;
      ptr->~string();
    }

    static void onAllocate(std::string* ptr, std::string s) {
      instance->onAllocate(ptr, s);
    }

    static void onRecycle(std::string* ptr) { instance->onRecycle(ptr); }
  };
};

MockTraits* MockTraits::instance;

using TraitsTestPool =
    IndexedMemPool<std::string, 1, 1, std::atomic, MockTraits::Forwarder>;

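// Allocates and recycles one element, checking that the traits'
// onAllocate/onRecycle hooks fire for that element while its slot is still
// reported as not allocated.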
void testTraits(TraitsTestPool& pool) {
  MockTraits traits;
  const std::string* elem = nullptr;
  EXPECT_CALL(traits, onAllocate(_, _))
      .WillOnce(Invoke([&](std::string* s, auto) {
        EXPECT_FALSE(pool.isAllocated(pool.locateElem(s)));
        elem = s;
      }));
  std::string* ptr = pool.allocElem("foo").release();
  EXPECT_EQ(ptr, elem);

  elem = nullptr;
  EXPECT_CALL(traits, onRecycle(_)).WillOnce(Invoke([&](std::string* s) {
    EXPECT_FALSE(pool.isAllocated(pool.locateElem(s)));
    elem = s;
  }));
  pool.recycleIndex(pool.locateElem(ptr));
  EXPECT_EQ(ptr, elem);
}

// Test that Traits is used when both local and global lists are empty.
TEST(IndexedMemPool, use_traits_empty) {
  TraitsTestPool pool(10);
  testTraits(pool);
}

// Test that Traits is used when allocating from a local list.
TEST(IndexedMemPool, use_traits_local_list) {
  TraitsTestPool pool(10);
  MockTraits traits;
  EXPECT_CALL(traits, onAllocate(_, _));
  // Allocate and immediately recycle an element to populate the local list.
  pool.allocElem("");
  testTraits(pool);
}

// Test that Traits is used when allocating from a global list.
TEST(IndexedMemPool, use_traits_global_list) {
  TraitsTestPool pool(10);
  MockTraits traits;
  EXPECT_CALL(traits, onAllocate(_, _)).Times(2);
  auto global = pool.allocElem("");
  // Allocate and immediately recycle an element to fill the local list.
  pool.allocElem("");
  // Recycle an element to populate the global list.
  global.reset();
  testTraits(pool);
}

// Test that IndexedMemPool works with incomplete element types.
struct IncompleteTestElement;
using IncompleteTestPool = IndexedMemPool<IncompleteTestElement>;