// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/thread_cache.h"

#include <atomic>
#include <vector>

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/synchronization/lock.h"
#include "base/test/bind.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"

// Only a single partition can have a thread cache at a time. When
// PartitionAlloc is used as malloc(), it is already in use.
//
// With *SAN, PartitionAlloc is replaced in partition_alloc.h by ASAN, so we
// cannot test the thread cache.
//
// Finally, the thread cache currently uses `thread_local`, which causes issues
// on Windows 7 (at least). As long as it doesn't use something else on
// Windows, disable the cache (and these tests).
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
    !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) &&  \
    defined(PA_THREAD_CACHE_SUPPORTED)

namespace base {
namespace internal {

namespace {

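// Runs a OnceClosure as the body of a PlatformThread. The tests below use it
// to exercise the thread cache from secondary threads.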
class LambdaThreadDelegate : public PlatformThread::Delegate {
 public:
  explicit LambdaThreadDelegate(OnceClosure f) : f_(std::move(f)) {}
  void ThreadMain() override { std::move(f_).Run(); }

 private:
  OnceClosure f_;
};

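// Snapshots a uint64_t counter (e.g. a ThreadCacheStats field) and reports,
// via Delta(), how much it has grown since construction or the last Reset().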
class DeltaCounter {
 public:
  explicit DeltaCounter(uint64_t& value)
      : current_value_(value), initial_value_(value) {}
  void Reset() { initial_value_ = current_value_; }
  uint64_t Delta() const { return current_value_ - initial_value_; }

 private:
  uint64_t& current_value_;
  uint64_t initial_value_;
};

// Needs to be a global object without a destructor, because the cache is a
// global object with a destructor (to handle thread destruction), and the
// PartitionRoot has to outlive it.
//
// Forbid extras, since they make finding out which bucket is used harder.
NoDestructor<ThreadSafePartitionRoot> g_root{
    PartitionOptions{PartitionOptions::Alignment::kAlignedAlloc,
                     PartitionOptions::ThreadCache::kEnabled}};

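// Allocates and then frees |count| objects of |size| bytes from g_root on the
// current thread; the frees fill the thread cache. Returns the index of the
// bucket that |size| maps to.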
size_t FillThreadCacheAndReturnIndex(size_t size, size_t count = 1) {
  uint16_t bucket_index = PartitionRoot<ThreadSafe>::SizeToBucketIndex(size);
  std::vector<void*> allocated_data;

  for (size_t i = 0; i < count; ++i) {
    allocated_data.push_back(g_root->Alloc(size, ""));
  }
  for (void* ptr : allocated_data) {
    g_root->Free(ptr);
  }

  return bucket_index;
}

}  // namespace

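// Fixture making sure that the current thread has a thread cache for g_root,
// purged before each test runs.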
class ThreadCacheTest : public ::testing::Test {
 protected:
  void SetUp() override {
    // Make sure there is a thread cache.
    void* data = g_root->Alloc(1, "");
    g_root->Free(data);

    auto* tcache = g_root->thread_cache_for_testing();
    ASSERT_TRUE(tcache);
    tcache->Purge();
  }
  void TearDown() override {}
};

TEST_F(ThreadCacheTest, Simple) {
  const size_t kTestSize = 12;
  void* ptr = g_root->Alloc(kTestSize, "");
  ASSERT_TRUE(ptr);

  // There is a cache.
  auto* tcache = g_root->thread_cache_for_testing();
  EXPECT_TRUE(tcache);

  uint16_t index = PartitionRoot<ThreadSafe>::SizeToBucketIndex(kTestSize);
  EXPECT_EQ(0u, tcache->bucket_count_for_testing(index));

  g_root->Free(ptr);
  // Freeing fills the thread cache.
  EXPECT_EQ(1u, tcache->bucket_count_for_testing(index));

  void* ptr2 = g_root->Alloc(kTestSize, "");
  EXPECT_EQ(ptr, ptr2);
  // Allocated from the thread cache.
  EXPECT_EQ(0u, tcache->bucket_count_for_testing(index));
}

TEST_F(ThreadCacheTest, InexactSizeMatch) {
  const size_t kTestSize = 12;
  void* ptr = g_root->Alloc(kTestSize, "");
  ASSERT_TRUE(ptr);

  // There is a cache.
  auto* tcache = g_root->thread_cache_for_testing();
  EXPECT_TRUE(tcache);

  uint16_t index = PartitionRoot<ThreadSafe>::SizeToBucketIndex(kTestSize);
  EXPECT_EQ(0u, tcache->bucket_count_for_testing(index));

  g_root->Free(ptr);
  // Freeing fills the thread cache.
  EXPECT_EQ(1u, tcache->bucket_count_for_testing(index));

  void* ptr2 = g_root->Alloc(kTestSize + 1, "");
  EXPECT_EQ(ptr, ptr2);
  // Allocated from the thread cache.
  EXPECT_EQ(0u, tcache->bucket_count_for_testing(index));
}

TEST_F(ThreadCacheTest, MultipleObjectsCachedPerBucket) {
  size_t bucket_index = FillThreadCacheAndReturnIndex(100, 10);
  auto* tcache = g_root->thread_cache_for_testing();
  EXPECT_EQ(10u, tcache->bucket_count_for_testing(bucket_index));
}

TEST_F(ThreadCacheTest, ObjectsCachedCountIsLimited) {
  size_t bucket_index = FillThreadCacheAndReturnIndex(100, 1000);
  auto* tcache = g_root->thread_cache_for_testing();
  EXPECT_LT(tcache->bucket_count_for_testing(bucket_index), 1000u);
}

TEST_F(ThreadCacheTest, Purge) {
  size_t bucket_index = FillThreadCacheAndReturnIndex(100, 10);
  auto* tcache = g_root->thread_cache_for_testing();
  EXPECT_EQ(10u, tcache->bucket_count_for_testing(bucket_index));
  tcache->Purge();
  EXPECT_EQ(0u, tcache->bucket_count_for_testing(bucket_index));
}

TEST_F(ThreadCacheTest, NoCrossPartitionCache) {
  const size_t kTestSize = 12;
  ThreadSafePartitionRoot root{{PartitionOptions::Alignment::kAlignedAlloc,
                                PartitionOptions::ThreadCache::kDisabled}};

  size_t bucket_index = FillThreadCacheAndReturnIndex(kTestSize);
  void* ptr = root.Alloc(kTestSize, "");
  ASSERT_TRUE(ptr);

  auto* tcache = g_root->thread_cache_for_testing();
  EXPECT_EQ(1u, tcache->bucket_count_for_testing(bucket_index));

  ThreadSafePartitionRoot::Free(ptr);
  EXPECT_EQ(1u, tcache->bucket_count_for_testing(bucket_index));
}

#if defined(PA_ENABLE_THREAD_CACHE_STATISTICS)  // Required to record hits and
                                                // misses.
TEST_F(ThreadCacheTest, LargeAllocationsAreNotCached) {
  auto* tcache = g_root->thread_cache_for_testing();
  DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
  DeltaCounter alloc_miss_too_large_counter{
      tcache->stats_.alloc_miss_too_large};
  DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
  DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};
  DeltaCounter cache_fill_too_large_counter{
      tcache->stats_.cache_fill_too_large};

  FillThreadCacheAndReturnIndex(100 * 1024);
  tcache = g_root->thread_cache_for_testing();
  EXPECT_EQ(1u, alloc_miss_counter.Delta());
  EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
  EXPECT_EQ(1u, cache_fill_counter.Delta());
  EXPECT_EQ(1u, cache_fill_misses_counter.Delta());
  EXPECT_EQ(1u, cache_fill_too_large_counter.Delta());
}
#endif  // defined(PA_ENABLE_THREAD_CACHE_STATISTICS)

TEST_F(ThreadCacheTest, DirectMappedAllocationsAreNotCached) {
  FillThreadCacheAndReturnIndex(1024 * 1024);
  // The line above would crash due to out of bounds access if this wasn't
  // properly handled.
}

TEST_F(ThreadCacheTest, MultipleThreadCaches) {
  const size_t kTestSize = 100;
  FillThreadCacheAndReturnIndex(kTestSize);
  auto* parent_thread_tcache = g_root->thread_cache_for_testing();
  ASSERT_TRUE(parent_thread_tcache);

  LambdaThreadDelegate delegate{BindLambdaForTesting([&]() {
    EXPECT_FALSE(g_root->thread_cache_for_testing());  // No allocations yet.
    FillThreadCacheAndReturnIndex(kTestSize);
    auto* tcache = g_root->thread_cache_for_testing();
    EXPECT_TRUE(tcache);

    EXPECT_NE(parent_thread_tcache, tcache);
  })};

  PlatformThreadHandle thread_handle;
  PlatformThread::Create(0, &delegate, &thread_handle);
  PlatformThread::Join(thread_handle);
}

TEST_F(ThreadCacheTest, ThreadCacheReclaimedWhenThreadExits) {
  const size_t kTestSize = 100;
  // Make sure that there is always at least one object allocated in the test
  // bucket, so that the PartitionPage is not reclaimed.
  void* tmp = g_root->Alloc(kTestSize, "");
  void* other_thread_ptr;

  LambdaThreadDelegate delegate{BindLambdaForTesting([&]() {
    EXPECT_FALSE(g_root->thread_cache_for_testing());  // No allocations yet.
    other_thread_ptr = g_root->Alloc(kTestSize, "");
    g_root->Free(other_thread_ptr);
    // |other_thread_ptr| is now in the thread cache.
  })};

  PlatformThreadHandle thread_handle;
  PlatformThread::Create(0, &delegate, &thread_handle);
  PlatformThread::Join(thread_handle);

  void* this_thread_ptr = g_root->Alloc(kTestSize, "");
  // |other_thread_ptr| was returned to the central allocator when the other
  // thread exited, and is returned here, as it comes from the freelist.
  EXPECT_EQ(this_thread_ptr, other_thread_ptr);
  g_root->Free(other_thread_ptr);
  g_root->Free(tmp);
}

TEST_F(ThreadCacheTest, ThreadCacheRegistry) {
  const size_t kTestSize = 100;
  auto* parent_thread_tcache = g_root->thread_cache_for_testing();
  ASSERT_TRUE(parent_thread_tcache);

  LambdaThreadDelegate delegate{BindLambdaForTesting([&]() {
    EXPECT_FALSE(g_root->thread_cache_for_testing());  // No allocations yet.
    FillThreadCacheAndReturnIndex(kTestSize);
    auto* tcache = g_root->thread_cache_for_testing();
    EXPECT_TRUE(tcache);

    PartitionAutoLock lock(ThreadCacheRegistry::GetLock());
    EXPECT_EQ(tcache->prev_, nullptr);
    EXPECT_EQ(tcache->next_, parent_thread_tcache);
  })};

  PlatformThreadHandle thread_handle;
  PlatformThread::Create(0, &delegate, &thread_handle);
  PlatformThread::Join(thread_handle);

  PartitionAutoLock lock(ThreadCacheRegistry::GetLock());
  EXPECT_EQ(parent_thread_tcache->prev_, nullptr);
  EXPECT_EQ(parent_thread_tcache->next_, nullptr);
}

#if defined(PA_ENABLE_THREAD_CACHE_STATISTICS)
TEST_F(ThreadCacheTest, RecordStats) {
  const size_t kTestSize = 100;
  auto* tcache = g_root->thread_cache_for_testing();
  DeltaCounter alloc_counter{tcache->stats_.alloc_count};
  DeltaCounter alloc_hits_counter{tcache->stats_.alloc_hits};
  DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};

  DeltaCounter alloc_miss_empty_counter{tcache->stats_.alloc_miss_empty};

  DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
  DeltaCounter cache_fill_hits_counter{tcache->stats_.cache_fill_hits};
  DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};
  DeltaCounter cache_fill_bucket_full_counter{
      tcache->stats_.cache_fill_bucket_full};

  // Cache has been purged, first allocation is a miss.
  void* data = g_root->Alloc(kTestSize, "");
  EXPECT_EQ(1u, alloc_counter.Delta());
  EXPECT_EQ(1u, alloc_miss_counter.Delta());
  EXPECT_EQ(0u, alloc_hits_counter.Delta());

  // Cache fill worked.
  g_root->Free(data);
  EXPECT_EQ(1u, cache_fill_counter.Delta());
  EXPECT_EQ(1u, cache_fill_hits_counter.Delta());
  EXPECT_EQ(0u, cache_fill_misses_counter.Delta());

  tcache->Purge();
  cache_fill_counter.Reset();
  // Bucket full accounting.
  size_t bucket_index = FillThreadCacheAndReturnIndex(
      kTestSize, ThreadCache::kMaxCountPerBucket + 10);
  EXPECT_EQ(ThreadCache::kMaxCountPerBucket + 10, cache_fill_counter.Delta());
  EXPECT_EQ(10u, cache_fill_bucket_full_counter.Delta());
  EXPECT_EQ(10u, cache_fill_misses_counter.Delta());

  // Memory footprint.
  ThreadCacheStats stats;
  ThreadCacheRegistry::Instance().DumpStats(true, &stats);
  EXPECT_EQ(
      g_root->buckets[bucket_index].slot_size * ThreadCache::kMaxCountPerBucket,
      stats.bucket_total_memory);
  EXPECT_EQ(sizeof(ThreadCache), stats.metadata_overhead);
}

TEST_F(ThreadCacheTest, MultipleThreadCachesAccounting) {
  const size_t kTestSize = 100;
  void* data = g_root->Alloc(kTestSize, "");
  g_root->Free(data);
  uint64_t alloc_count = g_root->thread_cache_for_testing()->stats_.alloc_count;

  LambdaThreadDelegate delegate{BindLambdaForTesting([&]() {
    EXPECT_FALSE(g_root->thread_cache_for_testing());  // No allocations yet.
    size_t bucket_index = FillThreadCacheAndReturnIndex(kTestSize);

    ThreadCacheStats stats;
    ThreadCacheRegistry::Instance().DumpStats(false, &stats);
    // 2x: one for this thread, one for the parent thread.
    EXPECT_EQ(2 * g_root->buckets[bucket_index].slot_size,
              stats.bucket_total_memory);
    EXPECT_EQ(2 * sizeof(ThreadCache), stats.metadata_overhead);

    uint64_t this_thread_alloc_count =
        g_root->thread_cache_for_testing()->stats_.alloc_count;
    EXPECT_EQ(alloc_count + this_thread_alloc_count, stats.alloc_count);
  })};

  PlatformThreadHandle thread_handle;
  PlatformThread::Create(0, &delegate, &thread_handle);
  PlatformThread::Join(thread_handle);
}

#endif  // defined(PA_ENABLE_THREAD_CACHE_STATISTICS)

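// PurgeAll() purges the calling thread's cache synchronously; other threads
// only purge theirs on their next deallocation.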
TEST_F(ThreadCacheTest, PurgeAll) NO_THREAD_SAFETY_ANALYSIS {
  std::atomic<bool> other_thread_started{false};
  std::atomic<bool> purge_called{false};

  const size_t kTestSize = 100;
  size_t bucket_index = FillThreadCacheAndReturnIndex(kTestSize);
  ThreadCache* this_thread_tcache = g_root->thread_cache_for_testing();
  ThreadCache* other_thread_tcache = nullptr;

  LambdaThreadDelegate delegate{
      BindLambdaForTesting([&]() NO_THREAD_SAFETY_ANALYSIS {
        FillThreadCacheAndReturnIndex(kTestSize);
        other_thread_tcache = g_root->thread_cache_for_testing();

        other_thread_started.store(true, std::memory_order_release);
        while (!purge_called.load(std::memory_order_acquire)) {
        }

        // Purge() was not triggered from the other thread.
        EXPECT_EQ(1u,
                  other_thread_tcache->bucket_count_for_testing(bucket_index));
        // Allocations do not trigger Purge().
        void* data = g_root->Alloc(1, "");
        EXPECT_EQ(1u,
                  other_thread_tcache->bucket_count_for_testing(bucket_index));
        // But deallocations do.
        g_root->Free(data);
        EXPECT_EQ(0u,
                  other_thread_tcache->bucket_count_for_testing(bucket_index));
      })};

  PlatformThreadHandle thread_handle;
  PlatformThread::Create(0, &delegate, &thread_handle);

  while (!other_thread_started.load(std::memory_order_acquire)) {
  }

  EXPECT_EQ(1u, this_thread_tcache->bucket_count_for_testing(bucket_index));
  EXPECT_EQ(1u, other_thread_tcache->bucket_count_for_testing(bucket_index));

  ThreadCacheRegistry::Instance().PurgeAll();
  // This thread is synchronously purged.
  EXPECT_EQ(0u, this_thread_tcache->bucket_count_for_testing(bucket_index));
  // Not the other one.
  EXPECT_EQ(1u, other_thread_tcache->bucket_count_for_testing(bucket_index));

  purge_called.store(true, std::memory_order_release);
  PlatformThread::Join(thread_handle);
}

}  // namespace internal
}  // namespace base

#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
        // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) &&
        // defined(PA_THREAD_CACHE_SUPPORTED)