// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/gwp_asan/client/guarded_page_allocator.h"

#include <algorithm>
#include <array>
#include <cstring>
#include <memory>
#include <set>
#include <utility>
#include <vector>

#include "base/bits.h"
#include "base/macros.h"
#include "base/process/process_metrics.h"
#include "base/test/bind_test_util.h"
#include "base/test/gtest_util.h"
#include "base/threading/platform_thread.h"
#include "base/threading/simple_thread.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace gwp_asan {
namespace internal {

static constexpr size_t kMaxMetadata = AllocatorState::kMaxMetadata;
static constexpr size_t kMaxSlots = AllocatorState::kMaxSlots;

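// Base fixture that initializes a GuardedPageAllocator with the given page
// budget and records, via allocator_oom_, whether the out-of-memory callback
// passed to Init() has fired.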
class BaseGpaTest : public testing::Test {
 protected:
  BaseGpaTest(size_t max_allocated_pages, bool is_partition_alloc) {
    gpa_.Init(max_allocated_pages, kMaxMetadata, kMaxSlots,
              base::BindLambdaForTesting(
                  [&](size_t allocations) { allocator_oom_ = true; }),
              is_partition_alloc);
  }

  GuardedPageAllocator gpa_;
  bool allocator_oom_ = false;
};

class GuardedPageAllocatorTest : public BaseGpaTest,
                                 public testing::WithParamInterface<bool> {
 protected:
  GuardedPageAllocatorTest() : BaseGpaTest(kMaxMetadata, GetParam()) {}

  // Get a left- or right-aligned allocation (or nullptr on error).
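  // Placement within the page varies between allocations, so retry until an
  // allocation with the requested alignment is returned.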
  char* GetAlignedAllocation(bool left_aligned, size_t sz, size_t align = 0) {
    for (size_t i = 0; i < 100; i++) {
      void* alloc = gpa_.Allocate(sz, align);
      if (!alloc)
        return nullptr;

      uintptr_t addr = reinterpret_cast<uintptr_t>(alloc);
      bool is_left_aligned =
          (base::bits::Align(addr, base::GetPageSize()) == addr);
      if (is_left_aligned == left_aligned)
        return reinterpret_cast<char*>(addr);

      gpa_.Deallocate(alloc);
    }

    return nullptr;
  }

  // Helper that returns the offset of a right-aligned allocation in the
  // allocation's page.
  uintptr_t GetRightAlignedAllocationOffset(size_t size, size_t align) {
    const uintptr_t page_mask = base::GetPageSize() - 1;

    void* buf = GetAlignedAllocation(false, size, align);
    CHECK(buf);
    gpa_.Deallocate(buf);

    return reinterpret_cast<uintptr_t>(buf) & page_mask;
  }
};

INSTANTIATE_TEST_SUITE_P(VaryPartitionAlloc,
                         GuardedPageAllocatorTest,
                         testing::Values(false, true));

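// A page-sized allocation is writable in full; writing past its end, writing
// after free, and double-freeing must all crash.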
TEST_P(GuardedPageAllocatorTest, SingleAllocDealloc) {
  char* buf = reinterpret_cast<char*>(gpa_.Allocate(base::GetPageSize()));
  EXPECT_NE(buf, nullptr);
  EXPECT_TRUE(gpa_.PointerIsMine(buf));
  memset(buf, 'A', base::GetPageSize());
  EXPECT_DEATH(buf[base::GetPageSize()] = 'A', "");
  gpa_.Deallocate(buf);
  EXPECT_DEATH(buf[0] = 'B', "");
  EXPECT_DEATH(gpa_.Deallocate(buf), "");
}

TEST_P(GuardedPageAllocatorTest, CrashOnBadDeallocPointer) {
  EXPECT_DEATH(gpa_.Deallocate(nullptr), "");
  char* buf = reinterpret_cast<char*>(gpa_.Allocate(8));
  EXPECT_DEATH(gpa_.Deallocate(buf + 1), "");
  gpa_.Deallocate(buf);
}

TEST_P(GuardedPageAllocatorTest, PointerIsMine) {
  void* buf = gpa_.Allocate(1);
  auto malloc_ptr = std::make_unique<char>();
  EXPECT_TRUE(gpa_.PointerIsMine(buf));
  gpa_.Deallocate(buf);
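  // PointerIsMine() is a check against the allocator's reserved address
  // range, so it still recognizes an allocation after it has been freed.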
  EXPECT_TRUE(gpa_.PointerIsMine(buf));
  int stack_var;
  EXPECT_FALSE(gpa_.PointerIsMine(&stack_var));
  EXPECT_FALSE(gpa_.PointerIsMine(malloc_ptr.get()));
}

TEST_P(GuardedPageAllocatorTest, GetRequestedSize) {
  void* buf = gpa_.Allocate(100);
  EXPECT_EQ(gpa_.GetRequestedSize(buf), 100U);
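  // Querying a pointer that is not an allocation's base address crashes,
  // except on macOS where GetRequestedSize() reports 0 instead.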
#if !defined(OS_MACOSX)
  EXPECT_DEATH({ gpa_.GetRequestedSize((char*)buf + 1); }, "");
#else
  EXPECT_EQ(gpa_.GetRequestedSize((char*)buf + 1), 0U);
#endif
}

TEST_P(GuardedPageAllocatorTest, LeftAlignedAllocation) {
  char* buf = GetAlignedAllocation(true, 16);
  ASSERT_NE(buf, nullptr);
  EXPECT_DEATH(buf[-1] = 'A', "");
  buf[0] = 'A';
  buf[base::GetPageSize() - 1] = 'A';
  gpa_.Deallocate(buf);
}

TEST_P(GuardedPageAllocatorTest, RightAlignedAllocation) {
  char* buf =
      GetAlignedAllocation(false, GuardedPageAllocator::kGpaAllocAlignment);
  ASSERT_NE(buf, nullptr);
  buf[-1] = 'A';
  buf[0] = 'A';
  EXPECT_DEATH(buf[GuardedPageAllocator::kGpaAllocAlignment] = 'A', "");
  gpa_.Deallocate(buf);
}

TEST_P(GuardedPageAllocatorTest, AllocationAlignment) {
  const uintptr_t page_size = base::GetPageSize();

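  // A right-aligned allocation ends at the page boundary, with its start
  // rounded down to the requested alignment. For example, 9 bytes with
  // 8-byte alignment starts at offset page_size - 16.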
  EXPECT_EQ(GetRightAlignedAllocationOffset(9, 1), page_size - 9);
  EXPECT_EQ(GetRightAlignedAllocationOffset(9, 2), page_size - 10);
  EXPECT_EQ(GetRightAlignedAllocationOffset(9, 4), page_size - 12);
  EXPECT_EQ(GetRightAlignedAllocationOffset(9, 8), page_size - 16);

  EXPECT_EQ(GetRightAlignedAllocationOffset(513, 512), page_size - 1024);

  // The default alignment is the smallest power of two that fits the
  // requested size...
  EXPECT_EQ(GetRightAlignedAllocationOffset(5, 0), page_size - 8);
  EXPECT_EQ(GetRightAlignedAllocationOffset(9, 0), page_size - 16);
  // ...but only up to 16 bytes.
  EXPECT_EQ(GetRightAlignedAllocationOffset(513, 0), page_size - (512 + 16));

  // We don't support aligning by more than a page.
  EXPECT_EQ(GetAlignedAllocation(false, 5, page_size * 2), nullptr);
}

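// Exhausts the allocator, then checks that the out-of-memory callback fires
// only after kOutOfMemoryCount consecutive failed allocations.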
TEST_P(GuardedPageAllocatorTest, OutOfMemoryCallback) {
  for (size_t i = 0; i < kMaxMetadata; i++)
    EXPECT_NE(gpa_.Allocate(1), nullptr);

  for (size_t i = 0; i < GuardedPageAllocator::kOutOfMemoryCount - 1; i++)
    EXPECT_EQ(gpa_.Allocate(1), nullptr);
  EXPECT_FALSE(allocator_oom_);
  EXPECT_EQ(gpa_.Allocate(1), nullptr);
  EXPECT_TRUE(allocator_oom_);
}

class GuardedPageAllocatorParamTest
    : public BaseGpaTest,
      public testing::WithParamInterface<size_t> {
 protected:
  GuardedPageAllocatorParamTest() : BaseGpaTest(GetParam(), false) {}
};

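// Allocates every available page, verifies that further allocations fail,
// that freed pages can be reused, and that no address is handed out twice
// concurrently.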
TEST_P(GuardedPageAllocatorParamTest, AllocDeallocAllPages) {
  size_t num_allocations = GetParam();
  char* bufs[kMaxMetadata];
  for (size_t i = 0; i < num_allocations; i++) {
    bufs[i] = reinterpret_cast<char*>(gpa_.Allocate(1));
    EXPECT_NE(bufs[i], nullptr);
    EXPECT_TRUE(gpa_.PointerIsMine(bufs[i]));
  }
  EXPECT_EQ(gpa_.Allocate(1), nullptr);
  gpa_.Deallocate(bufs[0]);
  bufs[0] = reinterpret_cast<char*>(gpa_.Allocate(1));
  EXPECT_NE(bufs[0], nullptr);
  EXPECT_TRUE(gpa_.PointerIsMine(bufs[0]));

  // Ensure that no allocation is returned twice.
  std::set<char*> ptr_set;
  for (size_t i = 0; i < num_allocations; i++)
    ptr_set.insert(bufs[i]);
  EXPECT_EQ(ptr_set.size(), num_allocations);

  for (size_t i = 0; i < num_allocations; i++) {
    SCOPED_TRACE(i);
    // Ensure all allocations are valid and writable.
    bufs[i][0] = 'A';
    gpa_.Deallocate(bufs[i]);
    // Death tests on the freed allocations are skipped here because they
    // time out on Windows.
  }
}
INSTANTIATE_TEST_SUITE_P(VaryNumPages,
                         GuardedPageAllocatorParamTest,
                         testing::Values(1, kMaxMetadata / 2, kMaxMetadata));

class ThreadedAllocCountDelegate : public base::DelegateSimpleThread::Delegate {
 public:
  ThreadedAllocCountDelegate(GuardedPageAllocator* gpa,
                             std::array<void*, kMaxMetadata>* allocations)
      : gpa_(gpa), allocations_(allocations) {}

  void Run() override {
    for (size_t i = 0; i < kMaxMetadata; i++) {
      (*allocations_)[i] = gpa_->Allocate(1);
    }
  }

 private:
  GuardedPageAllocator* gpa_;
  std::array<void*, kMaxMetadata>* allocations_;

  DISALLOW_COPY_AND_ASSIGN(ThreadedAllocCountDelegate);
};

// Test that no pages are double-allocated or left unallocated, and that no
// extra pages are allocated when there are concurrent calls to Allocate().
TEST_P(GuardedPageAllocatorTest, ThreadedAllocCount) {
  constexpr size_t num_threads = 2;
  std::array<void*, kMaxMetadata> allocations[num_threads];
  {
    base::DelegateSimpleThreadPool threads("alloc_threads", num_threads);
    threads.Start();

    std::vector<std::unique_ptr<ThreadedAllocCountDelegate>> delegates;
    for (size_t i = 0; i < num_threads; i++) {
      auto delegate =
          std::make_unique<ThreadedAllocCountDelegate>(&gpa_, &allocations[i]);
      threads.AddWork(delegate.get());
      delegates.push_back(std::move(delegate));
    }

    threads.JoinAll();
  }
  std::set<void*> allocations_set;
  for (size_t i = 0; i < num_threads; i++) {
    for (size_t j = 0; j < kMaxMetadata; j++) {
      allocations_set.insert(allocations[i][j]);
    }
  }
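  // Failed allocations are recorded as nullptr; drop them so that exactly
  // kMaxMetadata distinct successful allocations remain.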
  allocations_set.erase(nullptr);
  EXPECT_EQ(allocations_set.size(), kMaxMetadata);
}

class ThreadedHighContentionDelegate
    : public base::DelegateSimpleThread::Delegate {
 public:
  explicit ThreadedHighContentionDelegate(GuardedPageAllocator* gpa)
      : gpa_(gpa) {}

  void Run() override {
    char* buf;
    while ((buf = reinterpret_cast<char*>(gpa_->Allocate(1))) == nullptr) {
      base::PlatformThread::Sleep(base::TimeDelta::FromNanoseconds(5000));
    }

    // Verify that no other thread has access to this page.
    EXPECT_EQ(buf[0], 0);

    // Mark this page and allow some time for another thread to potentially
    // gain access to this page.
    buf[0] = 'A';
    base::PlatformThread::Sleep(base::TimeDelta::FromNanoseconds(10000));
    EXPECT_EQ(buf[0], 'A');

    // Unmark this page and deallocate.
    buf[0] = 0;
    gpa_->Deallocate(buf);
  }

 private:
  GuardedPageAllocator* gpa_;

  DISALLOW_COPY_AND_ASSIGN(ThreadedHighContentionDelegate);
};

// Test that the allocator remains in a consistent state under high contention
// and doesn't double-allocate pages or fail to deallocate pages.
TEST_P(GuardedPageAllocatorTest, ThreadedHighContention) {
#if defined(OS_ANDROID)
  constexpr size_t num_threads = 200;
#else
  constexpr size_t num_threads = 1000;
#endif
  {
    base::DelegateSimpleThreadPool threads("page_writers", num_threads);
    threads.Start();

    std::vector<std::unique_ptr<ThreadedHighContentionDelegate>> delegates;
    for (size_t i = 0; i < num_threads; i++) {
      auto delegate = std::make_unique<ThreadedHighContentionDelegate>(&gpa_);
      threads.AddWork(delegate.get());
      delegates.push_back(std::move(delegate));
    }

    threads.JoinAll();
  }

  // Verify all pages have been deallocated now that all threads are done.
  for (size_t i = 0; i < kMaxMetadata; i++)
    EXPECT_NE(gpa_.Allocate(1), nullptr);
}

class GuardedPageAllocatorPartitionAllocTest : public BaseGpaTest {
 protected:
  GuardedPageAllocatorPartitionAllocTest() : BaseGpaTest(kMaxMetadata, true) {}
};

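// In PartitionAlloc mode, allocations tagged with different partition type
// names must never be placed at the same address, even after many
// alloc/dealloc cycles.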
TEST_F(GuardedPageAllocatorPartitionAllocTest,
       DifferentPartitionsNeverOverlap) {
  constexpr const char* kType1 = "fake type1";
  constexpr const char* kType2 = "fake type2";

  std::set<void*> type1, type2;
  for (size_t i = 0; i < kMaxSlots * 3; i++) {
    void* alloc1 = gpa_.Allocate(1, 0, kType1);
    ASSERT_NE(alloc1, nullptr);
    void* alloc2 = gpa_.Allocate(1, 0, kType2);
    ASSERT_NE(alloc2, nullptr);

    type1.insert(alloc1);
    type2.insert(alloc2);

    gpa_.Deallocate(alloc1);
    gpa_.Deallocate(alloc2);
  }

  std::vector<void*> intersection;
  std::set_intersection(type1.begin(), type1.end(), type2.begin(), type2.end(),
                        std::back_inserter(intersection));

  EXPECT_EQ(intersection.size(), 0u);
}

}  // namespace internal
}  // namespace gwp_asan