// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/partition_alloc.h"

#include <stdlib.h>
#include <string.h>

#include <algorithm>
#include <cstddef>
#include <limits>
#include <memory>
#include <vector>

#include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/checked_ptr_support.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/partition_tag.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/logging.h"
#include "base/rand_util.h"
#include "base/stl_util.h"
#include "base/system/sys_info.h"
#include "base/test/scoped_feature_list.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"

#if defined(OS_POSIX)
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/time.h>
#endif  // defined(OS_POSIX)

#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)

namespace {

bool IsLargeMemoryDevice() {
  // Treat any device with 2GiB or more of physical memory as a "large memory
  // device". We check for slightly less than 2GiB so that devices with a small
  // amount of memory not accessible to the OS still count as "large".
  return base::SysInfo::AmountOfPhysicalMemory() >= 2040LL * 1024 * 1024;
}

bool SetAddressSpaceLimit() {
#if !defined(ARCH_CPU_64_BITS) || !defined(OS_POSIX)
  // 32 bits => address space is limited already.
  return true;
#elif defined(OS_POSIX) && !defined(OS_APPLE)
  // macOS will accept, but not enforce, |RLIMIT_AS| changes. See
  // https://crbug.com/435269 and rdar://17576114.
  //
  // Note: This number must be not less than 6 GB, because with
  // sanitizer_coverage_flags=edge, it reserves > 5 GB of address space. See
  // https://crbug.com/674665.
  const size_t kAddressSpaceLimit = static_cast<size_t>(6144) * 1024 * 1024;
  struct rlimit limit;
  if (getrlimit(RLIMIT_AS, &limit) != 0)
    return false;
  if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > kAddressSpaceLimit) {
    limit.rlim_cur = kAddressSpaceLimit;
    if (setrlimit(RLIMIT_AS, &limit) != 0)
      return false;
  }
  return true;
#else
  return false;
#endif
}

bool ClearAddressSpaceLimit() {
#if !defined(ARCH_CPU_64_BITS) || !defined(OS_POSIX)
  return true;
#elif defined(OS_POSIX)
  struct rlimit limit;
  if (getrlimit(RLIMIT_AS, &limit) != 0)
    return false;
  limit.rlim_cur = limit.rlim_max;
  if (setrlimit(RLIMIT_AS, &limit) != 0)
    return false;
  return true;
#else
  return false;
#endif
}
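
// The two helpers above are meant to bracket allocation-failure tests: cap the
// address space, drive the allocator until it returns null, then restore the
// original limit. A minimal sketch of the intended pattern (DoReturnNullTest
// below is the real user):
//
//   ASSERT_TRUE(SetAddressSpaceLimit());
//   void* p = root->AllocFlags(PartitionAllocReturnNull, size, nullptr);
//   // ... keep allocating; |p| eventually becomes null at the limit ...
//   EXPECT_TRUE(ClearAddressSpaceLimit());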

const size_t kTestSizes[] = {
    1,
    17,
    100,
    base::SystemPageSize(),
    base::SystemPageSize() + 1,
    base::PartitionRoot<
        base::internal::ThreadSafe>::Bucket::get_direct_map_size(100),
    1 << 20,
    1 << 21,
};
constexpr size_t kTestSizesCount = base::size(kTestSizes);

void AllocateRandomly(base::PartitionRoot<base::internal::ThreadSafe>* root,
                      size_t count,
                      int flags) {
  std::vector<void*> allocations(count, nullptr);
  for (size_t i = 0; i < count; ++i) {
    const size_t size = kTestSizes[base::RandGenerator(kTestSizesCount)];
    allocations[i] = root->AllocFlags(flags, size, nullptr);
    EXPECT_NE(nullptr, allocations[i]) << " size: " << size << " i: " << i;
  }

  for (size_t i = 0; i < count; ++i) {
    if (allocations[i])
      root->Free(allocations[i]);
  }
}
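
// AllocateRandomly() is a stress helper: it draws sizes from kTestSizes (small
// buckets, page-sized buckets and direct-mapped sizes) and keeps every
// allocation live until the end of the call, so several buckets see traffic at
// once. A typical invocation might look like this (the count is arbitrary):
//
//   AllocateRandomly(allocator.root(), 250, PartitionAllocReturnNull);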

void HandleOOM(size_t unused_size) {
  LOG(FATAL) << "Out of memory";
}

}  // namespace

namespace base {

// NOTE: Though this test actually exercises interfaces inside the ::base
// namespace, the unittest is inside the ::base::internal namespace because a
// portion of the test expectations require inspecting objects and behavior
// in the ::base::internal namespace. An alternate formulation would be to
// explicitly add using statements for each inspected type, but this felt more
// readable.
namespace internal {

using SlotSpan = SlotSpanMetadata<ThreadSafe>;

const size_t kTestAllocSize = 16;
#if !DCHECK_IS_ON()
const size_t kPointerOffset = kInSlotTagBufferSize + kInSlotRefCountBufferSize;
const size_t kExtraAllocSize = kInSlotTagBufferSize + kInSlotRefCountBufferSize;
#else
const size_t kPointerOffset =
    kCookieSize + kInSlotTagBufferSize + kInSlotRefCountBufferSize;
const size_t kExtraAllocSize =
    kCookieSize * 2 + kInSlotTagBufferSize + kInSlotRefCountBufferSize;
#endif
const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize;
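
// Illustrative arithmetic for the constants above (a sketch, not an extra
// assertion): each slot holds the requested bytes plus in-slot metadata, laid
// out roughly as
//   [cookie?][tag][ref-count][kTestAllocSize user bytes][cookie?]
// where the cookies exist only when DCHECK_IS_ON(). kPointerOffset is the
// metadata prefix before the user bytes, kExtraAllocSize is the total
// metadata, and kRealAllocSize is what the bucket actually has to carve out
// per slot.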

const char* type_name = nullptr;

class ScopedPageAllocation {
 public:
  ScopedPageAllocation(
      PartitionAllocator<base::internal::ThreadSafe>& allocator,
      base::CheckedNumeric<size_t> npages)
      : allocator_(allocator),
        npages_(npages),
        ptr_(reinterpret_cast<char*>(allocator_.root()->Alloc(
            (npages * SystemPageSize() - kExtraAllocSize).ValueOrDie(),
            type_name))) {}

  ~ScopedPageAllocation() { allocator_.root()->Free(ptr_); }

  void TouchAllPages() {
    memset(ptr_, 'A',
           ((npages_ * SystemPageSize()) - kExtraAllocSize).ValueOrDie());
  }

  void* PageAtIndex(size_t index) {
    return ptr_ - kPointerOffset + (SystemPageSize() * index);
  }

 private:
  PartitionAllocator<base::internal::ThreadSafe>& allocator_;
  const base::CheckedNumeric<size_t> npages_;
  char* ptr_;
};
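
// ScopedPageAllocation is a small RAII helper: it sizes the request so that,
// once kExtraAllocSize of metadata is added back, the slot spans exactly
// |npages| system pages, and it frees the block on destruction. A hypothetical
// use, for illustration only:
//
//   {
//     ScopedPageAllocation allocation(allocator, 3);
//     allocation.TouchAllPages();                     // commit all three pages
//     void* second_page = allocation.PageAtIndex(1);  // slot start of page 1
//   }                                                 // freed automatically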

class PartitionAllocTest : public testing::Test {
 protected:
  PartitionAllocTest() = default;

  ~PartitionAllocTest() override = default;

  void SetUp() override {
    scoped_feature_list.InitWithFeatures({features::kPartitionAllocGigaCage},
                                         {});
    PartitionAllocGlobalInit(HandleOOM);
    allocator.init({PartitionOptions::Alignment::kRegular});
    aligned_allocator.init({PartitionOptions::Alignment::kAlignedAlloc});
    test_bucket_index_ = SizeToIndex(kRealAllocSize);
  }

  size_t SizeToIndex(size_t size) {
    return PartitionRoot<base::internal::ThreadSafe>::SizeToBucketIndex(size);
  }

  void TearDown() override {
    allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptySlotSpans |
                                  PartitionPurgeDiscardUnusedSystemPages);
    PartitionAllocGlobalUninitForTesting();
  }

  size_t GetNumPagesPerSlotSpan(size_t size) {
    size_t real_size = size + kExtraAllocSize;
    size_t bucket_index = SizeToIndex(real_size);
    PartitionRoot<ThreadSafe>::Bucket* bucket =
        &allocator.root()->buckets[bucket_index];
    // TODO(tasak): make get_pages_per_slot_span() available in
    // partition_alloc_unittest.cc. Is it allowable to move the code from
    // partition_bucket.cc to partition_bucket.h?
    return (bucket->num_system_pages_per_slot_span +
            (NumSystemPagesPerPartitionPage() - 1)) /
           NumSystemPagesPerPartitionPage();
  }

  SlotSpan* GetFullSlotSpan(size_t size) {
    size_t real_size = size + kExtraAllocSize;
    size_t bucket_index = SizeToIndex(real_size);
    PartitionRoot<ThreadSafe>::Bucket* bucket =
        &allocator.root()->buckets[bucket_index];
    size_t num_slots =
        (bucket->num_system_pages_per_slot_span * SystemPageSize()) /
        bucket->slot_size;
    void* first = nullptr;
    void* last = nullptr;
    size_t i;
    for (i = 0; i < num_slots; ++i) {
      void* ptr = allocator.root()->Alloc(size, type_name);
      EXPECT_TRUE(ptr);
      if (!i)
        first = PartitionPointerAdjustSubtract(true, ptr);
      else if (i == num_slots - 1)
        last = PartitionPointerAdjustSubtract(true, ptr);
    }
    EXPECT_EQ(SlotSpan::FromPointer(first), SlotSpan::FromPointer(last));
    if (bucket->num_system_pages_per_slot_span ==
        NumSystemPagesPerPartitionPage())
      EXPECT_EQ(reinterpret_cast<size_t>(first) & PartitionPageBaseMask(),
                reinterpret_cast<size_t>(last) & PartitionPageBaseMask());
    EXPECT_EQ(num_slots,
              static_cast<size_t>(
                  bucket->active_slot_spans_head->num_allocated_slots));
    EXPECT_EQ(nullptr, bucket->active_slot_spans_head->freelist_head);
    EXPECT_TRUE(bucket->active_slot_spans_head);
    EXPECT_TRUE(bucket->active_slot_spans_head !=
                SlotSpan::get_sentinel_slot_span());
    return bucket->active_slot_spans_head;
  }

  void CycleFreeCache(size_t size) {
    for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
      void* ptr = allocator.root()->Alloc(size, type_name);
      auto* slot_span =
          SlotSpan::FromPointer(PartitionPointerAdjustSubtract(true, ptr));
      auto* bucket = slot_span->bucket;
      EXPECT_EQ(1, bucket->active_slot_spans_head->num_allocated_slots);
      allocator.root()->Free(ptr);
      EXPECT_EQ(0, bucket->active_slot_spans_head->num_allocated_slots);
      EXPECT_NE(-1, bucket->active_slot_spans_head->empty_cache_index);
    }
  }

  enum ReturnNullTestMode {
    kPartitionAllocFlags,
    kPartitionReallocFlags,
    kPartitionRootTryRealloc,
  };

  void DoReturnNullTest(size_t alloc_size, ReturnNullTestMode mode) {
    // TODO(crbug.com/678782): Where necessary and possible, disable the
    // platform's OOM-killing behavior. OOM-killing makes this test flaky on
    // low-memory devices.
    if (!IsLargeMemoryDevice()) {
      LOG(WARNING)
          << "Skipping test on this device because of crbug.com/678782";
      LOG(FATAL) << "DoReturnNullTest";
    }

    ASSERT_TRUE(SetAddressSpaceLimit());

    // Work out the number of allocations for 6 GB of memory.
    const int num_allocations = (6 * 1024 * 1024) / (alloc_size / 1024);

    void** ptrs = reinterpret_cast<void**>(
        allocator.root()->Alloc(num_allocations * sizeof(void*), type_name));
    int i;

    for (i = 0; i < num_allocations; ++i) {
      switch (mode) {
        case kPartitionAllocFlags: {
          ptrs[i] = allocator.root()->AllocFlags(PartitionAllocReturnNull,
                                                 alloc_size, type_name);
          break;
        }
        case kPartitionReallocFlags: {
          ptrs[i] = allocator.root()->AllocFlags(PartitionAllocReturnNull, 1,
                                                 type_name);
          ptrs[i] = allocator.root()->ReallocFlags(
              PartitionAllocReturnNull, ptrs[i], alloc_size, type_name);
          break;
        }
        case kPartitionRootTryRealloc: {
          ptrs[i] = allocator.root()->AllocFlags(PartitionAllocReturnNull, 1,
                                                 type_name);
          ptrs[i] =
              allocator.root()->TryRealloc(ptrs[i], alloc_size, type_name);
        }
      }

      if (!i)
        EXPECT_TRUE(ptrs[0]);
      if (!ptrs[i]) {
        ptrs[i] = allocator.root()->AllocFlags(PartitionAllocReturnNull,
                                               alloc_size, type_name);
        EXPECT_FALSE(ptrs[i]);
        break;
      }
    }

    // We shouldn't succeed in allocating all 6 GB of memory. If we do, then
    // we're not actually testing anything here.
    EXPECT_LT(i, num_allocations);

    // Free, reallocate and free again each block we allocated. We do this to
    // check that freeing memory also works correctly after a failed allocation.
    for (--i; i >= 0; --i) {
      allocator.root()->Free(ptrs[i]);
      ptrs[i] = allocator.root()->AllocFlags(PartitionAllocReturnNull,
                                             alloc_size, type_name);
      EXPECT_TRUE(ptrs[i]);
      allocator.root()->Free(ptrs[i]);
    }

    allocator.root()->Free(ptrs);

    EXPECT_TRUE(ClearAddressSpaceLimit());
    LOG(FATAL) << "DoReturnNullTest";
  }

  base::test::ScopedFeatureList scoped_feature_list;
  PartitionAllocator<base::internal::ThreadSafe> allocator;
  PartitionAllocator<base::internal::ThreadSafe> aligned_allocator;
  size_t test_bucket_index_;
};

class PartitionAllocDeathTest : public PartitionAllocTest {};
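
// DoReturnNullTest() deliberately ends with LOG(FATAL), so it is meant to be
// driven from a death test. A sketch of how such a test could be written (the
// concrete sizes and test names used elsewhere in this file may differ):
//
//   TEST_F(PartitionAllocDeathTest, RepeatedAllocReturnNull) {
//     EXPECT_DEATH(DoReturnNullTest(512 * 1024 * 1024, kPartitionAllocFlags),
//                  "DoReturnNullTest");
//   }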

namespace {

void FreeFullSlotSpan(PartitionRoot<base::internal::ThreadSafe>* root,
                      SlotSpan* slot_span) {
  size_t size = slot_span->bucket->slot_size;
  size_t num_slots =
      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
      size;
  EXPECT_EQ(num_slots,
            static_cast<size_t>(std::abs(slot_span->num_allocated_slots)));
  char* ptr = reinterpret_cast<char*>(SlotSpan::ToPointer(slot_span));
  size_t i;
  for (i = 0; i < num_slots; ++i) {
    root->Free(ptr + kPointerOffset);
    ptr += size;
  }
}

#if defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_BSD)
bool CheckPageInCore(void* ptr, bool in_core) {
#if defined(OS_BSD)
  char ret = 0;
#else
  unsigned char ret = 0;
#endif
  EXPECT_EQ(0, mincore(ptr, SystemPageSize(), &ret));
  return in_core == (ret & 1);
}

#define CHECK_PAGE_IN_CORE(ptr, in_core) \
  EXPECT_TRUE(CheckPageInCore(ptr, in_core))
#else
#define CHECK_PAGE_IN_CORE(ptr, in_core) (void)(0)
#endif  // defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_BSD)
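
// CHECK_PAGE_IN_CORE() is for tests that want to assert whether a system page
// is resident (e.g. after discarding unused pages); it expands to a real
// mincore() check on Linux / Chrome OS / BSD and to a no-op elsewhere. Usage
// is simply:
//
//   CHECK_PAGE_IN_CORE(ptr - kPointerOffset, true);   // expected resident
//   CHECK_PAGE_IN_CORE(ptr - kPointerOffset, false);  // expected discarded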

class MockPartitionStatsDumper : public PartitionStatsDumper {
 public:
  MockPartitionStatsDumper()
      : total_resident_bytes(0),
        total_active_bytes(0),
        total_decommittable_bytes(0),
        total_discardable_bytes(0) {}

  void PartitionDumpTotals(const char* partition_name,
                           const PartitionMemoryStats* stats) override {
    EXPECT_GE(stats->total_mmapped_bytes, stats->total_resident_bytes);
    EXPECT_EQ(total_resident_bytes, stats->total_resident_bytes);
    EXPECT_EQ(total_active_bytes, stats->total_active_bytes);
    EXPECT_EQ(total_decommittable_bytes, stats->total_decommittable_bytes);
    EXPECT_EQ(total_discardable_bytes, stats->total_discardable_bytes);
  }

  void PartitionsDumpBucketStats(
      const char* partition_name,
      const PartitionBucketMemoryStats* stats) override {
    (void)partition_name;
    EXPECT_TRUE(stats->is_valid);
    EXPECT_EQ(0u, stats->bucket_slot_size & sizeof(void*));
    bucket_stats.push_back(*stats);
    total_resident_bytes += stats->resident_bytes;
    total_active_bytes += stats->active_bytes;
    total_decommittable_bytes += stats->decommittable_bytes;
    total_discardable_bytes += stats->discardable_bytes;
  }

  bool IsMemoryAllocationRecorded() {
    return total_resident_bytes != 0 && total_active_bytes != 0;
  }

  const PartitionBucketMemoryStats* GetBucketStats(size_t bucket_size) {
    for (size_t i = 0; i < bucket_stats.size(); ++i) {
      if (bucket_stats[i].bucket_slot_size == bucket_size)
        return &bucket_stats[i];
    }
    return nullptr;
  }

 private:
  size_t total_resident_bytes;
  size_t total_active_bytes;
  size_t total_decommittable_bytes;
  size_t total_discardable_bytes;

  std::vector<PartitionBucketMemoryStats> bucket_stats;
};
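
// MockPartitionStatsDumper accumulates the per-bucket callbacks and
// cross-checks them against the partition-wide totals. A rough usage sketch,
// assuming the root's stats-dumping entry point takes a partition name, a
// light-dump flag and a PartitionStatsDumper*:
//
//   MockPartitionStatsDumper dumper;
//   allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
//                               &dumper);
//   EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());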

}  // namespace

// Check that the most basic of allocate / free pairs work.
TEST_F(PartitionAllocTest, Basic) {
  PartitionRoot<ThreadSafe>::Bucket* bucket =
      &allocator.root()->buckets[test_bucket_index_];
  auto* seed_slot_span = SlotSpan::get_sentinel_slot_span();

  EXPECT_FALSE(bucket->empty_slot_spans_head);
  EXPECT_FALSE(bucket->decommitted_slot_spans_head);
  EXPECT_EQ(seed_slot_span, bucket->active_slot_spans_head);
  EXPECT_EQ(nullptr, bucket->active_slot_spans_head->next_slot_span);

  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
  EXPECT_TRUE(ptr);
  EXPECT_EQ(kPointerOffset,
            reinterpret_cast<size_t>(ptr) & PartitionPageOffsetMask());
  // Check that the offset appears to include a guard page.
  EXPECT_EQ(PartitionPageSize() + kPointerOffset + ReservedTagBitmapSize(),
            reinterpret_cast<size_t>(ptr) & kSuperPageOffsetMask);

  allocator.root()->Free(ptr);
  // Expect that the last active slot span gets noticed as empty but doesn't get
  // decommitted.
  EXPECT_TRUE(bucket->empty_slot_spans_head);
  EXPECT_FALSE(bucket->decommitted_slot_spans_head);
}
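
// A note on the super-page offset expectation in the Basic test above: in this
// configuration a super page starts with a guard partition page, followed by
// the reserved tag bitmap, followed by the first usable slot span, so the
// first returned pointer is expected to sit at
//   PartitionPageSize() + ReservedTagBitmapSize() + kPointerOffset
// bytes past the super-page base, which is what the masked comparison checks.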

// Test multiple allocations, and freelist handling.
TEST_F(PartitionAllocTest, MultiAlloc) {
  char* ptr1 = reinterpret_cast<char*>(
      allocator.root()->Alloc(kTestAllocSize, type_name));
  char* ptr2 = reinterpret_cast<char*>(
      allocator.root()->Alloc(kTestAllocSize, type_name));
  EXPECT_TRUE(ptr1);
  EXPECT_TRUE(ptr2);
  ptrdiff_t diff = ptr2 - ptr1;
  EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);

  // Check that we re-use the just-freed slot.
  allocator.root()->Free(ptr2);
  ptr2 = reinterpret_cast<char*>(
      allocator.root()->Alloc(kTestAllocSize, type_name));
  EXPECT_TRUE(ptr2);
  diff = ptr2 - ptr1;
  EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
  allocator.root()->Free(ptr1);
  ptr1 = reinterpret_cast<char*>(
      allocator.root()->Alloc(kTestAllocSize, type_name));
  EXPECT_TRUE(ptr1);
  diff = ptr2 - ptr1;
  EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);

  char* ptr3 = reinterpret_cast<char*>(
      allocator.root()->Alloc(kTestAllocSize, type_name));
  EXPECT_TRUE(ptr3);
  diff = ptr3 - ptr1;
  EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize * 2), diff);

  allocator.root()->Free(ptr1);
  allocator.root()->Free(ptr2);
  allocator.root()->Free(ptr3);
}

// Test a bucket with multiple slot spans.
TEST_F(PartitionAllocTest, MultiSlotSpans) {
  PartitionRoot<ThreadSafe>::Bucket* bucket =
      &allocator.root()->buckets[test_bucket_index_];

  auto* slot_span = GetFullSlotSpan(kTestAllocSize);
  FreeFullSlotSpan(allocator.root(), slot_span);
  EXPECT_TRUE(bucket->empty_slot_spans_head);
  EXPECT_EQ(SlotSpan::get_sentinel_slot_span(), bucket->active_slot_spans_head);
  EXPECT_EQ(nullptr, slot_span->next_slot_span);
  EXPECT_EQ(0, slot_span->num_allocated_slots);

  slot_span = GetFullSlotSpan(kTestAllocSize);
  auto* slot_span2 = GetFullSlotSpan(kTestAllocSize);

  EXPECT_EQ(slot_span2, bucket->active_slot_spans_head);
  EXPECT_EQ(nullptr, slot_span2->next_slot_span);
  EXPECT_EQ(reinterpret_cast<uintptr_t>(SlotSpan::ToPointer(slot_span)) &
                kSuperPageBaseMask,
            reinterpret_cast<uintptr_t>(SlotSpan::ToPointer(slot_span2)) &
                kSuperPageBaseMask);

  // Fully free the non-current slot span. This will leave us with no current
  // active slot span because one is empty and the other is full.
  FreeFullSlotSpan(allocator.root(), slot_span);
  EXPECT_EQ(0, slot_span->num_allocated_slots);
  EXPECT_TRUE(bucket->empty_slot_spans_head);
  EXPECT_EQ(SlotSpanMetadata<ThreadSafe>::get_sentinel_slot_span(),
            bucket->active_slot_spans_head);

  // Allocate a new slot span, it should pull from the freelist.
  slot_span = GetFullSlotSpan(kTestAllocSize);
  EXPECT_FALSE(bucket->empty_slot_spans_head);
  EXPECT_EQ(slot_span, bucket->active_slot_spans_head);

  FreeFullSlotSpan(allocator.root(), slot_span);
  FreeFullSlotSpan(allocator.root(), slot_span2);
  EXPECT_EQ(0, slot_span->num_allocated_slots);
  EXPECT_EQ(0, slot_span2->num_allocated_slots);
  EXPECT_EQ(0, slot_span2->num_unprovisioned_slots);
  EXPECT_NE(-1, slot_span2->empty_cache_index);
}

// Test some finer aspects of internal slot span transitions.
TEST_F(PartitionAllocTest, SlotSpanTransitions) {
  PartitionRoot<ThreadSafe>::Bucket* bucket =
      &allocator.root()->buckets[test_bucket_index_];

  auto* slot_span1 = GetFullSlotSpan(kTestAllocSize);
  EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
  EXPECT_EQ(nullptr, slot_span1->next_slot_span);
  auto* slot_span2 = GetFullSlotSpan(kTestAllocSize);
  EXPECT_EQ(slot_span2, bucket->active_slot_spans_head);
  EXPECT_EQ(nullptr, slot_span2->next_slot_span);

  // Bounce slot_span1 back into the non-full list then fill it up again.
  char* ptr =
      reinterpret_cast<char*>(SlotSpan::ToPointer(slot_span1)) + kPointerOffset;
  allocator.root()->Free(ptr);
  EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
  (void)allocator.root()->Alloc(kTestAllocSize, type_name);
  EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
  EXPECT_EQ(slot_span2, bucket->active_slot_spans_head->next_slot_span);

  // Allocating another slot span at this point should cause us to scan over
  // slot_span1 (which is both full and NOT our current slot span), and evict it
  // from the freelist. Older code had an O(n^2) condition due to failure to do
  // this.
  auto* slot_span3 = GetFullSlotSpan(kTestAllocSize);
  EXPECT_EQ(slot_span3, bucket->active_slot_spans_head);
  EXPECT_EQ(nullptr, slot_span3->next_slot_span);

  // Work out a pointer into slot_span2 and free it.
  ptr =
      reinterpret_cast<char*>(SlotSpan::ToPointer(slot_span2)) + kPointerOffset;
  allocator.root()->Free(ptr);
  // Trying to allocate at this time should cause us to cycle around to
  // slot_span2 and find the recently freed slot.
  char* new_ptr = reinterpret_cast<char*>(
      allocator.root()->Alloc(kTestAllocSize, type_name));
  EXPECT_EQ(ptr, new_ptr);
  EXPECT_EQ(slot_span2, bucket->active_slot_spans_head);
  EXPECT_EQ(slot_span3, slot_span2->next_slot_span);

  // Work out a pointer into slot_span1 and free it. This should pull the slot
  // span back into the list of available slot spans.
  ptr =
      reinterpret_cast<char*>(SlotSpan::ToPointer(slot_span1)) + kPointerOffset;
  allocator.root()->Free(ptr);
  // This allocation should be satisfied by slot_span1.
  new_ptr = reinterpret_cast<char*>(
      allocator.root()->Alloc(kTestAllocSize, type_name));
  EXPECT_EQ(ptr, new_ptr);
  EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
  EXPECT_EQ(slot_span2, slot_span1->next_slot_span);

  FreeFullSlotSpan(allocator.root(), slot_span3);
  FreeFullSlotSpan(allocator.root(), slot_span2);
  FreeFullSlotSpan(allocator.root(), slot_span1);

  // Allocating whilst in this state exposed a bug, so keep the test.
  ptr = reinterpret_cast<char*>(
      allocator.root()->Alloc(kTestAllocSize, type_name));
  allocator.root()->Free(ptr);
}

// Test some corner cases relating to slot span transitions in the internal
// free slot span list metadata bucket.
TEST_F(PartitionAllocTest, FreeSlotSpanListSlotSpanTransitions) {
  PartitionRoot<ThreadSafe>::Bucket* bucket =
      &allocator.root()->buckets[test_bucket_index_];

  size_t num_to_fill_free_list_slot_span =
      PartitionPageSize() / (sizeof(SlotSpan) + kExtraAllocSize);
  // The +1 is because we need to account for the fact that the current slot
  // span never gets thrown on the freelist.
  ++num_to_fill_free_list_slot_span;
  auto slot_spans =
      std::make_unique<SlotSpan*[]>(num_to_fill_free_list_slot_span);

  size_t i;
  for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
    slot_spans[i] = GetFullSlotSpan(kTestAllocSize);
  }
  EXPECT_EQ(slot_spans[num_to_fill_free_list_slot_span - 1],
            bucket->active_slot_spans_head);
  for (i = 0; i < num_to_fill_free_list_slot_span; ++i)
    FreeFullSlotSpan(allocator.root(), slot_spans[i]);
  EXPECT_EQ(SlotSpan::get_sentinel_slot_span(), bucket->active_slot_spans_head);
  EXPECT_TRUE(bucket->empty_slot_spans_head);

  // Allocate / free in a different bucket size so we get control of a
  // different free slot span list. We need two slot spans because one will be
  // the last active slot span and not get freed.
  auto* slot_span1 = GetFullSlotSpan(kTestAllocSize * 2);
  auto* slot_span2 = GetFullSlotSpan(kTestAllocSize * 2);
  FreeFullSlotSpan(allocator.root(), slot_span1);
  FreeFullSlotSpan(allocator.root(), slot_span2);

  for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
    slot_spans[i] = GetFullSlotSpan(kTestAllocSize);
  }
  EXPECT_EQ(slot_spans[num_to_fill_free_list_slot_span - 1],
            bucket->active_slot_spans_head);

  for (i = 0; i < num_to_fill_free_list_slot_span; ++i)
    FreeFullSlotSpan(allocator.root(), slot_spans[i]);
  EXPECT_EQ(SlotSpan::get_sentinel_slot_span(), bucket->active_slot_spans_head);
  EXPECT_TRUE(bucket->empty_slot_spans_head);
}

// Test a large series of allocations that cross more than one underlying
// super page.
TEST_F(PartitionAllocTest, MultiPageAllocs) {
  size_t num_pages_per_slot_span = GetNumPagesPerSlotSpan(kTestAllocSize);
  // 1 super page has 2 guard partition pages.
  size_t num_slot_spans_needed =
      (NumPartitionPagesPerSuperPage() - NumPartitionPagesPerTagBitmap() - 2) /
      num_pages_per_slot_span;

  // We need one more slot span in order to cross super page boundary.
  ++num_slot_spans_needed;

  EXPECT_GT(num_slot_spans_needed, 1u);
  auto slot_spans = std::make_unique<SlotSpan*[]>(num_slot_spans_needed);
  uintptr_t first_super_page_base = 0;
  size_t i;
  for (i = 0; i < num_slot_spans_needed; ++i) {
    slot_spans[i] = GetFullSlotSpan(kTestAllocSize);
    void* storage_ptr = SlotSpan::ToPointer(slot_spans[i]);
    if (!i)
      first_super_page_base =
          reinterpret_cast<uintptr_t>(storage_ptr) & kSuperPageBaseMask;
    if (i == num_slot_spans_needed - 1) {
      uintptr_t second_super_page_base =
          reinterpret_cast<uintptr_t>(storage_ptr) & kSuperPageBaseMask;
      uintptr_t second_super_page_offset =
          reinterpret_cast<uintptr_t>(storage_ptr) & kSuperPageOffsetMask;
      EXPECT_FALSE(second_super_page_base == first_super_page_base);
      // Check that we allocated a guard page for the second page.
      EXPECT_EQ(PartitionPageSize() + ReservedTagBitmapSize(),
                second_super_page_offset);
    }
  }
  for (i = 0; i < num_slot_spans_needed; ++i)
    FreeFullSlotSpan(allocator.root(), slot_spans[i]);
}

// Test the generic allocation functions that can handle arbitrary sizes and
// reallocing etc.
TEST_F(PartitionAllocTest, Alloc) {
  void* ptr = allocator.root()->Alloc(1, type_name);
  EXPECT_TRUE(ptr);
  allocator.root()->Free(ptr);
  ptr = allocator.root()->Alloc(kMaxBucketed + 1, type_name);
  EXPECT_TRUE(ptr);
  allocator.root()->Free(ptr);

  ptr = allocator.root()->Alloc(1, type_name);
  EXPECT_TRUE(ptr);
  void* orig_ptr = ptr;
  char* char_ptr = static_cast<char*>(ptr);
  *char_ptr = 'A';

  // Change the size of the realloc, remaining inside the same bucket.
  void* new_ptr = allocator.root()->Realloc(ptr, 2, type_name);
  EXPECT_EQ(ptr, new_ptr);
  new_ptr = allocator.root()->Realloc(ptr, 1, type_name);
  EXPECT_EQ(ptr, new_ptr);
  new_ptr = allocator.root()->Realloc(ptr, kSmallestBucket, type_name);
  EXPECT_EQ(ptr, new_ptr);

  // Change the size of the realloc, switching buckets.
  new_ptr = allocator.root()->Realloc(ptr, kSmallestBucket + 1, type_name);
  EXPECT_NE(new_ptr, ptr);
  // Check that the realloc copied correctly.
  char* new_char_ptr = static_cast<char*>(new_ptr);
  EXPECT_EQ(*new_char_ptr, 'A');
#if DCHECK_IS_ON()
  // Subtle: this checks for an old bug where we copied too much from the
  // source of the realloc. The condition can be detected by a trashing of
  // the uninitialized value in the space of the upsized allocation.
  EXPECT_EQ(kUninitializedByte,
            static_cast<unsigned char>(*(new_char_ptr + kSmallestBucket)));
#endif
  *new_char_ptr = 'B';
  // The realloc moved. To check that the old allocation was freed, we can
  // do an alloc of the old allocation size and check that the old allocation
  // address is at the head of the freelist and reused.
  void* reused_ptr = allocator.root()->Alloc(1, type_name);
  EXPECT_EQ(reused_ptr, orig_ptr);
  allocator.root()->Free(reused_ptr);

  // Downsize the realloc.
  ptr = new_ptr;
  new_ptr = allocator.root()->Realloc(ptr, 1, type_name);
  EXPECT_EQ(new_ptr, orig_ptr);
  new_char_ptr = static_cast<char*>(new_ptr);
  EXPECT_EQ(*new_char_ptr, 'B');
  *new_char_ptr = 'C';

  // Upsize the realloc to outside the partition.
  ptr = new_ptr;
  new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed + 1, type_name);
  EXPECT_NE(new_ptr, ptr);
  new_char_ptr = static_cast<char*>(new_ptr);
  EXPECT_EQ(*new_char_ptr, 'C');
  *new_char_ptr = 'D';

  // Upsize and downsize the realloc, remaining outside the partition.
  ptr = new_ptr;
  new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed * 10, type_name);
  new_char_ptr = static_cast<char*>(new_ptr);
  EXPECT_EQ(*new_char_ptr, 'D');
  *new_char_ptr = 'E';
  ptr = new_ptr;
  new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed * 2, type_name);
  new_char_ptr = static_cast<char*>(new_ptr);
  EXPECT_EQ(*new_char_ptr, 'E');
  *new_char_ptr = 'F';

  // Downsize the realloc to inside the partition.
  ptr = new_ptr;
  new_ptr = allocator.root()->Realloc(ptr, 1, type_name);
  EXPECT_NE(new_ptr, ptr);
  EXPECT_EQ(new_ptr, orig_ptr);
  new_char_ptr = static_cast<char*>(new_ptr);
  EXPECT_EQ(*new_char_ptr, 'F');

  allocator.root()->Free(new_ptr);
}

// Test the generic allocation functions can handle some specific sizes of
// interest.
TEST_F(PartitionAllocTest, AllocSizes) {
  void* ptr = allocator.root()->Alloc(0, type_name);
  EXPECT_TRUE(ptr);
  allocator.root()->Free(ptr);

  // PartitionPageSize() is interesting because it results in just one
  // allocation per page, which tripped up some corner cases.
  size_t size = PartitionPageSize() - kExtraAllocSize;
  ptr = allocator.root()->Alloc(size, type_name);
  EXPECT_TRUE(ptr);
  void* ptr2 = allocator.root()->Alloc(size, type_name);
  EXPECT_TRUE(ptr2);
  allocator.root()->Free(ptr);
  // Should be freeable at this point.
  auto* slot_span =
      SlotSpan::FromPointer(PartitionPointerAdjustSubtract(true, ptr));
  EXPECT_NE(-1, slot_span->empty_cache_index);
  allocator.root()->Free(ptr2);

  size = (((PartitionPageSize() * kMaxPartitionPagesPerSlotSpan) -
           SystemPageSize()) /
          2) -
         kExtraAllocSize;
  ptr = allocator.root()->Alloc(size, type_name);
  EXPECT_TRUE(ptr);
  memset(ptr, 'A', size);
  ptr2 = allocator.root()->Alloc(size, type_name);
  EXPECT_TRUE(ptr2);
  void* ptr3 = allocator.root()->Alloc(size, type_name);
  EXPECT_TRUE(ptr3);
  void* ptr4 = allocator.root()->Alloc(size, type_name);
  EXPECT_TRUE(ptr4);

  slot_span = SlotSpanMetadata<base::internal::ThreadSafe>::FromPointer(
      PartitionPointerAdjustSubtract(true, ptr));
  auto* slot_span2 =
      SlotSpan::FromPointer(PartitionPointerAdjustSubtract(true, ptr3));
  EXPECT_NE(slot_span, slot_span2);

  allocator.root()->Free(ptr);
  allocator.root()->Free(ptr3);
  allocator.root()->Free(ptr2);
  // Should be freeable at this point.
  EXPECT_NE(-1, slot_span->empty_cache_index);
  EXPECT_EQ(0, slot_span->num_allocated_slots);
  EXPECT_EQ(0, slot_span->num_unprovisioned_slots);
  void* new_ptr = allocator.root()->Alloc(size, type_name);
  EXPECT_EQ(ptr3, new_ptr);
  new_ptr = allocator.root()->Alloc(size, type_name);
  EXPECT_EQ(ptr2, new_ptr);

  allocator.root()->Free(new_ptr);
  allocator.root()->Free(ptr3);
  allocator.root()->Free(ptr4);

#if DCHECK_IS_ON()
  // |SlotSpanMetadata::Free| must poison the slot's contents with |kFreedByte|.
  EXPECT_EQ(kFreedByte,
            *(reinterpret_cast<unsigned char*>(new_ptr) + (size - 1)));
#endif

  // Can we allocate a massive (512MB) size?
  // Allocate 512MB, but +1, to test for cookie writing alignment issues.
  // Test this only if the device has enough memory or it might fail due
  // to OOM.
  if (IsLargeMemoryDevice()) {
    ptr = allocator.root()->Alloc(512 * 1024 * 1024 + 1, type_name);
    allocator.root()->Free(ptr);
  }

  // Check a more reasonable, but still direct mapped, size.
  // Chop a system page and a byte off to test for rounding errors.
  size = 20 * 1024 * 1024;
  size -= SystemPageSize();
  size -= 1;
  ptr = allocator.root()->Alloc(size, type_name);
  char* char_ptr = reinterpret_cast<char*>(ptr);
  *(char_ptr + (size - 1)) = 'A';
  allocator.root()->Free(ptr);

  // Can we free null?
  allocator.root()->Free(nullptr);

  // Do we correctly get a null for a failed allocation?
  EXPECT_EQ(nullptr,
            allocator.root()->AllocFlags(PartitionAllocReturnNull,
                                         3u * 1024 * 1024 * 1024, type_name));
}

// Test that we can fetch the real allocated size after an allocation.
TEST_F(PartitionAllocTest, AllocGetSizeAndOffset) {
  void* ptr;
  size_t requested_size, actual_size, predicted_size;

  // Allocate something small.
  requested_size = 511 - kExtraAllocSize;
  predicted_size = allocator.root()->ActualSize(requested_size);
  ptr = allocator.root()->Alloc(requested_size, type_name);
  EXPECT_TRUE(ptr);
  actual_size = allocator.root()->GetSize(ptr);
  EXPECT_EQ(predicted_size, actual_size);
  EXPECT_LT(requested_size, actual_size);
#if defined(PA_HAS_64_BITS_POINTERS)
  if (features::IsPartitionAllocGigaCageEnabled()) {
    for (size_t offset = 0; offset < requested_size; ++offset) {
      EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
                offset);
    }
  }
#endif
  allocator.root()->Free(ptr);

  // Allocate a size that should be a perfect match for a bucket, because it
  // is an exact power of 2.
  requested_size = (256 * 1024) - kExtraAllocSize;
  predicted_size = allocator.root()->ActualSize(requested_size);
  ptr = allocator.root()->Alloc(requested_size, type_name);
  EXPECT_TRUE(ptr);
  actual_size = allocator.root()->GetSize(ptr);
  EXPECT_EQ(predicted_size, actual_size);
  EXPECT_EQ(requested_size, actual_size);
#if defined(PA_HAS_64_BITS_POINTERS)
  if (features::IsPartitionAllocGigaCageEnabled()) {
    for (size_t offset = 0; offset < requested_size; offset += 877) {
      EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
                offset);
    }
  }
#endif
  allocator.root()->Free(ptr);

  // Allocate a size that is a system page smaller than a bucket. GetSize()
  // should return a larger size than we asked for now.
  size_t num = 64;
  while (num * SystemPageSize() >= 1024 * 1024) {
    num /= 2;
  }
  requested_size = num * SystemPageSize() - SystemPageSize() - kExtraAllocSize;
  predicted_size = allocator.root()->ActualSize(requested_size);
  ptr = allocator.root()->Alloc(requested_size, type_name);
  EXPECT_TRUE(ptr);
  actual_size = allocator.root()->GetSize(ptr);
  EXPECT_EQ(predicted_size, actual_size);
  EXPECT_EQ(requested_size + SystemPageSize(), actual_size);
#if defined(PA_HAS_64_BITS_POINTERS)
  if (features::IsPartitionAllocGigaCageEnabled()) {
    for (size_t offset = 0; offset < requested_size; offset += 4999) {
      EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
                offset);
    }
  }
#endif

  // Allocate the maximum allowed bucketed size.
  requested_size = kMaxBucketed - kExtraAllocSize;
  predicted_size = allocator.root()->ActualSize(requested_size);
  ptr = allocator.root()->Alloc(requested_size, type_name);
  EXPECT_TRUE(ptr);
  actual_size = allocator.root()->GetSize(ptr);
  EXPECT_EQ(predicted_size, actual_size);
  EXPECT_EQ(requested_size, actual_size);
#if defined(PA_HAS_64_BITS_POINTERS)
  if (features::IsPartitionAllocGigaCageEnabled()) {
    for (size_t offset = 0; offset < requested_size; offset += 4999) {
      EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
                offset);
    }
  }
#endif

  // Check that we can write at the end of the reported size too.
  char* char_ptr = reinterpret_cast<char*>(ptr);
  *(char_ptr + (actual_size - 1)) = 'A';
  allocator.root()->Free(ptr);

  // Allocate something very large, and uneven.
  if (IsLargeMemoryDevice()) {
    requested_size = 512 * 1024 * 1024 - 1;
    predicted_size = allocator.root()->ActualSize(requested_size);
    ptr = allocator.root()->Alloc(requested_size, type_name);
    EXPECT_TRUE(ptr);
    actual_size = allocator.root()->GetSize(ptr);
    EXPECT_EQ(predicted_size, actual_size);
    EXPECT_LT(requested_size, actual_size);
    // Unlike above, don't test for PartitionAllocGetSlotOffset. Such large
    // allocations are direct-mapped, for which one can't easily obtain the
    // offset.
    allocator.root()->Free(ptr);
  }

  // Too large allocation.
  requested_size = MaxDirectMapped() + 1;
  predicted_size = allocator.root()->ActualSize(requested_size);
  EXPECT_EQ(requested_size, predicted_size);
}

#if defined(PA_HAS_64_BITS_POINTERS)
TEST_F(PartitionAllocTest, GetOffsetMultiplePages) {
  if (!features::IsPartitionAllocGigaCageEnabled())
    return;

  const size_t real_size = 80;
  const size_t requested_size = real_size - kExtraAllocSize;
  // Double check we don't end up with 0 or negative size.
  EXPECT_GT(requested_size, 0u);
  EXPECT_LE(requested_size, real_size);
  PartitionBucket<ThreadSafe>* bucket =
      allocator.root()->buckets + SizeToIndex(real_size);
  // Make sure the test is testing multiple partition pages case.
  EXPECT_GT(bucket->num_system_pages_per_slot_span,
            PartitionPageSize() / SystemPageSize());
  size_t num_slots =
      (bucket->num_system_pages_per_slot_span * SystemPageSize()) / real_size;
  std::vector<void*> ptrs;
  for (size_t i = 0; i < num_slots; ++i) {
    ptrs.push_back(allocator.root()->Alloc(requested_size, type_name));
  }
  for (size_t i = 0; i < num_slots; ++i) {
    char* ptr = static_cast<char*>(ptrs[i]);
    for (size_t offset = 0; offset < requested_size; offset += 13) {
      EXPECT_EQ(allocator.root()->GetSize(ptr), requested_size);
      EXPECT_EQ(PartitionAllocGetSlotOffset(ptr + offset), offset);
    }
    allocator.root()->Free(ptr);
  }
}
#endif  // defined(PA_HAS_64_BITS_POINTERS)

// Test the realloc() contract.
TEST_F(PartitionAllocTest, Realloc) {
  // realloc(0, size) should be equivalent to malloc().
  void* ptr = allocator.root()->Realloc(nullptr, kTestAllocSize, type_name);
  memset(ptr, 'A', kTestAllocSize);
  auto* slot_span =
      SlotSpan::FromPointer(PartitionPointerAdjustSubtract(true, ptr));
  // realloc(ptr, 0) should be equivalent to free().
  void* ptr2 = allocator.root()->Realloc(ptr, 0, type_name);
  EXPECT_EQ(nullptr, ptr2);
  EXPECT_EQ(PartitionPointerAdjustSubtract(true, ptr),
            slot_span->freelist_head);

  // Test that growing an allocation with realloc() copies everything from the
  // old allocation.
  size_t size = SystemPageSize() - kExtraAllocSize;
  EXPECT_EQ(size, allocator.root()->ActualSize(size));
  ptr = allocator.root()->Alloc(size, type_name);
  memset(ptr, 'A', size);
  ptr2 = allocator.root()->Realloc(ptr, size + 1, type_name);
  EXPECT_NE(ptr, ptr2);
  char* char_ptr2 = static_cast<char*>(ptr2);
  EXPECT_EQ('A', char_ptr2[0]);
  EXPECT_EQ('A', char_ptr2[size - 1]);
#if DCHECK_IS_ON()
  EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(char_ptr2[size]));
#endif

  // Test that shrinking an allocation with realloc() also copies everything
  // from the old allocation.
  ptr = allocator.root()->Realloc(ptr2, size - 1, type_name);
  EXPECT_NE(ptr2, ptr);
  char* char_ptr = static_cast<char*>(ptr);
  EXPECT_EQ('A', char_ptr[0]);
  EXPECT_EQ('A', char_ptr[size - 2]);
#if DCHECK_IS_ON()
  EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(char_ptr[size - 1]));
#endif

  allocator.root()->Free(ptr);

  // Test that shrinking a direct mapped allocation happens in-place.
  size = kMaxBucketed + 16 * SystemPageSize();
  ptr = allocator.root()->Alloc(size, type_name);
  size_t actual_size = allocator.root()->GetSize(ptr);
  ptr2 = allocator.root()->Realloc(ptr, kMaxBucketed + 8 * SystemPageSize(),
                                   type_name);
  EXPECT_EQ(ptr, ptr2);
  EXPECT_EQ(actual_size - 8 * SystemPageSize(),
            allocator.root()->GetSize(ptr2));

  // Test that a previously in-place shrunk direct mapped allocation can be
  // expanded up again within its original size.
  ptr = allocator.root()->Realloc(ptr2, size - SystemPageSize(), type_name);
  EXPECT_EQ(ptr2, ptr);
  EXPECT_EQ(actual_size - SystemPageSize(), allocator.root()->GetSize(ptr));

  // Test that a direct mapped allocation is not performed in-place when the
  // new size is small enough.
  ptr2 = allocator.root()->Realloc(ptr, SystemPageSize(), type_name);
  EXPECT_NE(ptr, ptr2);

  allocator.root()->Free(ptr2);
}

// Tests the handing out of freelists for partial slot spans.
TEST_F(PartitionAllocTest, PartialPageFreelists) {
  size_t big_size = SystemPageSize() - kExtraAllocSize;
  size_t bucket_index = SizeToIndex(big_size + kExtraAllocSize);
  PartitionRoot<ThreadSafe>::Bucket* bucket =
      &allocator.root()->buckets[bucket_index];
  EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);

  void* ptr = allocator.root()->Alloc(big_size, type_name);
  EXPECT_TRUE(ptr);

  auto* slot_span =
      SlotSpan::FromPointer(PartitionPointerAdjustSubtract(true, ptr));
  size_t total_slots =
      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
      (big_size + kExtraAllocSize);
  EXPECT_EQ(4u, total_slots);
  // The freelist should be empty, because the one provisioned system page
  // holds exactly one slot of this size and we have just allocated it.
  EXPECT_FALSE(slot_span->freelist_head);
  EXPECT_EQ(1, slot_span->num_allocated_slots);
  EXPECT_EQ(3, slot_span->num_unprovisioned_slots);

  void* ptr2 = allocator.root()->Alloc(big_size, type_name);
  EXPECT_TRUE(ptr2);
  EXPECT_FALSE(slot_span->freelist_head);
  EXPECT_EQ(2, slot_span->num_allocated_slots);
  EXPECT_EQ(2, slot_span->num_unprovisioned_slots);

  void* ptr3 = allocator.root()->Alloc(big_size, type_name);
  EXPECT_TRUE(ptr3);
  EXPECT_FALSE(slot_span->freelist_head);
  EXPECT_EQ(3, slot_span->num_allocated_slots);
  EXPECT_EQ(1, slot_span->num_unprovisioned_slots);

  void* ptr4 = allocator.root()->Alloc(big_size, type_name);
  EXPECT_TRUE(ptr4);
  EXPECT_FALSE(slot_span->freelist_head);
  EXPECT_EQ(4, slot_span->num_allocated_slots);
  EXPECT_EQ(0, slot_span->num_unprovisioned_slots);

  void* ptr5 = allocator.root()->Alloc(big_size, type_name);
  EXPECT_TRUE(ptr5);

  auto* slot_span2 =
      SlotSpan::FromPointer(PartitionPointerAdjustSubtract(true, ptr5));
  EXPECT_EQ(1, slot_span2->num_allocated_slots);

  // Churn things a little whilst there's a partial slot span freelist.
  allocator.root()->Free(ptr);
  ptr = allocator.root()->Alloc(big_size, type_name);
  void* ptr6 = allocator.root()->Alloc(big_size, type_name);

  allocator.root()->Free(ptr);
  allocator.root()->Free(ptr2);
  allocator.root()->Free(ptr3);
  allocator.root()->Free(ptr4);
  allocator.root()->Free(ptr5);
  allocator.root()->Free(ptr6);
  EXPECT_NE(-1, slot_span->empty_cache_index);
  EXPECT_NE(-1, slot_span2->empty_cache_index);
  EXPECT_TRUE(slot_span2->freelist_head);
  EXPECT_EQ(0, slot_span2->num_allocated_slots);

  // And test a couple of sizes that do not cross SystemPageSize() with a single
  // allocation.
  size_t medium_size = (SystemPageSize() / 2) - kExtraAllocSize;
  bucket_index = SizeToIndex(medium_size + kExtraAllocSize);
  bucket = &allocator.root()->buckets[bucket_index];
  EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);

  ptr = allocator.root()->Alloc(medium_size, type_name);
  EXPECT_TRUE(ptr);
  slot_span = SlotSpan::FromPointer(PartitionPointerAdjustSubtract(true, ptr));
  EXPECT_EQ(1, slot_span->num_allocated_slots);
  total_slots =
      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
      (medium_size + kExtraAllocSize);
  size_t first_slot_span_slots =
      SystemPageSize() / (medium_size + kExtraAllocSize);
  EXPECT_EQ(2u, first_slot_span_slots);
  EXPECT_EQ(total_slots - first_slot_span_slots,
            slot_span->num_unprovisioned_slots);

  allocator.root()->Free(ptr);

  size_t small_size = (SystemPageSize() / 4) - kExtraAllocSize;
  bucket_index = SizeToIndex(small_size + kExtraAllocSize);
  bucket = &allocator.root()->buckets[bucket_index];
  EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);

  ptr = allocator.root()->Alloc(small_size, type_name);
  EXPECT_TRUE(ptr);
  slot_span = SlotSpan::FromPointer(PartitionPointerAdjustSubtract(true, ptr));
  EXPECT_EQ(1, slot_span->num_allocated_slots);
  total_slots =
      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
      (small_size + kExtraAllocSize);
  first_slot_span_slots = SystemPageSize() / (small_size + kExtraAllocSize);
  EXPECT_EQ(total_slots - first_slot_span_slots,
            slot_span->num_unprovisioned_slots);

  allocator.root()->Free(ptr);
  EXPECT_TRUE(slot_span->freelist_head);
  EXPECT_EQ(0, slot_span->num_allocated_slots);

  size_t very_small_size = (kExtraAllocSize <= 32) ? (32 - kExtraAllocSize) : 0;
  bucket_index = SizeToIndex(very_small_size + kExtraAllocSize);
  bucket = &allocator.root()->buckets[bucket_index];
  EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);

  ptr = allocator.root()->Alloc(very_small_size, type_name);
  EXPECT_TRUE(ptr);
  slot_span = SlotSpan::FromPointer(PartitionPointerAdjustSubtract(true, ptr));
  EXPECT_EQ(1, slot_span->num_allocated_slots);
  total_slots =
      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
      (very_small_size + kExtraAllocSize);
  first_slot_span_slots =
      (SystemPageSize() + very_small_size + kExtraAllocSize - 1) /
      (very_small_size + kExtraAllocSize);
  EXPECT_EQ(total_slots - first_slot_span_slots,
            slot_span->num_unprovisioned_slots);

  allocator.root()->Free(ptr);
  EXPECT_TRUE(slot_span->freelist_head);
  EXPECT_EQ(0, slot_span->num_allocated_slots);

  // And try an allocation size (against the generic allocator) that is
  // larger than a system page.
  size_t page_and_a_half_size =
      (SystemPageSize() + (SystemPageSize() / 2)) - kExtraAllocSize;
  ptr = allocator.root()->Alloc(page_and_a_half_size, type_name);
  EXPECT_TRUE(ptr);
  slot_span = SlotSpan::FromPointer(PartitionPointerAdjustSubtract(true, ptr));
  EXPECT_EQ(1, slot_span->num_allocated_slots);
  EXPECT_TRUE(slot_span->freelist_head);
  total_slots =
      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
      (page_and_a_half_size + kExtraAllocSize);
  EXPECT_EQ(total_slots - 2, slot_span->num_unprovisioned_slots);
  allocator.root()->Free(ptr);

  // And then make sure that exactly the page size only faults one page.
  size_t page_size = SystemPageSize() - kExtraAllocSize;
  ptr = allocator.root()->Alloc(page_size, type_name);
  EXPECT_TRUE(ptr);
  slot_span = SlotSpan::FromPointer(PartitionPointerAdjustSubtract(true, ptr));
  EXPECT_EQ(1, slot_span->num_allocated_slots);
  EXPECT_TRUE(slot_span->freelist_head);
  total_slots =
      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
      (page_size + kExtraAllocSize);
  EXPECT_EQ(total_slots - 2, slot_span->num_unprovisioned_slots);
  allocator.root()->Free(ptr);
}

// Test some of the fragmentation-resistant properties of the allocator.
TEST_F(PartitionAllocTest, SlotSpanRefilling) {
  PartitionRoot<ThreadSafe>::Bucket* bucket =
      &allocator.root()->buckets[test_bucket_index_];

  // Grab two full slot spans and a non-full slot span.
  auto* slot_span1 = GetFullSlotSpan(kTestAllocSize);
  auto* slot_span2 = GetFullSlotSpan(kTestAllocSize);
  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
  EXPECT_TRUE(ptr);
  EXPECT_NE(slot_span1, bucket->active_slot_spans_head);
  EXPECT_NE(slot_span2, bucket->active_slot_spans_head);
  auto* slot_span =
      SlotSpan::FromPointer(PartitionPointerAdjustSubtract(true, ptr));
  EXPECT_EQ(1, slot_span->num_allocated_slots);

  // Work out a pointer into slot_span2 and free it; and then slot_span1 and
  // free it.
  char* ptr2 =
      reinterpret_cast<char*>(SlotSpan::ToPointer(slot_span1)) + kPointerOffset;
  allocator.root()->Free(ptr2);
  ptr2 =
      reinterpret_cast<char*>(SlotSpan::ToPointer(slot_span2)) + kPointerOffset;
  allocator.root()->Free(ptr2);

  // If we perform two allocations from the same bucket now, we expect to
  // refill both the nearly full slot spans.
  (void)allocator.root()->Alloc(kTestAllocSize, type_name);
  (void)allocator.root()->Alloc(kTestAllocSize, type_name);
  EXPECT_EQ(1, slot_span->num_allocated_slots);

  FreeFullSlotSpan(allocator.root(), slot_span2);
  FreeFullSlotSpan(allocator.root(), slot_span1);
  allocator.root()->Free(ptr);
}

// Basic tests to ensure that allocations work for partial page buckets.
TEST_F(PartitionAllocTest, PartialPages) {
  // Find a size that is backed by a partial partition page.
  size_t size = sizeof(void*);
  size_t bucket_index;

  PartitionRoot<ThreadSafe>::Bucket* bucket = nullptr;
  while (size < 1000u) {
    bucket_index = SizeToIndex(size + kExtraAllocSize);
    bucket = &allocator.root()->buckets[bucket_index];
    if (bucket->num_system_pages_per_slot_span %
        NumSystemPagesPerPartitionPage())
      break;
    size += sizeof(void*);
  }
  EXPECT_LT(size, 1000u);

  auto* slot_span1 = GetFullSlotSpan(size);
  auto* slot_span2 = GetFullSlotSpan(size);
  FreeFullSlotSpan(allocator.root(), slot_span2);
  FreeFullSlotSpan(allocator.root(), slot_span1);
}
1284 
1285 // Test correct handling if our mapping collides with another.
TEST_F(PartitionAllocTest,MappingCollision)1286 TEST_F(PartitionAllocTest, MappingCollision) {
1287   size_t num_pages_per_slot_span = GetNumPagesPerSlotSpan(kTestAllocSize);
1288   // The -2 is because the first and last partition pages in a super page are
1289   // guard pages.
1290   size_t num_slot_span_needed =
1291       (NumPartitionPagesPerSuperPage() - NumPartitionPagesPerTagBitmap() - 2) /
1292       num_pages_per_slot_span;
1293   size_t num_partition_pages_needed =
1294       num_slot_span_needed * num_pages_per_slot_span;
1295 
1296   auto first_super_page_pages =
1297       std::make_unique<SlotSpan*[]>(num_partition_pages_needed);
1298   auto second_super_page_pages =
1299       std::make_unique<SlotSpan*[]>(num_partition_pages_needed);
1300 
1301   size_t i;
1302   for (i = 0; i < num_partition_pages_needed; ++i)
1303     first_super_page_pages[i] = GetFullSlotSpan(kTestAllocSize);
1304 
1305   char* page_base =
1306       reinterpret_cast<char*>(SlotSpan::ToPointer(first_super_page_pages[0]));
1307   EXPECT_EQ(PartitionPageSize() + ReservedTagBitmapSize(),
1308             reinterpret_cast<uintptr_t>(page_base) & kSuperPageOffsetMask);
1309   page_base -= PartitionPageSize() - ReservedTagBitmapSize();
1310   // Map a single system page either side of the mapping for our allocations,
1311   // with the goal of tripping up alignment of the next mapping.
1312   void* map1 = AllocPages(
1313       page_base - PageAllocationGranularity(), PageAllocationGranularity(),
1314       PageAllocationGranularity(), PageInaccessible, PageTag::kPartitionAlloc);
1315   EXPECT_TRUE(map1);
1316   void* map2 = AllocPages(
1317       page_base + kSuperPageSize, PageAllocationGranularity(),
1318       PageAllocationGranularity(), PageInaccessible, PageTag::kPartitionAlloc);
1319   EXPECT_TRUE(map2);
1320 
1321   for (i = 0; i < num_partition_pages_needed; ++i)
1322     second_super_page_pages[i] = GetFullSlotSpan(kTestAllocSize);
1323 
1324   FreePages(map1, PageAllocationGranularity());
1325   FreePages(map2, PageAllocationGranularity());
1326 
1327   page_base =
1328       reinterpret_cast<char*>(SlotSpan::ToPointer(second_super_page_pages[0]));
1329   EXPECT_EQ(PartitionPageSize() + ReservedTagBitmapSize(),
1330             reinterpret_cast<uintptr_t>(page_base) & kSuperPageOffsetMask);
1331   page_base -= PartitionPageSize() - ReservedTagBitmapSize();
1332   // Map a single system page either side of the mapping for our allocations,
1333   // with the goal of tripping up alignment of the next mapping.
1334   map1 = AllocPages(page_base - PageAllocationGranularity(),
1335                     PageAllocationGranularity(), PageAllocationGranularity(),
1336                     PageReadWrite, PageTag::kPartitionAlloc);
1337   EXPECT_TRUE(map1);
1338   map2 = AllocPages(page_base + kSuperPageSize, PageAllocationGranularity(),
1339                     PageAllocationGranularity(), PageReadWrite,
1340                     PageTag::kPartitionAlloc);
1341   EXPECT_TRUE(map2);
1342   EXPECT_TRUE(TrySetSystemPagesAccess(map1, PageAllocationGranularity(),
1343                                       PageInaccessible));
1344   EXPECT_TRUE(TrySetSystemPagesAccess(map2, PageAllocationGranularity(),
1345                                       PageInaccessible));
1346 
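  // The inaccessible mappings flanking the second super page should force the
  // next super page somewhere else; the checks below verify that the new slot
  // span is partition-page aligned and lives in neither of the first two super
  // pages.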
1347   auto* slot_span_in_third_super_page = GetFullSlotSpan(kTestAllocSize);
1348   FreePages(map1, PageAllocationGranularity());
1349   FreePages(map2, PageAllocationGranularity());
1350 
1351   EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(
1352                     SlotSpan::ToPointer(slot_span_in_third_super_page)) &
1353                     PartitionPageOffsetMask());
1354 
1355   // And make sure we really did get a page in a new superpage.
1356   EXPECT_NE(reinterpret_cast<uintptr_t>(
1357                 SlotSpan::ToPointer(first_super_page_pages[0])) &
1358                 kSuperPageBaseMask,
1359             reinterpret_cast<uintptr_t>(
1360                 SlotSpan::ToPointer(slot_span_in_third_super_page)) &
1361                 kSuperPageBaseMask);
1362   EXPECT_NE(reinterpret_cast<uintptr_t>(
1363                 SlotSpan::ToPointer(second_super_page_pages[0])) &
1364                 kSuperPageBaseMask,
1365             reinterpret_cast<uintptr_t>(
1366                 SlotSpan::ToPointer(slot_span_in_third_super_page)) &
1367                 kSuperPageBaseMask);
1368 
1369   FreeFullSlotSpan(allocator.root(), slot_span_in_third_super_page);
1370   for (i = 0; i < num_partition_pages_needed; ++i) {
1371     FreeFullSlotSpan(allocator.root(), first_super_page_pages[i]);
1372     FreeFullSlotSpan(allocator.root(), second_super_page_pages[i]);
1373   }
1374 }
1375 
1376 // Tests that slot spans in the free slot span cache do get freed as
1377 // appropriate.
1378 TEST_F(PartitionAllocTest, FreeCache) {
1379   EXPECT_EQ(0U, allocator.root()->get_total_size_of_committed_pages());
1380 
1381   size_t big_size = 1000 - kExtraAllocSize;
1382   size_t bucket_index = SizeToIndex(big_size + kExtraAllocSize);
1383   PartitionBucket<base::internal::ThreadSafe>* bucket =
1384       &allocator.root()->buckets[bucket_index];
1385 
1386   void* ptr = allocator.root()->Alloc(big_size, type_name);
1387   EXPECT_TRUE(ptr);
1388   auto* slot_span =
1389       SlotSpan::FromPointer(PartitionPointerAdjustSubtract(true, ptr));
1390   EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
1391   EXPECT_EQ(1, slot_span->num_allocated_slots);
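  // Committing happens one slot span at a time; for this bucket a slot span
  // occupies a single partition page.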
1392   size_t expected_committed_size = PartitionPageSize();
1393   EXPECT_EQ(expected_committed_size,
1394             allocator.root()->get_total_size_of_committed_pages());
1395   allocator.root()->Free(ptr);
1396   EXPECT_EQ(0, slot_span->num_allocated_slots);
1397   EXPECT_NE(-1, slot_span->empty_cache_index);
1398   EXPECT_TRUE(slot_span->freelist_head);
1399 
1400   CycleFreeCache(kTestAllocSize);
1401 
1402   // Flushing the cache should have really freed the unused slot spans.
1403   EXPECT_FALSE(slot_span->freelist_head);
1404   EXPECT_EQ(-1, slot_span->empty_cache_index);
1405   EXPECT_EQ(0, slot_span->num_allocated_slots);
1406   PartitionBucket<base::internal::ThreadSafe>* cycle_free_cache_bucket =
1407       &allocator.root()->buckets[test_bucket_index_];
1408   size_t expected_size =
1409       cycle_free_cache_bucket->num_system_pages_per_slot_span *
1410       SystemPageSize();
1411   EXPECT_EQ(expected_size,
1412             allocator.root()->get_total_size_of_committed_pages());
1413 
1414   // Check that an allocation works OK while in this state (a freed slot span
1415   // as the active slot spans head).
1416   ptr = allocator.root()->Alloc(big_size, type_name);
1417   EXPECT_FALSE(bucket->empty_slot_spans_head);
1418   allocator.root()->Free(ptr);
1419 
1420   // Also check that a slot span that is bouncing immediately between empty and
1421   // used does not get freed.
1422   for (size_t i = 0; i < kMaxFreeableSpans * 2; ++i) {
1423     ptr = allocator.root()->Alloc(big_size, type_name);
1424     EXPECT_TRUE(slot_span->freelist_head);
1425     allocator.root()->Free(ptr);
1426     EXPECT_TRUE(slot_span->freelist_head);
1427   }
1428   EXPECT_EQ(expected_committed_size,
1429             allocator.root()->get_total_size_of_committed_pages());
1430 }
1431 
1432 // Tests for a bug we had with losing references to free slot spans.
1433 TEST_F(PartitionAllocTest, LostFreeSlotSpansBug) {
1434   size_t size = PartitionPageSize() - kExtraAllocSize;
1435 
1436   void* ptr = allocator.root()->Alloc(size, type_name);
1437   EXPECT_TRUE(ptr);
1438   void* ptr2 = allocator.root()->Alloc(size, type_name);
1439   EXPECT_TRUE(ptr2);
1440 
1441   SlotSpanMetadata<base::internal::ThreadSafe>* slot_span =
1442       SlotSpanMetadata<base::internal::ThreadSafe>::FromPointer(
1443           PartitionPointerAdjustSubtract(true, ptr));
1444   SlotSpanMetadata<base::internal::ThreadSafe>* slot_span2 =
1445       SlotSpanMetadata<base::internal::ThreadSafe>::FromPointer(
1446           PartitionPointerAdjustSubtract(true, ptr2));
1447   PartitionBucket<base::internal::ThreadSafe>* bucket = slot_span->bucket;
1448 
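  // |ptr|'s single-slot span is now full; a full slot span is taken off the
  // active list and its allocated-slot count is stored negated, which is why
  // -1 is expected below. |ptr2|'s span is still the active head.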
1449   EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
1450   EXPECT_EQ(-1, slot_span->num_allocated_slots);
1451   EXPECT_EQ(1, slot_span2->num_allocated_slots);
1452 
1453   allocator.root()->Free(ptr);
1454   allocator.root()->Free(ptr2);
1455 
1456   EXPECT_TRUE(bucket->empty_slot_spans_head);
1457   EXPECT_TRUE(bucket->empty_slot_spans_head->next_slot_span);
1458   EXPECT_EQ(0, slot_span->num_allocated_slots);
1459   EXPECT_EQ(0, slot_span2->num_allocated_slots);
1460   EXPECT_TRUE(slot_span->freelist_head);
1461   EXPECT_TRUE(slot_span2->freelist_head);
1462 
1463   CycleFreeCache(kTestAllocSize);
1464 
1465   EXPECT_FALSE(slot_span->freelist_head);
1466   EXPECT_FALSE(slot_span2->freelist_head);
1467 
1468   EXPECT_TRUE(bucket->empty_slot_spans_head);
1469   EXPECT_TRUE(bucket->empty_slot_spans_head->next_slot_span);
1470   EXPECT_EQ(
1471       SlotSpanMetadata<base::internal::ThreadSafe>::get_sentinel_slot_span(),
1472       bucket->active_slot_spans_head);
1473 
1474   // At this moment, we have two decommitted slot spans, on the empty list.
1475   ptr = allocator.root()->Alloc(size, type_name);
1476   EXPECT_TRUE(ptr);
1477   allocator.root()->Free(ptr);
1478 
1479   EXPECT_EQ(
1480       SlotSpanMetadata<base::internal::ThreadSafe>::get_sentinel_slot_span(),
1481       bucket->active_slot_spans_head);
1482   EXPECT_TRUE(bucket->empty_slot_spans_head);
1483   EXPECT_TRUE(bucket->decommitted_slot_spans_head);
1484 
1485   CycleFreeCache(kTestAllocSize);
1486 
1487   // We're now set up to trigger a historical bug by scanning over the active
1488   // slot spans list. The current code gets into a different state, but we'll
1489   // keep the test as being an interesting corner case.
1490   ptr = allocator.root()->Alloc(size, type_name);
1491   EXPECT_TRUE(ptr);
1492   allocator.root()->Free(ptr);
1493 
1494   EXPECT_TRUE(bucket->active_slot_spans_head);
1495   EXPECT_TRUE(bucket->empty_slot_spans_head);
1496   EXPECT_TRUE(bucket->decommitted_slot_spans_head);
1497 }
1498 
1499 // Death tests misbehave on Android, http://crbug.com/643760.
1500 #if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
1501 
1502 // Unit tests that check that an allocation fails in "return null" mode,
1503 // that repeating it doesn't crash, and that it still returns null. The tests
1504 // need to stress memory subsystem limits to do so, hence they try to allocate
1505 // 6 GB of memory, each test using a different per-allocation block size.
1506 //
1507 // On 64-bit systems we need to restrict the address space to force allocation
1508 // failure, so these tests run only on POSIX systems that provide setrlimit(),
1509 // and use it to limit address space to 6GB.
1510 //
1511 // Disable these tests on Android because, due to the allocation-heavy behavior,
1512 // they tend to get OOM-killed rather than pass.
1513 // TODO(https://crbug.com/779645): Fuchsia currently sets OS_POSIX, but does
1514 // not provide a working setrlimit().
1515 //
1516 // Disable these tests on Windows, since they run more slowly there and tend
1517 // to time out, causing flakiness.
1518 #if !defined(OS_WIN) &&            \
1519     (!defined(ARCH_CPU_64_BITS) || \
1520      (defined(OS_POSIX) && !(defined(OS_APPLE) || defined(OS_ANDROID))))
1521 
1522 // The following six tests wrap a called function in an expect death statement
1523 // to perform their test, because they are non-hermetic. Specifically they are
1524 // going to attempt to exhaust the allocatable memory, which leaves the
1525 // allocator in a bad global state.
1526 // Performing them as death tests causes them to be forked into their own
1527 // process, so they won't pollute other tests.
1528 TEST_F(PartitionAllocDeathTest, RepeatedAllocReturnNullDirect) {
1529   // A direct-mapped allocation size.
1530   EXPECT_DEATH(DoReturnNullTest(32 * 1024 * 1024, kPartitionAllocFlags),
1531                "DoReturnNullTest");
1532 }
1533 
1534 // Repeating above test with Realloc
1535 TEST_F(PartitionAllocDeathTest, RepeatedReallocReturnNullDirect) {
1536   EXPECT_DEATH(DoReturnNullTest(32 * 1024 * 1024, kPartitionReallocFlags),
1537                "DoReturnNullTest");
1538 }
1539 
1540 // Repeating above test with TryRealloc
1541 TEST_F(PartitionAllocDeathTest, RepeatedTryReallocReturnNullDirect) {
1542   EXPECT_DEATH(DoReturnNullTest(32 * 1024 * 1024, kPartitionRootTryRealloc),
1543                "DoReturnNullTest");
1544 }
1545 
1546 // Test "return null" with a 512 kB block size.
1547 TEST_F(PartitionAllocDeathTest, RepeatedAllocReturnNull) {
1548   // A single-slot but non-direct-mapped allocation size.
1549   EXPECT_DEATH(DoReturnNullTest(512 * 1024, kPartitionAllocFlags),
1550                "DoReturnNullTest");
1551 }
1552 
1553 // Repeating above test with Realloc.
1554 TEST_F(PartitionAllocDeathTest, RepeatedReallocReturnNull) {
1555   EXPECT_DEATH(DoReturnNullTest(512 * 1024, kPartitionReallocFlags),
1556                "DoReturnNullTest");
1557 }
1558 
1559 // Repeating above test with TryRealloc.
1560 TEST_F(PartitionAllocDeathTest, RepeatedTryReallocReturnNull) {
1561   EXPECT_DEATH(DoReturnNullTest(512 * 1024, kPartitionRootTryRealloc),
1562                "DoReturnNullTest");
1563 }
1564 
1565 #endif  // !defined(OS_WIN) && (!defined(ARCH_CPU_64_BITS) ||
1566         // (defined(OS_POSIX) && !(defined(OS_APPLE) || defined(OS_ANDROID))))
1567 
1568 // Make sure that malloc(-1) dies.
1569 // In the past, we had an integer overflow that would alias malloc(-1) to
1570 // malloc(0), which is not good.
1571 TEST_F(PartitionAllocDeathTest, LargeAllocs) {
1572   // Largest alloc.
1573   EXPECT_DEATH(allocator.root()->Alloc(static_cast<size_t>(-1), type_name), "");
1574   // And the smallest allocation we expect to die.
1575   EXPECT_DEATH(allocator.root()->Alloc(MaxDirectMapped() + 1, type_name), "");
1576 }
1577 
1578 // TODO(glazunov): make BackupRefPtr compatible with the double-free detection.
1579 #if !ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
1580 
1581 // Check that our immediate double-free detection works.
1582 TEST_F(PartitionAllocDeathTest, ImmediateDoubleFree) {
1583   void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
1584   EXPECT_TRUE(ptr);
1585   allocator.root()->Free(ptr);
1586   EXPECT_DEATH(allocator.root()->Free(ptr), "");
1587 }
1588 
1589 // Check that our refcount-based double-free detection works.
1590 TEST_F(PartitionAllocDeathTest, RefcountDoubleFree) {
1591   void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
1592   EXPECT_TRUE(ptr);
1593   void* ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
1594   EXPECT_TRUE(ptr2);
1595   allocator.root()->Free(ptr);
1596   allocator.root()->Free(ptr2);
1597   // This is not an immediate double-free so our immediate detection won't
1598   // fire. However, it does take the "refcount" of the allocation to -1, which
1599   // is illegal and should be trapped.
1600   EXPECT_DEATH(allocator.root()->Free(ptr), "");
1601 }
1602 
1603 #endif  // !ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
1604 
1605 // Check that guard pages are present where expected.
1606 TEST_F(PartitionAllocDeathTest, GuardPages) {
1607 // PartitionAlloc adds PartitionPageSize() to the requested size
1608 // (for metadata), and then rounds that size to PageAllocationGranularity().
1609 // To be able to reliably write one past a direct allocation, choose a size
1610 // that's
1611 // a) larger than kMaxBucketed (to make the allocation direct)
1612 // b) aligned at PageAllocationGranularity() boundaries after
1613 //    PartitionPageSize() has been added to it.
1614 // (On 32-bit, PartitionAlloc adds another SystemPageSize() to the
1615 // allocation size before rounding, but there it marks the memory right
1616 // after size as inaccessible, so it's fine to write 1 past the size we
1617 // hand to PartitionAlloc and we don't need to worry about allocation
1618 // granularities.)
1619 #define ALIGN(N, A) (((N) + (A)-1) / (A) * (A))
1620   const size_t kSize = ALIGN(kMaxBucketed + 1 + PartitionPageSize(),
1621                              PageAllocationGranularity()) -
1622                        PartitionPageSize();
1623 #undef ALIGN
1624   EXPECT_GT(kSize, kMaxBucketed)
1625       << "allocation not large enough for direct allocation";
1626   size_t size = kSize - kExtraAllocSize;
1627   void* ptr = allocator.root()->Alloc(size, type_name);
1628 
1629   EXPECT_TRUE(ptr);
1630   char* char_ptr = reinterpret_cast<char*>(ptr) - kPointerOffset;
1631 
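  // One byte before the allocation and one byte past its end both land in
  // inaccessible guard regions of the direct-mapped allocation, so each write
  // must crash.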
1632   EXPECT_DEATH(*(char_ptr - 1) = 'A', "");
1633   EXPECT_DEATH(*(char_ptr + size + kExtraAllocSize) = 'A', "");
1634 
1635   allocator.root()->Free(ptr);
1636 }
1637 
1638 #endif  // defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
1639 
1640 // Tests that |PartitionDumpStats| runs without crashing and returns non-zero
1641 // values when memory is allocated.
1642 TEST_F(PartitionAllocTest, DumpMemoryStats) {
1643   {
1644     void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
1645     MockPartitionStatsDumper mock_stats_dumper;
1646     allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
1647                                 &mock_stats_dumper);
1648     EXPECT_TRUE(mock_stats_dumper.IsMemoryAllocationRecorded());
1649     allocator.root()->Free(ptr);
1650   }
1651 
1652   // This series of tests checks the active -> empty -> decommitted states.
1653   {
1654     {
1655       void* ptr = allocator.root()->Alloc(2048 - kExtraAllocSize, type_name);
1656       MockPartitionStatsDumper dumper;
1657       allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
1658                                   &dumper);
1659       EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
1660 
1661       const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
1662       EXPECT_TRUE(stats);
1663       EXPECT_TRUE(stats->is_valid);
1664       EXPECT_EQ(2048u, stats->bucket_slot_size);
1665       EXPECT_EQ(2048u, stats->active_bytes);
1666       EXPECT_EQ(SystemPageSize(), stats->resident_bytes);
1667       EXPECT_EQ(0u, stats->decommittable_bytes);
1668       EXPECT_EQ(0u, stats->discardable_bytes);
1669       EXPECT_EQ(0u, stats->num_full_slot_spans);
1670       EXPECT_EQ(1u, stats->num_active_slot_spans);
1671       EXPECT_EQ(0u, stats->num_empty_slot_spans);
1672       EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
1673       allocator.root()->Free(ptr);
1674     }
1675 
1676     {
1677       MockPartitionStatsDumper dumper;
1678       allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
1679                                   &dumper);
1680       EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
1681 
1682       const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
1683       EXPECT_TRUE(stats);
1684       EXPECT_TRUE(stats->is_valid);
1685       EXPECT_EQ(2048u, stats->bucket_slot_size);
1686       EXPECT_EQ(0u, stats->active_bytes);
1687       EXPECT_EQ(SystemPageSize(), stats->resident_bytes);
1688       EXPECT_EQ(SystemPageSize(), stats->decommittable_bytes);
1689       EXPECT_EQ(0u, stats->discardable_bytes);
1690       EXPECT_EQ(0u, stats->num_full_slot_spans);
1691       EXPECT_EQ(0u, stats->num_active_slot_spans);
1692       EXPECT_EQ(1u, stats->num_empty_slot_spans);
1693       EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
1694     }
1695 
1696     // TODO(crbug.com/722911): Commenting this out causes this test to fail when
1697     // run singly (--gtest_filter=PartitionAllocTest.DumpMemoryStats), but not
1698     // when run with the others (--gtest_filter=PartitionAllocTest.*).
1699     CycleFreeCache(kTestAllocSize);
1700 
1701     {
1702       MockPartitionStatsDumper dumper;
1703       allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
1704                                   &dumper);
1705       EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
1706 
1707       const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
1708       EXPECT_TRUE(stats);
1709       EXPECT_TRUE(stats->is_valid);
1710       EXPECT_EQ(2048u, stats->bucket_slot_size);
1711       EXPECT_EQ(0u, stats->active_bytes);
1712       EXPECT_EQ(0u, stats->resident_bytes);
1713       EXPECT_EQ(0u, stats->decommittable_bytes);
1714       EXPECT_EQ(0u, stats->discardable_bytes);
1715       EXPECT_EQ(0u, stats->num_full_slot_spans);
1716       EXPECT_EQ(0u, stats->num_active_slot_spans);
1717       EXPECT_EQ(0u, stats->num_empty_slot_spans);
1718       EXPECT_EQ(1u, stats->num_decommitted_slot_spans);
1719     }
1720   }
1721 
1722   // This test checks for correct empty slot span list accounting.
1723   {
1724     size_t size = PartitionPageSize() - kExtraAllocSize;
1725     void* ptr1 = allocator.root()->Alloc(size, type_name);
1726     void* ptr2 = allocator.root()->Alloc(size, type_name);
1727     allocator.root()->Free(ptr1);
1728     allocator.root()->Free(ptr2);
1729 
1730     CycleFreeCache(kTestAllocSize);
1731 
1732     ptr1 = allocator.root()->Alloc(size, type_name);
1733 
1734     {
1735       MockPartitionStatsDumper dumper;
1736       allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
1737                                   &dumper);
1738       EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
1739 
1740       const PartitionBucketMemoryStats* stats =
1741           dumper.GetBucketStats(PartitionPageSize());
1742       EXPECT_TRUE(stats);
1743       EXPECT_TRUE(stats->is_valid);
1744       EXPECT_EQ(PartitionPageSize(), stats->bucket_slot_size);
1745       EXPECT_EQ(PartitionPageSize(), stats->active_bytes);
1746       EXPECT_EQ(PartitionPageSize(), stats->resident_bytes);
1747       EXPECT_EQ(0u, stats->decommittable_bytes);
1748       EXPECT_EQ(0u, stats->discardable_bytes);
1749       EXPECT_EQ(1u, stats->num_full_slot_spans);
1750       EXPECT_EQ(0u, stats->num_active_slot_spans);
1751       EXPECT_EQ(0u, stats->num_empty_slot_spans);
1752       EXPECT_EQ(1u, stats->num_decommitted_slot_spans);
1753     }
1754     allocator.root()->Free(ptr1);
1755   }
1756 
1757   // This test checks for correct direct mapped accounting.
1758   {
1759     size_t size_smaller = kMaxBucketed + 1;
1760     size_t size_bigger = (kMaxBucketed * 2) + 1;
1761     size_t real_size_smaller =
1762         (size_smaller + SystemPageOffsetMask()) & SystemPageBaseMask();
1763     size_t real_size_bigger =
1764         (size_bigger + SystemPageOffsetMask()) & SystemPageBaseMask();
1765     void* ptr = allocator.root()->Alloc(size_smaller, type_name);
1766     void* ptr2 = allocator.root()->Alloc(size_bigger, type_name);
1767 
1768     {
1769       MockPartitionStatsDumper dumper;
1770       allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
1771                                   &dumper);
1772       EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
1773 
1774       const PartitionBucketMemoryStats* stats =
1775           dumper.GetBucketStats(real_size_smaller);
1776       EXPECT_TRUE(stats);
1777       EXPECT_TRUE(stats->is_valid);
1778       EXPECT_TRUE(stats->is_direct_map);
1779       EXPECT_EQ(real_size_smaller, stats->bucket_slot_size);
1780       EXPECT_EQ(real_size_smaller, stats->active_bytes);
1781       EXPECT_EQ(real_size_smaller, stats->resident_bytes);
1782       EXPECT_EQ(0u, stats->decommittable_bytes);
1783       EXPECT_EQ(0u, stats->discardable_bytes);
1784       EXPECT_EQ(1u, stats->num_full_slot_spans);
1785       EXPECT_EQ(0u, stats->num_active_slot_spans);
1786       EXPECT_EQ(0u, stats->num_empty_slot_spans);
1787       EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
1788 
1789       stats = dumper.GetBucketStats(real_size_bigger);
1790       EXPECT_TRUE(stats);
1791       EXPECT_TRUE(stats->is_valid);
1792       EXPECT_TRUE(stats->is_direct_map);
1793       EXPECT_EQ(real_size_bigger, stats->bucket_slot_size);
1794       EXPECT_EQ(real_size_bigger, stats->active_bytes);
1795       EXPECT_EQ(real_size_bigger, stats->resident_bytes);
1796       EXPECT_EQ(0u, stats->decommittable_bytes);
1797       EXPECT_EQ(0u, stats->discardable_bytes);
1798       EXPECT_EQ(1u, stats->num_full_slot_spans);
1799       EXPECT_EQ(0u, stats->num_active_slot_spans);
1800       EXPECT_EQ(0u, stats->num_empty_slot_spans);
1801       EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
1802     }
1803 
1804     allocator.root()->Free(ptr2);
1805     allocator.root()->Free(ptr);
1806 
1807     // Whilst we're here, allocate again and free with different ordering to
1808     // give a workout to our linked list code.
1809     ptr = allocator.root()->Alloc(size_smaller, type_name);
1810     ptr2 = allocator.root()->Alloc(size_bigger, type_name);
1811     allocator.root()->Free(ptr);
1812     allocator.root()->Free(ptr2);
1813   }
1814 
1815   // This test checks large-but-not-quite-direct allocations.
1816   {
1817     const size_t requested_size = 16 * SystemPageSize();
1818     void* ptr = allocator.root()->Alloc(requested_size + 1, type_name);
1819 
1820     {
1821       MockPartitionStatsDumper dumper;
1822       allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
1823                                   &dumper);
1824       EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
1825 
1826       size_t slot_size =
1827           requested_size + (requested_size / kNumBucketsPerOrder);
1828       const PartitionBucketMemoryStats* stats =
1829           dumper.GetBucketStats(slot_size);
1830       EXPECT_TRUE(stats);
1831       EXPECT_TRUE(stats->is_valid);
1832       EXPECT_FALSE(stats->is_direct_map);
1833       EXPECT_EQ(slot_size, stats->bucket_slot_size);
1834       EXPECT_EQ(requested_size + 1 + kExtraAllocSize, stats->active_bytes);
1835       EXPECT_EQ(slot_size, stats->resident_bytes);
1836       EXPECT_EQ(0u, stats->decommittable_bytes);
1837       EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
1838       EXPECT_EQ(1u, stats->num_full_slot_spans);
1839       EXPECT_EQ(0u, stats->num_active_slot_spans);
1840       EXPECT_EQ(0u, stats->num_empty_slot_spans);
1841       EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
1842     }
1843 
1844     allocator.root()->Free(ptr);
1845 
1846     {
1847       MockPartitionStatsDumper dumper;
1848       allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
1849                                   &dumper);
1850       EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
1851 
1852       size_t slot_size =
1853           requested_size + (requested_size / kNumBucketsPerOrder);
1854       const PartitionBucketMemoryStats* stats =
1855           dumper.GetBucketStats(slot_size);
1856       EXPECT_TRUE(stats);
1857       EXPECT_TRUE(stats->is_valid);
1858       EXPECT_FALSE(stats->is_direct_map);
1859       EXPECT_EQ(slot_size, stats->bucket_slot_size);
1860       EXPECT_EQ(0u, stats->active_bytes);
1861       EXPECT_EQ(slot_size, stats->resident_bytes);
1862       EXPECT_EQ(slot_size, stats->decommittable_bytes);
1863       EXPECT_EQ(0u, stats->num_full_slot_spans);
1864       EXPECT_EQ(0u, stats->num_active_slot_spans);
1865       EXPECT_EQ(1u, stats->num_empty_slot_spans);
1866       EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
1867     }
1868 
1869     void* ptr2 = allocator.root()->Alloc(requested_size + SystemPageSize() + 1,
1870                                          type_name);
1871     EXPECT_EQ(ptr, ptr2);
1872 
1873     {
1874       MockPartitionStatsDumper dumper;
1875       allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
1876                                   &dumper);
1877       EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
1878 
1879       size_t slot_size =
1880           requested_size + (requested_size / kNumBucketsPerOrder);
1881       const PartitionBucketMemoryStats* stats =
1882           dumper.GetBucketStats(slot_size);
1883       EXPECT_TRUE(stats);
1884       EXPECT_TRUE(stats->is_valid);
1885       EXPECT_FALSE(stats->is_direct_map);
1886       EXPECT_EQ(slot_size, stats->bucket_slot_size);
1887       EXPECT_EQ(requested_size + SystemPageSize() + 1 + kExtraAllocSize,
1888                 stats->active_bytes);
1889       EXPECT_EQ(slot_size, stats->resident_bytes);
1890       EXPECT_EQ(0u, stats->decommittable_bytes);
1891       EXPECT_EQ(0u, stats->discardable_bytes);
1892       EXPECT_EQ(1u, stats->num_full_slot_spans);
1893       EXPECT_EQ(0u, stats->num_active_slot_spans);
1894       EXPECT_EQ(0u, stats->num_empty_slot_spans);
1895       EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
1896     }
1897 
1898     allocator.root()->Free(ptr2);
1899   }
1900 }
1901 
1902 // Tests the API to purge freeable memory.
1903 TEST_F(PartitionAllocTest, Purge) {
1904   char* ptr = reinterpret_cast<char*>(
1905       allocator.root()->Alloc(2048 - kExtraAllocSize, type_name));
1906   allocator.root()->Free(ptr);
1907   {
1908     MockPartitionStatsDumper dumper;
1909     allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
1910                                 &dumper);
1911     EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
1912 
1913     const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
1914     EXPECT_TRUE(stats);
1915     EXPECT_TRUE(stats->is_valid);
1916     EXPECT_EQ(SystemPageSize(), stats->decommittable_bytes);
1917     EXPECT_EQ(SystemPageSize(), stats->resident_bytes);
1918   }
1919   allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptySlotSpans);
1920   {
1921     MockPartitionStatsDumper dumper;
1922     allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
1923                                 &dumper);
1924     EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
1925 
1926     const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
1927     EXPECT_TRUE(stats);
1928     EXPECT_TRUE(stats->is_valid);
1929     EXPECT_EQ(0u, stats->decommittable_bytes);
1930     EXPECT_EQ(0u, stats->resident_bytes);
1931   }
1932   // Calling purge again here is a good way of testing we didn't mess up the
1933   // state of the free cache ring.
1934   allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptySlotSpans);
1935 
1936   char* big_ptr =
1937       reinterpret_cast<char*>(allocator.root()->Alloc(256 * 1024, type_name));
1938   allocator.root()->Free(big_ptr);
1939   allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptySlotSpans);
1940 
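  // After the purges, neither the page backing the 2048-byte slot nor the
  // pages backing the 256 KiB allocation should still be resident.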
1941   CHECK_PAGE_IN_CORE(ptr - kPointerOffset, false);
1942   CHECK_PAGE_IN_CORE(big_ptr - kPointerOffset, false);
1943 }
1944 
1945 // Tests that we prefer to allocate into a non-empty partition page over an
1946 // empty one. This is an important aspect of minimizing memory usage for some
1947 // allocation sizes, particularly larger ones.
1948 TEST_F(PartitionAllocTest, PreferActiveOverEmpty) {
1949   size_t size = (SystemPageSize() * 2) - kExtraAllocSize;
1950   // Allocate 3 full slot spans' worth of 2-system-page allocations (8192
1951   // bytes with 4 KiB system pages). Each such slot span is one partition page
1952   // holding 2 slots.
1953   void* ptr1 = allocator.root()->Alloc(size, type_name);
1954   void* ptr2 = allocator.root()->Alloc(size, type_name);
1955   void* ptr3 = allocator.root()->Alloc(size, type_name);
1956   void* ptr4 = allocator.root()->Alloc(size, type_name);
1957   void* ptr5 = allocator.root()->Alloc(size, type_name);
1958   void* ptr6 = allocator.root()->Alloc(size, type_name);
1959 
1960   SlotSpanMetadata<base::internal::ThreadSafe>* slot_span1 =
1961       SlotSpanMetadata<base::internal::ThreadSafe>::FromPointer(
1962           PartitionPointerAdjustSubtract(true, ptr1));
1963   SlotSpanMetadata<base::internal::ThreadSafe>* slot_span2 =
1964       SlotSpanMetadata<base::internal::ThreadSafe>::FromPointer(
1965           PartitionPointerAdjustSubtract(true, ptr3));
1966   SlotSpanMetadata<base::internal::ThreadSafe>* slot_span3 =
1967       SlotSpanMetadata<base::internal::ThreadSafe>::FromPointer(
1968           PartitionPointerAdjustSubtract(true, ptr6));
1969   EXPECT_NE(slot_span1, slot_span2);
1970   EXPECT_NE(slot_span2, slot_span3);
1971   PartitionBucket<base::internal::ThreadSafe>* bucket = slot_span1->bucket;
1972   EXPECT_EQ(slot_span3, bucket->active_slot_spans_head);
1973 
1974   // Free up the 2nd slot in each slot span.
1975   // This leaves the active list containing 3 slot spans, each with 1 used and 1
1976   // free slot. The active slot span will be the one containing ptr1.
1977   allocator.root()->Free(ptr6);
1978   allocator.root()->Free(ptr4);
1979   allocator.root()->Free(ptr2);
1980   EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
1981 
1982   // Empty the middle slot span in the active list.
1983   allocator.root()->Free(ptr3);
1984   EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
1985 
1986   // Empty the first slot span in the active list -- also the current slot span.
1987   allocator.root()->Free(ptr1);
1988 
1989   // A good choice here is to re-fill the third slot span since the first two
1990   // are empty. We used to fail that.
1991   void* ptr7 = allocator.root()->Alloc(size, type_name);
1992   EXPECT_EQ(ptr6, ptr7);
1993   EXPECT_EQ(slot_span3, bucket->active_slot_spans_head);
1994 
1995   allocator.root()->Free(ptr5);
1996   allocator.root()->Free(ptr7);
1997 }
1998 
1999 // Tests the API to purge discardable memory.
2000 TEST_F(PartitionAllocTest, PurgeDiscardableSecondPage) {
2001   // Free the second of two 4096 byte allocations and then purge.
2002   void* ptr1 =
2003       allocator.root()->Alloc(SystemPageSize() - kExtraAllocSize, type_name);
2004   char* ptr2 = reinterpret_cast<char*>(
2005       allocator.root()->Alloc(SystemPageSize() - kExtraAllocSize, type_name));
2006   allocator.root()->Free(ptr2);
2007   SlotSpanMetadata<base::internal::ThreadSafe>* slot_span =
2008       SlotSpanMetadata<base::internal::ThreadSafe>::FromPointer(
2009           PartitionPointerAdjustSubtract(true, ptr1));
2010   EXPECT_EQ(2u, slot_span->num_unprovisioned_slots);
2011   {
2012     MockPartitionStatsDumper dumper;
2013     allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2014                                 &dumper);
2015     EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2016 
2017     const PartitionBucketMemoryStats* stats =
2018         dumper.GetBucketStats(SystemPageSize());
2019     EXPECT_TRUE(stats);
2020     EXPECT_TRUE(stats->is_valid);
2021     EXPECT_EQ(0u, stats->decommittable_bytes);
2022     EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
2023     EXPECT_EQ(SystemPageSize(), stats->active_bytes);
2024     EXPECT_EQ(2 * SystemPageSize(), stats->resident_bytes);
2025   }
2026   CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, true);
2027   allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
2028   CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, false);
2029   EXPECT_EQ(3u, slot_span->num_unprovisioned_slots);
2030 
2031   allocator.root()->Free(ptr1);
2032 }
2033 
2034 TEST_F(PartitionAllocTest, PurgeDiscardableFirstPage) {
2035   // Free the first of two 4096 byte allocations and then purge.
2036   char* ptr1 = reinterpret_cast<char*>(
2037       allocator.root()->Alloc(SystemPageSize() - kExtraAllocSize, type_name));
2038   void* ptr2 =
2039       allocator.root()->Alloc(SystemPageSize() - kExtraAllocSize, type_name);
2040   allocator.root()->Free(ptr1);
2041   {
2042     MockPartitionStatsDumper dumper;
2043     allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2044                                 &dumper);
2045     EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2046 
2047     const PartitionBucketMemoryStats* stats =
2048         dumper.GetBucketStats(SystemPageSize());
2049     EXPECT_TRUE(stats);
2050     EXPECT_TRUE(stats->is_valid);
2051     EXPECT_EQ(0u, stats->decommittable_bytes);
2052 #if defined(OS_WIN)
2053     EXPECT_EQ(0u, stats->discardable_bytes);
2054 #else
2055     EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
2056 #endif
2057     EXPECT_EQ(SystemPageSize(), stats->active_bytes);
2058     EXPECT_EQ(2 * SystemPageSize(), stats->resident_bytes);
2059   }
2060   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
2061   allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
2062   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, false);
2063 
2064   allocator.root()->Free(ptr2);
2065 }
2066 
2067 TEST_F(PartitionAllocTest, PurgeDiscardableNonPageSizedAlloc) {
2068   const size_t requested_size = 2.25 * SystemPageSize();
2069   char* ptr1 = reinterpret_cast<char*>(
2070       allocator.root()->Alloc(requested_size - kExtraAllocSize, type_name));
2071   void* ptr2 =
2072       allocator.root()->Alloc(requested_size - kExtraAllocSize, type_name);
2073   void* ptr3 =
2074       allocator.root()->Alloc(requested_size - kExtraAllocSize, type_name);
2075   void* ptr4 =
2076       allocator.root()->Alloc(requested_size - kExtraAllocSize, type_name);
2077   memset(ptr1, 'A', requested_size - kExtraAllocSize);
2078   memset(ptr2, 'A', requested_size - kExtraAllocSize);
2079   allocator.root()->Free(ptr2);
2080   allocator.root()->Free(ptr1);
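  // Four ~2.25-page slots occupy 9 system pages. With the first two slots
  // freed, the purge should be able to discard the system pages covered only
  // by freed slots, as verified below.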
2081   {
2082     MockPartitionStatsDumper dumper;
2083     allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2084                                 &dumper);
2085     EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2086 
2087     const PartitionBucketMemoryStats* stats =
2088         dumper.GetBucketStats(requested_size);
2089     EXPECT_TRUE(stats);
2090     EXPECT_TRUE(stats->is_valid);
2091     EXPECT_EQ(0u, stats->decommittable_bytes);
2092     EXPECT_EQ(2 * SystemPageSize(), stats->discardable_bytes);
2093     EXPECT_EQ(requested_size * 2, stats->active_bytes);
2094     EXPECT_EQ(9 * SystemPageSize(), stats->resident_bytes);
2095   }
2096   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
2097   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
2098   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
2099   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
2100   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), true);
2101   allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
2102   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
2103   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
2104   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
2105   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
2106   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), true);
2107 
2108   allocator.root()->Free(ptr3);
2109   allocator.root()->Free(ptr4);
2110 }
2111 
2112 TEST_F(PartitionAllocTest, PurgeDiscardableManyPages) {
2113   // On systems with large pages, use fewer pages because:
2114   // 1) There must be a bucket for kFirstAllocPages * SystemPageSize(), and
2115   // 2) On low-end systems, using too many large pages can OOM during the test.
2116   const bool kHasLargePages = SystemPageSize() > 4096;
2117   const size_t kFirstAllocPages = kHasLargePages ? 32 : 64;
2118   const size_t kSecondAllocPages = kHasLargePages ? 31 : 61;
2119 
2120   // Detect case (1) from above.
2121   DCHECK_LT(kFirstAllocPages * SystemPageSize(), 1UL << kMaxBucketedOrder);
2122 
2123   const size_t kDeltaPages = kFirstAllocPages - kSecondAllocPages;
2124 
2125   {
2126     ScopedPageAllocation p(allocator, kFirstAllocPages);
2127     p.TouchAllPages();
2128   }
2129 
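  // Freeing the first allocation (when |p| went out of scope above) leaves its
  // slot span committed. The second, slightly smaller allocation rounds up to
  // the same bucket and reuses that span, so the trailing kDeltaPages system
  // pages are unused and should be reported as discardable.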
2130   ScopedPageAllocation p(allocator, kSecondAllocPages);
2131 
2132   MockPartitionStatsDumper dumper;
2133   allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2134                               &dumper);
2135   EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2136 
2137   const PartitionBucketMemoryStats* stats =
2138       dumper.GetBucketStats(kFirstAllocPages * SystemPageSize());
2139   EXPECT_TRUE(stats);
2140   EXPECT_TRUE(stats->is_valid);
2141   EXPECT_EQ(0u, stats->decommittable_bytes);
2142   EXPECT_EQ(kDeltaPages * SystemPageSize(), stats->discardable_bytes);
2143   EXPECT_EQ(kSecondAllocPages * SystemPageSize(), stats->active_bytes);
2144   EXPECT_EQ(kFirstAllocPages * SystemPageSize(), stats->resident_bytes);
2145 
2146   for (size_t i = 0; i < kFirstAllocPages; i++)
2147     CHECK_PAGE_IN_CORE(p.PageAtIndex(i), true);
2148 
2149   allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
2150 
2151   for (size_t i = 0; i < kSecondAllocPages; i++)
2152     CHECK_PAGE_IN_CORE(p.PageAtIndex(i), true);
2153   for (size_t i = kSecondAllocPages; i < kFirstAllocPages; i++)
2154     CHECK_PAGE_IN_CORE(p.PageAtIndex(i), false);
2155 }
2156 
2157 TEST_F(PartitionAllocTest, PurgeDiscardableWithFreeListRewrite) {
2158   // This test checks truncation of the provisioned slots in a trickier case,
2159   // where the freelist has to be rewritten.
2160   allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptySlotSpans);
2161   char* ptr1 = reinterpret_cast<char*>(
2162       allocator.root()->Alloc(SystemPageSize() - kExtraAllocSize, type_name));
2163   void* ptr2 =
2164       allocator.root()->Alloc(SystemPageSize() - kExtraAllocSize, type_name);
2165   void* ptr3 =
2166       allocator.root()->Alloc(SystemPageSize() - kExtraAllocSize, type_name);
2167   void* ptr4 =
2168       allocator.root()->Alloc(SystemPageSize() - kExtraAllocSize, type_name);
2169   ptr1[0] = 'A';
2170   ptr1[SystemPageSize()] = 'A';
2171   ptr1[SystemPageSize() * 2] = 'A';
2172   ptr1[SystemPageSize() * 3] = 'A';
2173   SlotSpanMetadata<base::internal::ThreadSafe>* slot_span =
2174       SlotSpanMetadata<base::internal::ThreadSafe>::FromPointer(
2175           PartitionPointerAdjustSubtract(true, ptr1));
2176   allocator.root()->Free(ptr2);
2177   allocator.root()->Free(ptr4);
2178   allocator.root()->Free(ptr1);
2179   EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
2180 
2181   {
2182     MockPartitionStatsDumper dumper;
2183     allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2184                                 &dumper);
2185     EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2186 
2187     const PartitionBucketMemoryStats* stats =
2188         dumper.GetBucketStats(SystemPageSize());
2189     EXPECT_TRUE(stats);
2190     EXPECT_TRUE(stats->is_valid);
2191     EXPECT_EQ(0u, stats->decommittable_bytes);
2192 #if defined(OS_WIN)
2193     EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
2194 #else
2195     EXPECT_EQ(2 * SystemPageSize(), stats->discardable_bytes);
2196 #endif
2197     EXPECT_EQ(SystemPageSize(), stats->active_bytes);
2198     EXPECT_EQ(4 * SystemPageSize(), stats->resident_bytes);
2199   }
2200   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
2201   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
2202   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
2203   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
2204   allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
2205   EXPECT_EQ(1u, slot_span->num_unprovisioned_slots);
2206   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
2207   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
2208   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
2209   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
2210 
2211   // Let's check we didn't brick the freelist.
2212   void* ptr1b =
2213       allocator.root()->Alloc(SystemPageSize() - kExtraAllocSize, type_name);
2214   EXPECT_EQ(ptr1, ptr1b);
2215   void* ptr2b =
2216       allocator.root()->Alloc(SystemPageSize() - kExtraAllocSize, type_name);
2217   EXPECT_EQ(ptr2, ptr2b);
2218   EXPECT_FALSE(slot_span->freelist_head);
2219 
2220   allocator.root()->Free(ptr1);
2221   allocator.root()->Free(ptr2);
2222   allocator.root()->Free(ptr3);
2223 }
2224 
2225 TEST_F(PartitionAllocTest, PurgeDiscardableDoubleTruncateFreeList) {
2226   // This sub-test is similar, but tests a double-truncation.
2227   allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptySlotSpans);
2228   char* ptr1 = reinterpret_cast<char*>(
2229       allocator.root()->Alloc(SystemPageSize() - kExtraAllocSize, type_name));
2230   void* ptr2 =
2231       allocator.root()->Alloc(SystemPageSize() - kExtraAllocSize, type_name);
2232   void* ptr3 =
2233       allocator.root()->Alloc(SystemPageSize() - kExtraAllocSize, type_name);
2234   void* ptr4 =
2235       allocator.root()->Alloc(SystemPageSize() - kExtraAllocSize, type_name);
2236   ptr1[0] = 'A';
2237   ptr1[SystemPageSize()] = 'A';
2238   ptr1[SystemPageSize() * 2] = 'A';
2239   ptr1[SystemPageSize() * 3] = 'A';
2240   SlotSpanMetadata<base::internal::ThreadSafe>* slot_span =
2241       SlotSpanMetadata<base::internal::ThreadSafe>::FromPointer(
2242           PartitionPointerAdjustSubtract(true, ptr1));
2243   allocator.root()->Free(ptr4);
2244   allocator.root()->Free(ptr3);
2245   EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
2246 
2247   {
2248     MockPartitionStatsDumper dumper;
2249     allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2250                                 &dumper);
2251     EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2252 
2253     const PartitionBucketMemoryStats* stats =
2254         dumper.GetBucketStats(SystemPageSize());
2255     EXPECT_TRUE(stats);
2256     EXPECT_TRUE(stats->is_valid);
2257     EXPECT_EQ(0u, stats->decommittable_bytes);
2258     EXPECT_EQ(2 * SystemPageSize(), stats->discardable_bytes);
2259     EXPECT_EQ(2 * SystemPageSize(), stats->active_bytes);
2260     EXPECT_EQ(4 * SystemPageSize(), stats->resident_bytes);
2261   }
2262   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
2263   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
2264   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
2265   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
2266   allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
2267   EXPECT_EQ(2u, slot_span->num_unprovisioned_slots);
2268   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
2269   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
2270   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), false);
2271   CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
2272 
2273   EXPECT_FALSE(slot_span->freelist_head);
2274 
2275   allocator.root()->Free(ptr1);
2276   allocator.root()->Free(ptr2);
2277 }
2278 
2279 TEST_F(PartitionAllocTest, ReallocMovesCookies) {
2280   // Resize so as to be sure to hit a "resize in place" case, and ensure that
2281   // use of the entire result is compatible with the debug mode's cookies, even
2282   // when the bucket size is large enough to span more than one partition page
2283   // and we can track the "raw" size. See https://crbug.com/709271
2284   static const size_t kSize =
2285       base::MaxSystemPagesPerSlotSpan() * base::SystemPageSize();
2286   void* ptr = allocator.root()->Alloc(kSize + 1, type_name);
2287   EXPECT_TRUE(ptr);
2288 
2289   memset(ptr, 0xbd, kSize + 1);
2290   ptr = allocator.root()->Realloc(ptr, kSize + 2, type_name);
2291   EXPECT_TRUE(ptr);
2292 
2293   memset(ptr, 0xbd, kSize + 2);
2294   allocator.root()->Free(ptr);
2295 }
2296 
2297 TEST_F(PartitionAllocTest, SmallReallocDoesNotMoveTrailingCookie) {
2298   // For crbug.com/781473
2299   static constexpr size_t kSize = 264;
2300   void* ptr = allocator.root()->Alloc(kSize, type_name);
2301   EXPECT_TRUE(ptr);
2302 
2303   ptr = allocator.root()->Realloc(ptr, kSize + 16, type_name);
2304   EXPECT_TRUE(ptr);
2305 
2306   allocator.root()->Free(ptr);
2307 }
2308 
2309 TEST_F(PartitionAllocTest, ZeroFill) {
2310   constexpr static size_t kAllZerosSentinel =
2311       std::numeric_limits<size_t>::max();
2312   for (size_t size : kTestSizes) {
2313     char* p = static_cast<char*>(
2314         allocator.root()->AllocFlags(PartitionAllocZeroFill, size, nullptr));
2315     size_t non_zero_position = kAllZerosSentinel;
2316     for (size_t i = 0; i < size; ++i) {
2317       if (0 != p[i]) {
2318         non_zero_position = i;
2319         break;
2320       }
2321     }
2322     EXPECT_EQ(kAllZerosSentinel, non_zero_position)
2323         << "test allocation size: " << size;
2324     allocator.root()->Free(p);
2325   }
2326 
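  // Also stress zero-filling with batches of randomly sized allocations.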
2327   for (int i = 0; i < 10; ++i) {
2328     SCOPED_TRACE(i);
2329     AllocateRandomly(allocator.root(), 250, PartitionAllocZeroFill);
2330   }
2331 }
2332 
2333 TEST_F(PartitionAllocTest, Bug_897585) {
2334   // Need sizes big enough to be direct mapped and a delta small enough to
2335   // allow re-use of the slot span when cookied. These numbers fall out of the
2336   // test case in the indicated bug.
2337   size_t kInitialSize = 983040;
2338   size_t kDesiredSize = 983100;
2339   void* ptr = allocator.root()->AllocFlags(PartitionAllocReturnNull,
2340                                            kInitialSize, nullptr);
2341   ASSERT_NE(nullptr, ptr);
2342   ptr = allocator.root()->ReallocFlags(PartitionAllocReturnNull, ptr,
2343                                        kDesiredSize, nullptr);
2344   ASSERT_NE(nullptr, ptr);
2345   memset(ptr, 0xbd, kDesiredSize);
2346   allocator.root()->Free(ptr);
2347 }
2348 
2349 TEST_F(PartitionAllocTest, OverrideHooks) {
2350   constexpr size_t kOverriddenSize = 1234;
2351   constexpr const char* kOverriddenType = "Overridden type";
2352   constexpr unsigned char kOverriddenChar = 'A';
2353 
2354   // Marked static so that we can use them in non-capturing lambdas below.
2355   // (Non-capturing lambdas convert directly to function pointers.)
2356   static volatile bool free_called = false;
2357   static void* overridden_allocation = malloc(kOverriddenSize);
2358   memset(overridden_allocation, kOverriddenChar, kOverriddenSize);
2359 
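  // Install the three override hooks: one intercepts allocations matching the
  // overridden size and type, one intercepts frees of the overridden
  // allocation, and one reports its size.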
2360   PartitionAllocHooks::SetOverrideHooks(
2361       [](void** out, int flags, size_t size, const char* type_name) -> bool {
2362         if (size == kOverriddenSize && type_name == kOverriddenType) {
2363           *out = overridden_allocation;
2364           return true;
2365         }
2366         return false;
2367       },
2368       [](void* address) -> bool {
2369         if (address == overridden_allocation) {
2370           free_called = true;
2371           return true;
2372         }
2373         return false;
2374       },
2375       [](size_t* out, void* address) -> bool {
2376         if (address == overridden_allocation) {
2377           *out = kOverriddenSize;
2378           return true;
2379         }
2380         return false;
2381       });
2382 
2383   void* ptr = allocator.root()->AllocFlags(PartitionAllocReturnNull,
2384                                            kOverriddenSize, kOverriddenType);
2385   ASSERT_EQ(ptr, overridden_allocation);
2386 
2387   allocator.root()->Free(ptr);
2388   EXPECT_TRUE(free_called);
2389 
2390   // |overridden_allocation| has not actually been freed, so we can now
2391   // immediately realloc it.
2392   free_called = false;
2393   ptr =
2394       allocator.root()->ReallocFlags(PartitionAllocReturnNull, ptr, 1, nullptr);
2395   ASSERT_NE(ptr, nullptr);
2396   EXPECT_NE(ptr, overridden_allocation);
2397   EXPECT_TRUE(free_called);
2398   EXPECT_EQ(*(char*)ptr, kOverriddenChar);
2399   allocator.root()->Free(ptr);
2400 
2401   PartitionAllocHooks::SetOverrideHooks(nullptr, nullptr, nullptr);
2402   free(overridden_allocation);
2403 }
2404 
2405 TEST_F(PartitionAllocTest, Alignment) {
2406   std::vector<void*> allocated_ptrs;
2407 
2408   for (size_t size = 1; size <= base::SystemPageSize(); size <<= 1) {
2409     // All allocations which are not direct-mapped occupy contiguous slots of a
2410     // span, starting on a page boundary. This means that allocations are first
2411     // rounded up to the nearest bucket size, then have an address of the form:
2412     //
2413     // (page-aligned address) + i * bucket_size.
2414 
2415     // All powers of two are bucket sizes, meaning that all power of two
2416     // allocations smaller than a page will be aligned on the allocation size.
2417     size_t expected_alignment = size;
2418 #if DCHECK_IS_ON()
2419     // When DCHECK_IS_ON(), a kCookieSize cookie is added on both sides before
2420     // rounding up the allocation size. The returned pointer points after the
2421     // cookie.
2422     expected_alignment = std::min(expected_alignment, kCookieSize);
2423 #endif
2424 #if ENABLE_TAG_FOR_CHECKED_PTR2 || ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
2425     // When tags or ref-counts are enabled, a kInSlotTagBufferSize tag buffer
2426     // plus a kInSlotRefCountBufferSize ref-count buffer are added before
2427     // rounding up the allocation size. The returned pointer points after them.
2428     expected_alignment = std::min(
2429         {expected_alignment, kInSlotTagBufferSize + kInSlotRefCountBufferSize});
2430 #endif
2431     for (int index = 0; index < 3; index++) {
2432       void* ptr = allocator.root()->Alloc(size, "");
2433       allocated_ptrs.push_back(ptr);
2434       EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) % expected_alignment)
2435           << index << "-th allocation of size = " << size;
2436     }
2437   }
2438 
2439   for (void* ptr : allocated_ptrs)
2440     allocator.root()->Free(ptr);
2441 }
2442 
2443 TEST_F(PartitionAllocTest, FundamentalAlignment) {
2444   // See the test above for details. Essentially, checking the bucket size is
2445   // sufficient to ensure that alignment will always be respected, as long as
2446   // the fundamental alignment is <= 16 bytes.
2447   size_t fundamental_alignment = base::kAlignment;
2448   for (size_t size = 0; size < base::SystemPageSize(); size++) {
2449     // Allocate several pointers, as the first one in use in a size class will
2450     // be aligned on a page boundary.
2451     void* ptr = allocator.root()->Alloc(size, "");
2452     void* ptr2 = allocator.root()->Alloc(size, "");
2453     void* ptr3 = allocator.root()->Alloc(size, "");
2454 
2455     EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr) % fundamental_alignment,
2456               static_cast<uintptr_t>(0));
2457     EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr2) % fundamental_alignment,
2458               static_cast<uintptr_t>(0));
2459     EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr3) % fundamental_alignment,
2460               static_cast<uintptr_t>(0));
2461 
2462     EXPECT_EQ(allocator.root()->GetSize(ptr) % fundamental_alignment,
2463               static_cast<uintptr_t>(0));
2464 
2465     allocator.root()->Free(ptr);
2466     allocator.root()->Free(ptr2);
2467     allocator.root()->Free(ptr3);
2468   }
2469 }
2470 
2471 TEST_F(PartitionAllocTest, AlignedAllocations) {
2472   size_t alloc_sizes[] = {1, 10, 100, 1000, 100000, 1000000};
2473   size_t alignments[] = {8, 16, 32, 64, 1024, 4096};
2474 
2475   for (size_t alloc_size : alloc_sizes) {
2476     for (size_t alignment : alignments) {
2477       void* ptr =
2478           aligned_allocator.root()->AlignedAllocFlags(0, alignment, alloc_size);
2479       ASSERT_TRUE(ptr);
2480       EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr) % alignment, 0ull);
2481       allocator.root()->Free(ptr);
2482     }
2483   }
2484 }
2485 
2486 #if ENABLE_TAG_FOR_CHECKED_PTR2 || ENABLE_TAG_FOR_MTE_CHECKED_PTR || \
2487     ENABLE_TAG_FOR_SINGLE_TAG_CHECKED_PTR
2488 
2489 TEST_F(PartitionAllocTest, TagBasic) {
2490   size_t alloc_size = 64 - kExtraAllocSize;
2491   void* ptr1 = allocator.root()->Alloc(alloc_size, type_name);
2492   void* ptr2 = allocator.root()->Alloc(alloc_size, type_name);
2493   void* ptr3 = allocator.root()->Alloc(alloc_size, type_name);
2494   EXPECT_TRUE(ptr1);
2495   EXPECT_TRUE(ptr2);
2496   EXPECT_TRUE(ptr3);
2497 
2498   auto* slot_span =
2499       SlotSpan::FromPointer(PartitionPointerAdjustSubtract(true, ptr1));
2500   EXPECT_TRUE(slot_span);
2501 
2502   char* char_ptr1 = reinterpret_cast<char*>(ptr1);
2503   char* char_ptr2 = reinterpret_cast<char*>(ptr2);
2504   char* char_ptr3 = reinterpret_cast<char*>(ptr3);
2505   EXPECT_LT(kTestAllocSize, slot_span->bucket->slot_size);
2506   EXPECT_EQ(char_ptr1 + slot_span->bucket->slot_size, char_ptr2);
2507   EXPECT_EQ(char_ptr2 + slot_span->bucket->slot_size, char_ptr3);
2508 
2509 #if !ENABLE_TAG_FOR_SINGLE_TAG_CHECKED_PTR
2510   constexpr PartitionTag kTag1 = static_cast<PartitionTag>(0xBADA);
2511   constexpr PartitionTag kTag2 = static_cast<PartitionTag>(0xDB8A);
2512   constexpr PartitionTag kTag3 = static_cast<PartitionTag>(0xA3C4);
2513 #else
2514   // The in-memory tag will always be kFixedTagValue no matter what we set.
2515   constexpr PartitionTag kTag1 = static_cast<PartitionTag>(kFixedTagValue);
2516   constexpr PartitionTag kTag2 = static_cast<PartitionTag>(kFixedTagValue);
2517   constexpr PartitionTag kTag3 = static_cast<PartitionTag>(kFixedTagValue);
2518 #endif
2519   PartitionTagSetValue(ptr1, slot_span->bucket->slot_size, kTag1);
2520   PartitionTagSetValue(ptr2, slot_span->bucket->slot_size, kTag2);
2521   PartitionTagSetValue(ptr3, slot_span->bucket->slot_size, kTag3);
2522 
2523   memset(ptr1, 0, alloc_size);
2524   memset(ptr2, 0, alloc_size);
2525   memset(ptr3, 0, alloc_size);
2526 
2527   EXPECT_EQ(kTag1, PartitionTagGetValue(ptr1));
2528   EXPECT_EQ(kTag2, PartitionTagGetValue(ptr2));
2529   EXPECT_EQ(kTag3, PartitionTagGetValue(ptr3));
2530 
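  // The tags are stored outside the bytes handed back to the caller:
  // overwriting the whole allocation above must not have touched them, and
  // scanning the user-visible bytes must not find the tag values either.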
  EXPECT_TRUE(!memchr(ptr1, static_cast<uint8_t>(kTag1), alloc_size));
  EXPECT_TRUE(!memchr(ptr2, static_cast<uint8_t>(kTag2), alloc_size));
  if (sizeof(PartitionTag) > 1) {
    EXPECT_TRUE(!memchr(ptr1, static_cast<uint8_t>(kTag1 >> 8), alloc_size));
    EXPECT_TRUE(!memchr(ptr2, static_cast<uint8_t>(kTag2 >> 8), alloc_size));
  }

  allocator.root()->Free(ptr1);
  EXPECT_EQ(kTag2, PartitionTagGetValue(ptr2));

  size_t request_size = slot_span->bucket->slot_size - kExtraAllocSize;
  void* new_ptr2 = allocator.root()->Realloc(ptr2, request_size, type_name);
  EXPECT_EQ(ptr2, new_ptr2);
  EXPECT_EQ(kTag3, PartitionTagGetValue(ptr3));

  // Add 1B to ensure the object is reallocated to a larger slot.
  request_size = slot_span->bucket->slot_size - kExtraAllocSize + 1;
  new_ptr2 = allocator.root()->Realloc(ptr2, request_size, type_name);
  EXPECT_TRUE(new_ptr2);
  EXPECT_NE(ptr2, new_ptr2);

  allocator.root()->Free(new_ptr2);

  EXPECT_EQ(kTag3, PartitionTagGetValue(ptr3));
  allocator.root()->Free(ptr3);
}

#endif

// Test that the optimized `GetSlotOffset` implementation produces valid
// results.
TEST_F(PartitionAllocTest, OptimizedGetSlotOffset) {
  auto* current_bucket = allocator.root()->buckets;

  for (size_t i = 0; i < kNumBuckets; ++i, ++current_bucket) {
    for (size_t offset = 0; offset <= kMaxBucketed; offset += 4999) {
      EXPECT_EQ(offset % current_bucket->slot_size,
                current_bucket->GetSlotOffset(offset));
    }
  }
}

// Test that the optimized `GetSlotNumber` implementation produces valid
// results.
TEST_F(PartitionAllocTest, OptimizedGetSlotNumber) {
  for (auto& bucket : allocator.root()->buckets) {
    for (size_t slot = 0, offset = bucket.slot_size / 2;
         slot < bucket.get_slots_per_span();
         ++slot, offset += bucket.slot_size) {
      EXPECT_EQ(slot, bucket.GetSlotNumber(offset));
    }
  }
}
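
// A minimal sketch of the division-free technique that an optimized
// GetSlotOffset()/GetSlotNumber() pair is commonly built on: multiply by a
// precomputed fixed-point reciprocal of the slot size instead of dividing.
// The two tests above only verify that the optimized code agrees with plain
// `offset % slot_size` / `offset / slot_size`; the constants and helper names
// below are illustrative assumptions, not PartitionAlloc's actual
// implementation.
namespace reciprocal_sketch {

constexpr uint64_t kShift = 42;  // Fixed-point precision (an assumption).

constexpr uint64_t Reciprocal(uint64_t slot_size) {
  return ((uint64_t{1} << kShift) / slot_size) + 1;
}

// Exact as long as offset * slot_size < 2^kShift, which covers offsets within
// a slot span for the slot size spot-checked below.
constexpr uint64_t SlotNumber(uint64_t offset, uint64_t reciprocal) {
  return (offset * reciprocal) >> kShift;
}

constexpr uint64_t SlotOffset(uint64_t offset,
                              uint64_t slot_size,
                              uint64_t reciprocal) {
  return offset - SlotNumber(offset, reciprocal) * slot_size;
}

// Spot-check the sketch against the straightforward computation, mirroring
// what the tests above do for the real implementation.
static_assert(SlotNumber(100000, Reciprocal(48)) == 100000 / 48,
              "reciprocal division sketch disagrees with plain division");
static_assert(SlotOffset(100000, 48, Reciprocal(48)) == 100000 % 48,
              "reciprocal modulo sketch disagrees with plain modulo");

}  // namespace reciprocal_sketch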

TEST_F(PartitionAllocTest, GetUsableSize) {
  size_t delta = SystemPageSize() + 1;
  for (size_t size = 1; size <= kMinDirectMappedDownsize; size += delta) {
    void* ptr = allocator.root()->Alloc(size, "");
    EXPECT_TRUE(ptr);
    size_t usable_size = PartitionRoot<ThreadSafe>::GetUsableSize(ptr);
    EXPECT_LE(size, usable_size);
    memset(ptr, 0xDE, usable_size);
    // Should not crash when freeing the pointer.
    allocator.root()->Free(ptr);
  }
}

#if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR

TEST_F(PartitionAllocTest, RefCountBasic) {
  constexpr uint64_t kCookie = 0x1234567890ABCDEF;

  size_t alloc_size = 64 - kExtraAllocSize;
  uint64_t* ptr1 = reinterpret_cast<uint64_t*>(
      allocator.root()->Alloc(alloc_size, type_name));
  EXPECT_TRUE(ptr1);

  *ptr1 = kCookie;

  auto* ref_count = PartitionRefCountPointer(ptr1);

  ref_count->AddRef();
  ref_count->Release();
  EXPECT_TRUE(ref_count->HasOneRef());
  EXPECT_EQ(*ptr1, kCookie);

  ref_count->AddRef();
  EXPECT_FALSE(ref_count->HasOneRef());

  allocator.root()->Free(ptr1);
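  // The slot's contents are expected to be clobbered on Free(), even though
  // the slot cannot be reused yet, so the cookie written above should no
  // longer be readable through the dangling pointer.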
  EXPECT_NE(*ptr1, kCookie);

  // The allocator should not reuse the original slot while its reference
  // count is still non-zero.
  uint64_t* ptr2 = reinterpret_cast<uint64_t*>(
      allocator.root()->Alloc(alloc_size, type_name));
  EXPECT_NE(ptr1, ptr2);
  allocator.root()->Free(ptr2);

  // When the last reference is released, the slot should become reusable.
  ref_count->Release();
  uint64_t* ptr3 = reinterpret_cast<uint64_t*>(
      allocator.root()->Alloc(alloc_size, type_name));
  EXPECT_EQ(ptr1, ptr3);
  allocator.root()->Free(ptr3);
}
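
// A minimal conceptual sketch of the invariant the test above relies on (not
// the actual PartitionRefCount implementation; the class below is a
// hypothetical stand-in): the allocation starts out holding one reference,
// and only the call that drops the count to zero may return the slot to the
// allocator for reuse.
namespace ref_count_sketch {

class SlotRefCount {
 public:
  void AddRef() { ++count_; }
  // Returns true when the caller just dropped the last reference, i.e. the
  // slot may now be handed back to the free list.
  bool Release() { return --count_ == 0; }
  bool HasOneRef() const { return count_ == 1; }

 private:
  uint32_t count_ = 1;  // The allocation itself holds the initial reference.
};

}  // namespace ref_count_sketch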

#endif

}  // namespace internal
}  // namespace base

#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)