1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
6 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
7 
8 // DESCRIPTION
9 // PartitionRoot::Alloc() / PartitionRootGeneric::Alloc() and PartitionFree() /
// PartitionRootGeneric::Free() are approximately analogous to malloc() and
11 // free().
12 //
13 // The main difference is that a PartitionRoot / PartitionRootGeneric object
14 // must be supplied to these functions, representing a specific "heap partition"
15 // that will be used to satisfy the allocation. Different partitions are
16 // guaranteed to exist in separate address spaces, including being separate from
17 // the main system heap. If the contained objects are all freed, physical memory
18 // is returned to the system but the address space remains reserved.
19 // See PartitionAlloc.md for other security properties PartitionAlloc provides.
20 //
21 // THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE
22 // SizeSpecificPartitionAllocator / PartitionAllocatorGeneric classes. To
23 // minimize the instruction count to the fullest extent possible, the
24 // PartitionRoot is really just a header adjacent to other data areas provided
25 // by the allocator class.
26 //
27 // The PartitionRoot::Alloc() variant of the API has the following caveats:
28 // - Allocations and frees against a single partition must be single threaded.
29 // - Allocations must not exceed a max size, chosen at compile-time via a
30 // templated parameter to PartitionAllocator.
31 // - Allocation sizes must be aligned to the system pointer size.
32 // - Allocations are bucketed exactly according to size.
33 //
34 // And for PartitionRootGeneric::Alloc():
35 // - Multi-threaded use against a single partition is ok; locking is handled.
36 // - Allocations of any arbitrary size can be handled (subject to a limit of
37 // INT_MAX bytes for security reasons).
38 // - Bucketing is by approximate size, for example an allocation of 4000 bytes
39 // might be placed into a 4096-byte bucket. Bucket sizes are chosen to try and
40 // keep worst-case waste to ~10%.
41 //
42 // The allocators are designed to be extremely fast, thanks to the following
43 // properties and design:
// - Just two single (reasonably predictable) branches in the hot / fast path
45 //   for both allocating and (significantly) freeing.
46 // - A minimal number of operations in the hot / fast path, with the slow paths
47 //   in separate functions, leading to the possibility of inlining.
48 // - Each partition page (which is usually multiple physical pages) has a
49 //   metadata structure which allows fast mapping of free() address to an
50 //   underlying bucket.
51 // - Supports a lock-free API for fast performance in single-threaded cases.
52 // - The freelist for a given bucket is split across a number of partition
53 //   pages, enabling various simple tricks to try and minimize fragmentation.
54 // - Fine-grained bucket sizes leading to less waste and better packing.
55 //
56 // The following security properties could be investigated in the future:
57 // - Per-object bucketing (instead of per-size) is mostly available at the API,
58 // but not used yet.
59 // - No randomness of freelist entries or bucket position.
60 // - Better checking for wild pointers in free().
61 // - Better freelist masking function to guarantee fault on 32-bit.
62 
63 #include <limits.h>
64 #include <string.h>
65 
66 #include "base/allocator/partition_allocator/memory_reclaimer.h"
67 #include "base/allocator/partition_allocator/page_allocator.h"
68 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
69 #include "base/allocator/partition_allocator/partition_bucket.h"
70 #include "base/allocator/partition_allocator/partition_cookie.h"
71 #include "base/allocator/partition_allocator/partition_page.h"
72 #include "base/allocator/partition_allocator/partition_root_base.h"
73 #include "base/allocator/partition_allocator/spin_lock.h"
74 #include "base/base_export.h"
75 #include "base/bits.h"
76 #include "base/compiler_specific.h"
77 #include "base/logging.h"
78 #include "base/stl_util.h"
79 #include "base/sys_byteorder.h"
80 #include "build/build_config.h"
81 
82 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
83 #include <stdlib.h>
84 #endif
85 
86 // We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max
87 // size as other alloc code.
// Rejects requests above the direct-map limit: returns nullptr when the
// caller asked for PartitionAllocReturnNull, otherwise crashes. Wrapped in
// do/while(0) so the macro expands to exactly one statement and is safe in
// unbraced if/else; arguments are parenthesized to survive expression
// arguments. The early |return nullptr| still exits the enclosing function.
#define CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags) \
  do {                                                \
    if ((size) > kGenericMaxDirectMapped) {           \
      if ((flags) & PartitionAllocReturnNull) {       \
        return nullptr;                               \
      }                                               \
      CHECK(false);                                   \
    }                                                 \
  } while (0)
95 
96 namespace base {
97 
98 class PartitionStatsDumper;
99 
// Flags accepted by PartitionRoot::PurgeMemory() and
// PartitionRootGeneric::PurgeMemory(); bit values, may be OR-ed together.
enum PartitionPurgeFlags {
  // Decommitting the ring list of empty pages is reasonably fast.
  PartitionPurgeDecommitEmptyPages = 1 << 0,
  // Discarding unused system pages is slower, because it involves walking all
  // freelists in all active partition pages of all buckets >= system page
  // size. It often frees a similar amount of memory to decommitting the empty
  // pages, though.
  PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
};
109 
// Never instantiate a PartitionRoot directly, instead use PartitionAlloc
// (i.e. SizeSpecificPartitionAllocator); the root relies on bucket storage
// placed immediately after it in memory.
struct BASE_EXPORT PartitionRoot : public internal::PartitionRootBase {
  PartitionRoot();
  ~PartitionRoot() override;
  // This references the buckets OFF the edge of this struct. All uses of
  // PartitionRoot must have the bucket array come right after.
  //
  // The PartitionAlloc templated class ensures the following is correct.
  ALWAYS_INLINE internal::PartitionBucket* buckets() {
    return reinterpret_cast<internal::PartitionBucket*>(this + 1);
  }
  ALWAYS_INLINE const internal::PartitionBucket* buckets() const {
    return reinterpret_cast<const internal::PartitionBucket*>(this + 1);
  }

  // Sets up |bucket_count| buckets. Requests above |maximum_allocation| are
  // rejected by a DCHECK in AllocFlags() (via |max_allocation| — presumably
  // set here; confirm in the .cc).
  void Init(size_t bucket_count, size_t maximum_allocation);

  // Single-threaded allocation entry points; see the file-level DESCRIPTION
  // for the constraints of this size-specific API.
  ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
  ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);

  // |flags| is a combination of PartitionPurgeFlags values.
  void PurgeMemory(int flags) override;

  void DumpStats(const char* partition_name,
                 bool is_light_dump,
                 PartitionStatsDumper* dumper);
};
136 
// Never instantiate a PartitionRootGeneric directly, instead use
// PartitionAllocatorGeneric.
struct BASE_EXPORT PartitionRootGeneric : public internal::PartitionRootBase {
  PartitionRootGeneric();
  ~PartitionRootGeneric() override;
  // Guards bucket mutations in the Alloc()/Free() fast paths; hooks and
  // bucket lookup run outside it.
  subtle::SpinLock lock;
  // Some pre-computed constants, indexed by "order" (position of the request
  // size's most significant bit).
  size_t order_index_shifts[kBitsPerSizeT + 1] = {};
  size_t order_sub_index_masks[kBitsPerSizeT + 1] = {};
  // The bucket lookup table lets us map a size_t to a bucket quickly.
  // The trailing +1 caters for the overflow case for very large allocation
  // sizes.  It is one flat array instead of a 2D array because in the 2D
  // world, we'd need to index array[blah][max+1] which risks undefined
  // behavior.
  internal::PartitionBucket*
      bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) + 1] =
          {};
  internal::PartitionBucket buckets[kGenericNumBuckets] = {};

  // Public API.
  void Init();

  // Thread-safe; locking is handled internally via |lock|.
  ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
  ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);
  ALWAYS_INLINE void Free(void* ptr);

  NOINLINE void* Realloc(void* ptr, size_t new_size, const char* type_name);
  // Overload that may return nullptr if reallocation isn't possible. In this
  // case, |ptr| remains valid.
  NOINLINE void* TryRealloc(void* ptr, size_t new_size, const char* type_name);

  // Returns the size that would actually be provisioned for a |size|-byte
  // request (bucket slot size or rounded direct-map size).
  ALWAYS_INLINE size_t ActualSize(size_t size);

  // |flags| is a combination of PartitionPurgeFlags values.
  void PurgeMemory(int flags) override;

  void DumpStats(const char* partition_name,
                 bool is_light_dump,
                 PartitionStatsDumper* partition_stats_dumper);
};
176 
// Struct used to retrieve total memory usage of a partition. Used by
// PartitionStatsDumper implementation.
struct PartitionMemoryStats {
  size_t total_mmapped_bytes;    // Total bytes mmapped from the system.
  size_t total_committed_bytes;  // Total size of committed pages.
  size_t total_resident_bytes;   // Total bytes provisioned by the partition.
  size_t total_active_bytes;     // Total active bytes in the partition.
  size_t total_decommittable_bytes;  // Total bytes that could be decommitted.
  size_t total_discardable_bytes;    // Total bytes that could be discarded.
};
187 
// Struct used to retrieve memory statistics about a partition bucket. Used by
// PartitionStatsDumper implementation.
struct PartitionBucketMemoryStats {
  bool is_valid;       // Whether this stats entry is valid.
  bool is_direct_map;  // True if this is a direct mapping; size will not be
                       // unique.
  uint32_t bucket_slot_size;     // The size of the slot in bytes.
  uint32_t allocated_page_size;  // Total size the partition page allocated from
                                 // the system.
  uint32_t active_bytes;         // Total active bytes used in the bucket.
  uint32_t resident_bytes;       // Total bytes provisioned in the bucket.
  uint32_t decommittable_bytes;  // Total bytes that could be decommitted.
  uint32_t discardable_bytes;    // Total bytes that could be discarded.
  uint32_t num_full_pages;       // Number of pages with all slots allocated.
  uint32_t num_active_pages;     // Number of pages that have at least one
                                 // provisioned slot.
  uint32_t num_empty_pages;      // Number of pages that are empty
                                 // but not decommitted.
  uint32_t num_decommitted_pages;  // Number of pages that are empty
                                   // and decommitted.
};
209 
210 // Interface that is passed to PartitionDumpStats and
211 // PartitionDumpStatsGeneric for using the memory statistics.
212 class BASE_EXPORT PartitionStatsDumper {
213  public:
214   // Called to dump total memory used by partition, once per partition.
215   virtual void PartitionDumpTotals(const char* partition_name,
216                                    const PartitionMemoryStats*) = 0;
217 
218   // Called to dump stats about buckets, for each bucket.
219   virtual void PartitionsDumpBucketStats(const char* partition_name,
220                                          const PartitionBucketMemoryStats*) = 0;
221 };
222 
223 BASE_EXPORT void PartitionAllocGlobalInit(OomFunction on_out_of_memory);
224 
// PartitionAlloc supports setting hooks to observe allocations/frees as they
// occur as well as 'override' hooks that allow overriding those operations.
class BASE_EXPORT PartitionAllocHooks {
 public:
  // Log allocation and free events.
  typedef void AllocationObserverHook(void* address,
                                      size_t size,
                                      const char* type_name);
  typedef void FreeObserverHook(void* address);

  // If it returns true, the allocation has been overridden with the pointer in
  // *out.
  typedef bool AllocationOverrideHook(void** out,
                                      int flags,
                                      size_t size,
                                      const char* type_name);
  // If it returns true, then the allocation was overridden and has been freed.
  typedef bool FreeOverrideHook(void* address);
  // If it returns true, the underlying allocation is overridden and *out holds
  // the size of the underlying allocation.
  typedef bool ReallocOverrideHook(size_t* out, void* address);

  // To unhook, call Set*Hooks with nullptrs.
  static void SetObserverHooks(AllocationObserverHook* alloc_hook,
                               FreeObserverHook* free_hook);
  static void SetOverrideHooks(AllocationOverrideHook* alloc_hook,
                               FreeOverrideHook* free_hook,
                               ReallocOverrideHook realloc_hook);

  // Helper method to check whether hooks are enabled. This is an optimization
  // so that if a function needs to call observer and override hooks in two
  // different places this value can be cached and only loaded once.
  static bool AreHooksEnabled() {
    return hooks_enabled_.load(std::memory_order_relaxed);
  }

  // Invoke the corresponding hook if one is installed. The *Override* variants
  // return whether the operation was taken over by the hook.
  static void AllocationObserverHookIfEnabled(void* address,
                                              size_t size,
                                              const char* type_name);
  static bool AllocationOverrideHookIfEnabled(void** out,
                                              int flags,
                                              size_t size,
                                              const char* type_name);

  static void FreeObserverHookIfEnabled(void* address);
  static bool FreeOverrideHookIfEnabled(void* address);

  static void ReallocObserverHookIfEnabled(void* old_address,
                                           void* new_address,
                                           size_t size,
                                           const char* type_name);
  static bool ReallocOverrideHookIfEnabled(size_t* out, void* address);

 private:
  // Single bool that is used to indicate whether observer or allocation hooks
  // are set to reduce the numbers of loads required to check whether hooking is
  // enabled.
  static std::atomic<bool> hooks_enabled_;

  // Pointers to the installed hooks, accessed atomically.
  // NOTE(review): a previous comment here mentioned a lock, but no lock member
  // exists in this header; any Set*Hooks synchronization presumably lives in
  // the .cc -- confirm there.
  static std::atomic<AllocationObserverHook*> allocation_observer_hook_;
  static std::atomic<FreeObserverHook*> free_observer_hook_;

  static std::atomic<AllocationOverrideHook*> allocation_override_hook_;
  static std::atomic<FreeOverrideHook*> free_override_hook_;
  static std::atomic<ReallocOverrideHook*> realloc_override_hook_;
};
292 
// Convenience wrapper: AllocFlags() with no flags set.
ALWAYS_INLINE void* PartitionRoot::Alloc(size_t size, const char* type_name) {
  return AllocFlags(0, size, type_name);
}
296 
// Allocates |size| bytes from this size-specific partition. |flags| is a
// combination of PartitionAlloc flags; |type_name| is reported to the
// allocation observer hooks.
ALWAYS_INLINE void* PartitionRoot::AllocFlags(int flags,
                                              size_t size,
                                              const char* type_name) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  // A memory tool (e.g. a sanitizer) replaces the allocator: defer to the
  // system allocator so the tool can track the allocation.
  CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags);
  void* result = malloc(size);
  CHECK(result);
  return result;
#else
  DCHECK(max_allocation == 0 || size <= max_allocation);
  void* result;
  const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
  if (UNLIKELY(hooks_enabled)) {
    // An override hook may satisfy the allocation itself.
    if (PartitionAllocHooks::AllocationOverrideHookIfEnabled(&result, flags,
                                                             size, type_name)) {
      PartitionAllocHooks::AllocationObserverHookIfEnabled(result, size,
                                                           type_name);
      return result;
    }
  }
  size_t requested_size = size;
  // Adjust for the allocation cookie (see partition_cookie.h).
  size = internal::PartitionCookieSizeAdjustAdd(size);
  DCHECK(initialized);
  // Sizes are bucketed exactly: the adjusted size must be a multiple of the
  // bucket granularity, and the index is the quotient.
  size_t index = size >> kBucketShift;
  DCHECK(index < num_buckets);
  DCHECK(size == index << kBucketShift);
  internal::PartitionBucket* bucket = &buckets()[index];
  result = AllocFromBucket(bucket, flags, size);
  if (UNLIKELY(hooks_enabled)) {
    // Observers are told the caller-requested size, not the adjusted one.
    PartitionAllocHooks::AllocationObserverHookIfEnabled(result, requested_size,
                                                         type_name);
  }
  return result;
#endif  // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
}
332 
PartitionAllocSupportsGetSize()333 ALWAYS_INLINE bool PartitionAllocSupportsGetSize() {
334 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
335   return false;
336 #else
337   return true;
338 #endif
339 }
340 
PartitionAllocGetSize(void * ptr)341 ALWAYS_INLINE size_t PartitionAllocGetSize(void* ptr) {
342   // No need to lock here. Only |ptr| being freed by another thread could
343   // cause trouble, and the caller is responsible for that not happening.
344   DCHECK(PartitionAllocSupportsGetSize());
345   ptr = internal::PartitionCookieFreePointerAdjust(ptr);
346   internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
347   // TODO(palmer): See if we can afford to make this a CHECK.
348   DCHECK(internal::PartitionRootBase::IsValidPage(page));
349   size_t size = page->bucket->slot_size;
350   return internal::PartitionCookieSizeAdjustSubtract(size);
351 }
352 
PartitionFree(void * ptr)353 ALWAYS_INLINE void PartitionFree(void* ptr) {
354 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
355   free(ptr);
356 #else
357   // TODO(palmer): Check ptr alignment before continuing. Shall we do the check
358   // inside PartitionCookieFreePointerAdjust?
359   if (PartitionAllocHooks::AreHooksEnabled()) {
360     PartitionAllocHooks::FreeObserverHookIfEnabled(ptr);
361     if (PartitionAllocHooks::FreeOverrideHookIfEnabled(ptr))
362       return;
363   }
364 
365   ptr = internal::PartitionCookieFreePointerAdjust(ptr);
366   internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
367   // TODO(palmer): See if we can afford to make this a CHECK.
368   DCHECK(internal::PartitionRootBase::IsValidPage(page));
369   internal::DeferredUnmap deferred_unmap = page->Free(ptr);
370   deferred_unmap.Run();
371 #endif
372 }
373 
// Maps |size| to the bucket that will serve it. Buckets are grouped by
// "order" (position of the most significant set bit), with
// kGenericNumBucketsPerOrder sub-buckets per order; any set bits below the
// sub-bucket index round the request up to the next bucket.
ALWAYS_INLINE internal::PartitionBucket* PartitionGenericSizeToBucket(
    PartitionRootGeneric* root,
    size_t size) {
  size_t order = kBitsPerSizeT - bits::CountLeadingZeroBitsSizeT(size);
  // The order index is simply the next few bits after the most significant bit.
  size_t order_index = (size >> root->order_index_shifts[order]) &
                       (kGenericNumBucketsPerOrder - 1);
  // And if the remaining bits are non-zero we must bump the bucket up.
  size_t sub_order_index = size & root->order_sub_index_masks[order];
  internal::PartitionBucket* bucket =
      root->bucket_lookups[(order << kGenericNumBucketsPerOrderBits) +
                           order_index + !!sub_order_index];
  // The lookup table always yields a bucket pointer; a null entry would mean
  // a corrupt or uninitialized table, hence a CHECK rather than a DCHECK.
  CHECK(bucket);
  // slot_size of 0 marks buckets with no fixed slot (direct map); otherwise
  // the slot must be able to hold the request.
  DCHECK(!bucket->slot_size || bucket->slot_size >= size);
  DCHECK(!(bucket->slot_size % kGenericSmallestBucket));
  return bucket;
}
391 
// Allocates |size| bytes from the generic (any-size, thread-safe) partition
// |root|. |flags| is a combination of PartitionAlloc flags; |type_name| is
// reported to the observer hooks.
ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
                                               int flags,
                                               size_t size,
                                               const char* type_name) {
  DCHECK_LT(flags, PartitionAllocLastFlag << 1);

#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  // Defer to the system allocator so the memory tool can track the allocation.
  CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags);
  const bool zero_fill = flags & PartitionAllocZeroFill;
  void* result = zero_fill ? calloc(1, size) : malloc(size);
  CHECK(result || flags & PartitionAllocReturnNull);
  return result;
#else
  DCHECK(root->initialized);
  // Only SizeSpecificPartitionAllocator should use max_allocation.
  DCHECK(root->max_allocation == 0);
  void* result;
  const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
  if (UNLIKELY(hooks_enabled)) {
    // An override hook may satisfy the allocation itself.
    if (PartitionAllocHooks::AllocationOverrideHookIfEnabled(&result, flags,
                                                             size, type_name)) {
      PartitionAllocHooks::AllocationObserverHookIfEnabled(result, size,
                                                           type_name);
      return result;
    }
  }
  size_t requested_size = size;
  // Adjust for the allocation cookie (see partition_cookie.h).
  size = internal::PartitionCookieSizeAdjustAdd(size);
  // Bucket lookup is lock-free; only the bucket mutation below needs the lock.
  internal::PartitionBucket* bucket = PartitionGenericSizeToBucket(root, size);
  {
    subtle::SpinLock::Guard guard(root->lock);
    result = root->AllocFromBucket(bucket, flags, size);
  }
  if (UNLIKELY(hooks_enabled)) {
    // Observers are told the caller-requested size, not the adjusted one.
    PartitionAllocHooks::AllocationObserverHookIfEnabled(result, requested_size,
                                                         type_name);
  }

  return result;
#endif
}
433 
// Convenience wrapper: PartitionAllocGenericFlags() with no flags set.
ALWAYS_INLINE void* PartitionRootGeneric::Alloc(size_t size,
                                                const char* type_name) {
  return PartitionAllocGenericFlags(this, 0, size, type_name);
}
438 
// Thin member wrapper over the free function PartitionAllocGenericFlags().
ALWAYS_INLINE void* PartitionRootGeneric::AllocFlags(int flags,
                                                     size_t size,
                                                     const char* type_name) {
  return PartitionAllocGenericFlags(this, flags, size, type_name);
}
444 
Free(void * ptr)445 ALWAYS_INLINE void PartitionRootGeneric::Free(void* ptr) {
446 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
447   free(ptr);
448 #else
449   DCHECK(initialized);
450 
451   if (UNLIKELY(!ptr))
452     return;
453 
454   if (PartitionAllocHooks::AreHooksEnabled()) {
455     PartitionAllocHooks::FreeObserverHookIfEnabled(ptr);
456     if (PartitionAllocHooks::FreeOverrideHookIfEnabled(ptr))
457       return;
458   }
459 
460   ptr = internal::PartitionCookieFreePointerAdjust(ptr);
461   internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
462   // TODO(palmer): See if we can afford to make this a CHECK.
463   DCHECK(IsValidPage(page));
464   internal::DeferredUnmap deferred_unmap;
465   {
466     subtle::SpinLock::Guard guard(lock);
467     deferred_unmap = page->Free(ptr);
468   }
469   deferred_unmap.Run();
470 #endif
471 }
472 
// Reallocation for the generic partition; presumably the shared
// implementation behind Realloc() and TryRealloc() (defined in the .cc --
// confirm there). |flags| is a combination of PartitionAlloc flags.
BASE_EXPORT void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
                                               int flags,
                                               void* ptr,
                                               size_t new_size,
                                               const char* type_name);
478 
ActualSize(size_t size)479 ALWAYS_INLINE size_t PartitionRootGeneric::ActualSize(size_t size) {
480 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
481   return size;
482 #else
483   DCHECK(initialized);
484   size = internal::PartitionCookieSizeAdjustAdd(size);
485   internal::PartitionBucket* bucket = PartitionGenericSizeToBucket(this, size);
486   if (LIKELY(!bucket->is_direct_mapped())) {
487     size = bucket->slot_size;
488   } else if (size > kGenericMaxDirectMapped) {
489     // Too large to allocate => return the size unchanged.
490   } else {
491     size = internal::PartitionBucket::get_direct_map_size(size);
492   }
493   return internal::PartitionCookieSizeAdjustSubtract(size);
494 #endif
495 }
496 
497 template <size_t N>
498 class SizeSpecificPartitionAllocator {
499  public:
SizeSpecificPartitionAllocator()500   SizeSpecificPartitionAllocator() {
501     memset(actual_buckets_, 0,
502            sizeof(internal::PartitionBucket) * base::size(actual_buckets_));
503   }
~SizeSpecificPartitionAllocator()504   ~SizeSpecificPartitionAllocator() {
505     PartitionAllocMemoryReclaimer::Instance()->UnregisterPartition(
506         &partition_root_);
507   }
508   static const size_t kMaxAllocation = N - kAllocationGranularity;
509   static const size_t kNumBuckets = N / kAllocationGranularity;
init()510   void init() {
511     partition_root_.Init(kNumBuckets, kMaxAllocation);
512     PartitionAllocMemoryReclaimer::Instance()->RegisterPartition(
513         &partition_root_);
514   }
root()515   ALWAYS_INLINE PartitionRoot* root() { return &partition_root_; }
516 
517  private:
518   PartitionRoot partition_root_;
519   internal::PartitionBucket actual_buckets_[kNumBuckets];
520 };
521 
// Owner of a generic (any-size, thread-safe) partition; the supported way to
// obtain a PartitionRootGeneric. Handles registration with the memory
// reclaimer over the partition's lifetime.
class BASE_EXPORT PartitionAllocatorGeneric {
 public:
  PartitionAllocatorGeneric();
  ~PartitionAllocatorGeneric() {
    PartitionAllocMemoryReclaimer::Instance()->UnregisterPartition(
        &partition_root_);
  }

  // Sets up the partition and registers it with the memory reclaimer; must
  // run before root() is used for allocation (the alloc paths DCHECK
  // |initialized|).
  void init() {
    partition_root_.Init();
    PartitionAllocMemoryReclaimer::Instance()->RegisterPartition(
        &partition_root_);
  }
  ALWAYS_INLINE PartitionRootGeneric* root() { return &partition_root_; }

 private:
  PartitionRootGeneric partition_root_;
};
540 
541 }  // namespace base
542 
543 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
544