//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Fixed array to store the LargeMmapAllocator chunks list, limited to 32K
// total allocated chunks. Intended for memory-constrained or not particularly
// memory-hungry cases (currently, 32-bit targets and the internal allocator).
class LargeMmapAllocatorPtrArrayStatic {
 public:
  inline void *Init() { return &p_[0]; }
  inline void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
 private:
  static const int kMaxNumChunks = 1 << 15;
  uptr p_[kMaxNumChunks];
};
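
// Note on footprint (illustrative arithmetic, not part of the original
// comments): the static array reserves kMaxNumChunks * sizeof(uptr) bytes up
// front, i.e. (1 << 15) * 4 = 128 KiB on 32-bit targets and
// (1 << 15) * 8 = 256 KiB on 64-bit targets, regardless of how many chunks are
// actually in use.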

// Much less restricted LargeMmapAllocator chunks list (compared to
// PtrArrayStatic). Backed by an mmapped memory region and can hold up to 1M
// chunks. ReservedAddressRange is used instead of just MAP_NORESERVE to
// achieve the same functionality in the Fuchsia case, which does not support
// MAP_NORESERVE.
class LargeMmapAllocatorPtrArrayDynamic {
 public:
  inline void *Init() {
    uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
                                 SecondaryAllocatorName);
    CHECK(p);
    return reinterpret_cast<void*>(p);
  }

  inline void EnsureSpace(uptr n) {
    CHECK_LT(n, kMaxNumChunks);
    DCHECK(n <= n_reserved_);
    if (UNLIKELY(n == n_reserved_)) {
      address_range_.MapOrDie(
          reinterpret_cast<uptr>(address_range_.base()) +
              n_reserved_ * sizeof(uptr),
          kChunksBlockCount * sizeof(uptr));
      n_reserved_ += kChunksBlockCount;
    }
  }

 private:
  static const int kMaxNumChunks = 1 << 20;
  static const int kChunksBlockCount = 1 << 14;
  ReservedAddressRange address_range_;
  uptr n_reserved_;
};
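
// Note on footprint (illustrative arithmetic, not part of the original
// comments): Init() reserves address space for kMaxNumChunks * sizeof(uptr)
// bytes, i.e. (1 << 20) * 8 = 8 MiB on 64-bit targets, but EnsureSpace() maps
// it in on demand in kChunksBlockCount * sizeof(uptr) = (1 << 14) * 8 =
// 128 KiB blocks, so only reserved address space, not committed memory, scales
// with kMaxNumChunks.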

#if SANITIZER_WORDSIZE == 32
typedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray;
#else
typedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray;
#endif
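
// Both array flavors above satisfy the implicit PtrArrayT interface that
// LargeMmapAllocator (below) relies on. A minimal sketch of that contract,
// spelled out here purely for illustration (the real code uses duck typing via
// the template parameter; no such base class exists):
//
//   struct PtrArrayContract {
//     // Returns storage for at least kMaxNumChunks uptr-sized slots.
//     void *Init();
//     // Guarantees that slot n is safe to write; CHECK-fails if n is too big.
//     void EnsureSpace(uptr n);
//   };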

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
template <class MapUnmapCallback = NoOpMapUnmapCallback,
          class PtrArrayT = DefaultLargeMmapAllocatorPtrArray,
          class AddressSpaceViewTy = LocalAddressSpaceView>
class LargeMmapAllocator {
 public:
  using AddressSpaceView = AddressSpaceViewTy;
  void InitLinkerInitialized() {
    page_size_ = GetPageSizeCached();
    chunks_ = reinterpret_cast<Header**>(ptr_array_.Init());
  }

  void Init() {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized();
  }

  void *Allocate(AllocatorStats *stat, const uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    // Overflow.
    if (map_size < size) {
      Report("WARNING: %s: LargeMmapAllocator allocation overflow: "
             "0x%zx bytes with 0x%zx alignment requested\n",
             SanitizerToolName, map_size, alignment);
      return nullptr;
    }
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDieOnFatalError(map_size, SecondaryAllocatorName));
    if (!map_beg)
      return nullptr;
    CHECK(IsAligned(map_beg, page_size_));
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    MapUnmapCallback().OnMapSecondary(map_beg, map_size, res, size);
    CHECK(IsAligned(res, alignment));
    CHECK(IsAligned(res, page_size_));
    CHECK_GE(res + size, map_beg);
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
    {
      SpinMutexLock l(&mutex_);
      ptr_array_.EnsureSpace(n_chunks_);
      uptr idx = n_chunks_++;
      h->chunk_idx = idx;
      chunks_[idx] = h;
      chunks_sorted_ = false;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatAllocated, map_size);
      stat->Add(AllocatorStatMapped, map_size);
    }
    return reinterpret_cast<void*>(res);
  }
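
  // Descriptive sketch of the resulting mapping (added for clarity; the exact
  // gap before the user pointer depends on the requested alignment):
  //
  //   map_beg             res = user pointer             map_end
  //     |<-- >= 1 page -->|<-------- size -------->| ... |
  //
  // The Header (and the metadata returned by GetMetaData()) lives in the
  // page_size_ bytes immediately preceding res; see GetHeader() below.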

  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
      chunks_[idx] = chunks_[--n_chunks_];
      chunks_[idx]->chunk_idx = idx;
      chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Sub(AllocatorStatAllocated, h->map_size);
      stat->Sub(AllocatorStatMapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }
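
  // Removal above is O(1): the freed chunk's slot is overwritten with the last
  // chunk and the array shrinks by one, which is why chunks_sorted_ must be
  // reset. Illustrative example (not from the original comments): removing
  // index 1 from {A, B, C, D} yields {A, D, C} with n_chunks_ == 3.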

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }

  bool PointerIsMine(const void *p) const {
    return GetBlockBegin(p) != nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 metadata bytes are available.
  void *GetMetaData(const void *p) {
    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
    if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
      Printf("%s: bad pointer %p\n", SanitizerToolName, p);
      CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
    }
    return GetHeader(p) + 1;
  }

  void *GetBlockBegin(const void *ptr) const {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    uptr nearest_chunk = 0;
    Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    // Cache-friendly linear search.
    for (uptr i = 0; i < n_chunks_; i++) {
      uptr ch = reinterpret_cast<uptr>(chunks[i]);
      if (p < ch) continue;  // p is to the left of this chunk, skip it.
      if (p - ch < p - nearest_chunk)
        nearest_chunk = ch;
    }
    if (!nearest_chunk)
      return nullptr;
    const Header *h =
        AddressSpaceView::Load(reinterpret_cast<Header *>(nearest_chunk));
    Header *h_ptr = reinterpret_cast<Header *>(nearest_chunk);
    CHECK_GE(nearest_chunk, h->map_beg);
    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
    CHECK_LE(nearest_chunk, p);
    if (h->map_beg + h->map_size <= p)
      return nullptr;
    return GetUser(h_ptr);
  }

  void EnsureSortedChunks() {
    if (chunks_sorted_) return;
    Header **chunks = AddressSpaceView::LoadWritable(chunks_, n_chunks_);
    Sort(reinterpret_cast<uptr *>(chunks), n_chunks_);
    for (uptr i = 0; i < n_chunks_; i++)
      AddressSpaceView::LoadWritable(chunks[i])->chunk_idx = i;
    chunks_sorted_ = true;
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(const void *ptr) {
    mutex_.CheckLocked();
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;
    if (!n) return nullptr;
    EnsureSortedChunks();
    Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    auto min_mmap_ = reinterpret_cast<uptr>(chunks[0]);
    auto max_mmap_ = reinterpret_cast<uptr>(chunks[n - 1]) +
                     AddressSpaceView::Load(chunks[n - 1])->map_size;
    if (p < min_mmap_ || p >= max_mmap_)
      return nullptr;
    uptr beg = 0, end = n - 1;
    // This loop is a log(n) lower_bound. It does not check for the exact match
    // to avoid expensive cache-thrashing loads.
    while (end - beg >= 2) {
      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
      if (p < reinterpret_cast<uptr>(chunks[mid]))
        end = mid - 1;  // We are not interested in chunks[mid].
      else
        beg = mid;  // chunks[mid] may still be what we want.
    }

    if (beg < end) {
      CHECK_EQ(beg + 1, end);
      // There are 2 chunks left, choose one.
      if (p >= reinterpret_cast<uptr>(chunks[end]))
        beg = end;
    }

    const Header *h = AddressSpaceView::Load(chunks[beg]);
    Header *h_ptr = chunks[beg];
    if (h->map_beg + h->map_size <= p || p < h->map_beg)
      return nullptr;
    return GetUser(h_ptr);
  }
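
  // Worked example of the lower_bound loop above (illustrative, not from the
  // original comments): with four sorted chunk headers at addresses
  // {0x1000, 0x5000, 0x9000, 0xd000} and p == 0xa123, the loop narrows
  // [beg, end] from [0, 3] to [2, 3], and the final comparison leaves
  // beg == 2, i.e. the chunk whose header is at 0x9000 (the last header that
  // is not above p).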

  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times, "
           "remains %zd (%zd K) max %zd M; by size logs: ",
           stats.n_allocs, stats.n_allocs - stats.n_frees,
           stats.currently_allocated >> 10, stats.max_allocated >> 20);
    for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
      uptr c = stats.by_size_log[i];
      if (!c) continue;
      Printf("%zd:%zd; ", i, c);
    }
    Printf("\n");
  }
  // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
  // zone introspection API.
  void ForceLock() SANITIZER_ACQUIRE(mutex_) { mutex_.Lock(); }

  void ForceUnlock() SANITIZER_RELEASE(mutex_) { mutex_.Unlock(); }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    EnsureSortedChunks();  // Avoid doing the sort while iterating.
    const Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    for (uptr i = 0; i < n_chunks_; i++) {
      const Header *t = chunks[i];
      callback(reinterpret_cast<uptr>(GetUser(t)), arg);
      // Consistency check: verify that the array did not change.
      CHECK_EQ(chunks[i], t);
      CHECK_EQ(AddressSpaceView::Load(chunks[i])->chunk_idx, i);
    }
  }

 private:
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };

  Header *GetHeader(uptr p) {
    CHECK(IsAligned(p, page_size_));
    return reinterpret_cast<Header*>(p - page_size_);
  }
  Header *GetHeader(const void *p) {
    return GetHeader(reinterpret_cast<uptr>(p));
  }

  void *GetUser(const Header *h) const {
    CHECK(IsAligned((uptr)h, page_size_));
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }

  uptr page_size_;
  Header **chunks_;
  PtrArrayT ptr_array_;
  uptr n_chunks_;
  bool chunks_sorted_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  mutable StaticSpinMutex mutex_;
};
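
// A minimal usage sketch (illustrative only, not part of this header). In the
// sanitizer runtimes this allocator is normally driven through
// CombinedAllocator as the secondary allocator rather than used directly; the
// standalone calling convention, assuming an AllocatorStats object, looks
// roughly like this:
//
//   LargeMmapAllocator<> secondary;
//   AllocatorStats stat;
//   stat.Init();
//   secondary.Init();
//   void *p = secondary.Allocate(&stat, /*size=*/1 << 20, /*alignment=*/8);
//   CHECK(secondary.PointerIsMine(p));
//   secondary.Deallocate(&stat, p);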