//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
120b57cec5SDimitry Andric #ifndef SANITIZER_ALLOCATOR_H
130b57cec5SDimitry Andric #error This file must be included inside sanitizer_allocator.h
140b57cec5SDimitry Andric #endif
150b57cec5SDimitry Andric 
// Cache used by SizeClassAllocator64.
// Per-thread free-list cache: keeps a small per-size-class stack of chunks
// (in compact-pointer form) so the common alloc/dealloc path avoids taking
// the shared allocator's locks.
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef MemoryMapper<Allocator> MemoryMapperT;

  // Initializes local stats and registers them with the global stats object
  // (if one is provided).
  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  // Returns all cached chunks to the allocator, then unregisters the stats.
  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  // Pops one chunk of the given size class from the cache, refilling from
  // the shared allocator when the cache is empty. Returns nullptr only if
  // the refill fails.
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    CompactPtrT chunk = c->chunks[--c->count];
    stats_.Add(AllocatorStatAllocated, c->class_size);
    // Chunks are cached in compact form; expand relative to the region base
    // of this size class to get a usable pointer.
    return reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
  }

  // Pushes one chunk onto the cache; if the cache is full, first flushes
  // half of its capacity back to the shared allocator.
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      DrainHalfMax(c, allocator, class_id);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }

  // Returns every cached chunk of every size class (class 0 is unused) to
  // the allocator.
  void Drain(SizeClassAllocator *allocator) {
    MemoryMapperT memory_mapper(*allocator);
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0) Drain(&memory_mapper, c, allocator, i, c->count);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  typedef typename Allocator::CompactPtrT CompactPtrT;

  // Per-size-class LIFO stack of cached chunks.
  struct PerClass {
    u32 count;        // Number of chunks currently cached.
    u32 max_count;    // Stack capacity; 0 means InitCache() has not run yet.
    uptr class_size;  // Chunk size of this class, cached for stats updates.
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  // Lazily initializes max_count/class_size for *all* classes the first
  // time any class is touched; max_count == 0 is the "uninitialized" marker
  // checked on entry.
  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      c->max_count = 2 * SizeClassMap::MaxCachedHint(size);
      c->class_size = size;
    }
    DCHECK_NE(c->max_count, 0UL);
  }

  // Fetches half the cache capacity worth of chunks from the shared
  // allocator. Returns false if the allocator cannot provide them.
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    const uptr num_requested_chunks = c->max_count / 2;
    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                              num_requested_chunks)))
      return false;
    c->count = num_requested_chunks;
    return true;
  }

  // Flushes half of the cache capacity back to the shared allocator
  // (called when Deallocate finds the cache full).
  NOINLINE void DrainHalfMax(PerClass *c, SizeClassAllocator *allocator,
                             uptr class_id) {
    MemoryMapperT memory_mapper(*allocator);
    Drain(&memory_mapper, c, allocator, class_id, c->max_count / 2);
  }

  // Returns the top `count` cached chunks to the allocator. Note that the
  // local counter is updated *before* ReturnToAllocator is invoked.
  void Drain(MemoryMapperT *memory_mapper, PerClass *c,
             SizeClassAllocator *allocator, uptr class_id, uptr count) {
    CHECK_GE(c->count, count);
    const uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(memory_mapper, &stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};
1250b57cec5SDimitry Andric 
// Cache used by SizeClassAllocator32.
// Per-thread cache of raw chunk pointers; chunks move between the cache and
// the shared allocator in TransferBatch units.
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;

  // Initializes local stats and registers them with the global stats object
  // (if one is provided).
  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  // Returns a TransferBatch suitable for class_id.
  // If batch_class_id is non-zero, batches for this class live in their own
  // size class and one is allocated here; otherwise the caller-supplied
  // chunk `b` doubles as the batch storage.
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b.
  // Only frees `b` when it was separately allocated (batch_class_id != 0);
  // when the batch is stored inside a chunk there is nothing to release.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      Deallocate(allocator, batch_class_id, b);
  }

  // Returns all cached chunks to the allocator, then unregisters the stats.
  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  // Pops one chunk of the given size class from the cache, refilling from
  // the shared allocator when the cache is empty. Returns nullptr only if
  // the refill fails.
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    void *res = c->batch[--c->count];
    // Prefetch the chunk that will be handed out next.
    // NOTE(review): if count just dropped to 0, `c->count - 1` wraps (uptr)
    // and this indexes before the array; presumably a deliberate/benign
    // prefetch of a stale pointer — confirm against upstream intent.
    PREFETCH(c->batch[c->count - 1]);
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return res;
  }

  // Pushes one chunk onto the cache; if the cache is full, first drains
  // half of it back to the shared allocator.
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id);
    c->batch[c->count++] = p;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }

  // Returns every cached chunk of every size class (class 0 is unused) to
  // the allocator.
  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
  // allocated from kBatchClassID size class (except for those that are needed
  // for kBatchClassID itself). The goal is to have TransferBatches in a totally
  // different region of RAM to improve security.
  static const bool kUseSeparateSizeClassForBatch =
      Allocator::kUseSeparateSizeClassForBatch;

  // Per-size-class LIFO stack of cached chunk pointers.
  struct PerClass {
    uptr count;           // Number of chunks currently cached.
    uptr max_count;       // Stack capacity; 0 until InitCache() runs.
    uptr class_size;      // Chunk size of this class, for stats updates.
    uptr batch_class_id;  // Class to allocate TransferBatches from; 0 means
                          // a chunk of this class can hold its own batch.
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  // Lazily initializes every class's capacity, size and batch_class_id the
  // first time any class is touched; max_count == 0 marks "uninitialized".
  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      const uptr max_cached = TransferBatch::MaxCached(size);
      c->max_count = 2 * max_cached;
      c->class_size = size;
      // Precompute the class id to use to store batches for the current class
      // id. 0 means the class size is large enough to store a batch within one
      // of the chunks. If using a separate size class, it will always be
      // kBatchClassID, except for kBatchClassID itself.
      if (kUseSeparateSizeClassForBatch) {
        c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
      } else {
        c->batch_class_id = (size <
          TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
              batch_class_id : 0;
      }
    }
    DCHECK_NE(c->max_count, 0UL);
  }

  // Fetches one TransferBatch worth of chunks from the shared allocator,
  // copies them into the local stack, and releases the batch object.
  // Returns false if the allocator cannot provide a batch.
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    if (UNLIKELY(!b))
      return false;
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
    return true;
  }

  // Packs up to half of the cache capacity into a TransferBatch and hands
  // it back to the shared allocator. Dies if a batch object cannot be
  // obtained, since the chunks being drained would otherwise be lost.
  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
                      uptr class_id) {
    const uptr count = Min(c->max_count / 2, c->count);
    const uptr first_idx_to_drain = c->count - count;
    // The first chunk being drained may itself serve as the batch storage
    // (when batch_class_id == 0).
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    // Failure to allocate a batch while releasing memory is non recoverable.
    // TODO(alekseys): Figure out how to do it without allocating a new batch.
    if (UNLIKELY(!b)) {
      Report("FATAL: Internal error: %s's allocator failed to allocate a "
             "transfer batch.\n", SanitizerToolName);
      Die();
    }
    b->SetFromArray(&c->batch[first_idx_to_drain], count);
    c->count -= count;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};