13cab2bb3Spatrick //===-- quarantine.h --------------------------------------------*- C++ -*-===//
23cab2bb3Spatrick //
33cab2bb3Spatrick // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
43cab2bb3Spatrick // See https://llvm.org/LICENSE.txt for license information.
53cab2bb3Spatrick // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
63cab2bb3Spatrick //
73cab2bb3Spatrick //===----------------------------------------------------------------------===//
83cab2bb3Spatrick 
93cab2bb3Spatrick #ifndef SCUDO_QUARANTINE_H_
103cab2bb3Spatrick #define SCUDO_QUARANTINE_H_
113cab2bb3Spatrick 
123cab2bb3Spatrick #include "list.h"
133cab2bb3Spatrick #include "mutex.h"
143cab2bb3Spatrick #include "string_utils.h"
153cab2bb3Spatrick 
163cab2bb3Spatrick namespace scudo {
173cab2bb3Spatrick 
183cab2bb3Spatrick struct QuarantineBatch {
193cab2bb3Spatrick   // With the following count, a batch (and the header that protects it) occupy
203cab2bb3Spatrick   // 4096 bytes on 32-bit platforms, and 8192 bytes on 64-bit.
213cab2bb3Spatrick   static const u32 MaxCount = 1019;
223cab2bb3Spatrick   QuarantineBatch *Next;
233cab2bb3Spatrick   uptr Size;
243cab2bb3Spatrick   u32 Count;
253cab2bb3Spatrick   void *Batch[MaxCount];
263cab2bb3Spatrick 
initQuarantineBatch273cab2bb3Spatrick   void init(void *Ptr, uptr Size) {
283cab2bb3Spatrick     Count = 1;
293cab2bb3Spatrick     Batch[0] = Ptr;
303cab2bb3Spatrick     this->Size = Size + sizeof(QuarantineBatch); // Account for the Batch Size.
313cab2bb3Spatrick   }
323cab2bb3Spatrick 
333cab2bb3Spatrick   // The total size of quarantined nodes recorded in this batch.
getQuarantinedSizeQuarantineBatch343cab2bb3Spatrick   uptr getQuarantinedSize() const { return Size - sizeof(QuarantineBatch); }
353cab2bb3Spatrick 
push_backQuarantineBatch363cab2bb3Spatrick   void push_back(void *Ptr, uptr Size) {
373cab2bb3Spatrick     DCHECK_LT(Count, MaxCount);
383cab2bb3Spatrick     Batch[Count++] = Ptr;
393cab2bb3Spatrick     this->Size += Size;
403cab2bb3Spatrick   }
413cab2bb3Spatrick 
canMergeQuarantineBatch423cab2bb3Spatrick   bool canMerge(const QuarantineBatch *const From) const {
433cab2bb3Spatrick     return Count + From->Count <= MaxCount;
443cab2bb3Spatrick   }
453cab2bb3Spatrick 
mergeQuarantineBatch463cab2bb3Spatrick   void merge(QuarantineBatch *const From) {
473cab2bb3Spatrick     DCHECK_LE(Count + From->Count, MaxCount);
483cab2bb3Spatrick     DCHECK_GE(Size, sizeof(QuarantineBatch));
493cab2bb3Spatrick 
503cab2bb3Spatrick     for (uptr I = 0; I < From->Count; ++I)
513cab2bb3Spatrick       Batch[Count + I] = From->Batch[I];
523cab2bb3Spatrick     Count += From->Count;
533cab2bb3Spatrick     Size += From->getQuarantinedSize();
543cab2bb3Spatrick 
553cab2bb3Spatrick     From->Count = 0;
563cab2bb3Spatrick     From->Size = sizeof(QuarantineBatch);
573cab2bb3Spatrick   }
583cab2bb3Spatrick 
shuffleQuarantineBatch593cab2bb3Spatrick   void shuffle(u32 State) { ::scudo::shuffle(Batch, Count, &State); }
603cab2bb3Spatrick };
613cab2bb3Spatrick 
623cab2bb3Spatrick static_assert(sizeof(QuarantineBatch) <= (1U << 13), ""); // 8Kb.
633cab2bb3Spatrick 
643cab2bb3Spatrick // Per-thread cache of memory blocks.
// Per-thread cache of memory blocks.
template <typename Callback> class QuarantineCache {
public:
  // The object is expected to start zero-initialized; init() only checks that
  // the size counter is indeed 0.
  void init() { DCHECK_EQ(atomic_load_relaxed(&Size), 0U); }

  // Total memory used, including internal accounting.
  uptr getSize() const { return atomic_load_relaxed(&Size); }
  // Memory used for internal accounting.
  uptr getOverheadSize() const { return List.size() * sizeof(QuarantineBatch); }

  // Quarantines Ptr (Size bytes of user memory): appends it to the last batch
  // when there is room, otherwise allocates a fresh batch through Cb.
  void enqueue(Callback Cb, void *Ptr, uptr Size) {
    if (List.empty() || List.back()->Count == QuarantineBatch::MaxCount) {
      QuarantineBatch *B =
          reinterpret_cast<QuarantineBatch *>(Cb.allocate(sizeof(*B)));
      DCHECK(B);
      B->init(Ptr, Size);
      // B->Size already accounts for the batch header (see
      // QuarantineBatch::init), so account through the batch here.
      enqueueBatch(B);
    } else {
      List.back()->push_back(Ptr, Size);
      addToSize(Size);
    }
  }

  // Moves every batch (and the accounted size) from From into this cache,
  // leaving From empty.
  void transfer(QuarantineCache *From) {
    List.append_back(&From->List);
    addToSize(From->getSize());
    atomic_store_relaxed(&From->Size, 0);
  }

  // Appends an already-initialized batch and accounts for its full size
  // (header + quarantined bytes).
  void enqueueBatch(QuarantineBatch *B) {
    List.push_back(B);
    addToSize(B->Size);
  }

  // Pops the oldest batch, or returns nullptr when the cache is empty.
  QuarantineBatch *dequeueBatch() {
    if (List.empty())
      return nullptr;
    QuarantineBatch *B = List.front();
    List.pop_front();
    subFromSize(B->Size);
    return B;
  }

  // Walks the list merging each batch into its predecessor whenever the pair
  // fits into one batch. Emptied batches are moved to ToDeallocate so the
  // caller can release them (typically outside of any lock).
  void mergeBatches(QuarantineCache *ToDeallocate) {
    uptr ExtractedSize = 0;
    QuarantineBatch *Current = List.front();
    while (Current && Current->Next) {
      if (Current->canMerge(Current->Next)) {
        QuarantineBatch *Extracted = Current->Next;
        // Move all the chunks into the current batch.
        Current->merge(Extracted);
        DCHECK_EQ(Extracted->Count, 0);
        DCHECK_EQ(Extracted->Size, sizeof(QuarantineBatch));
        // Remove the next batch From the list and account for its Size.
        List.extract(Current, Extracted);
        ExtractedSize += Extracted->Size;
        // Add it to deallocation list.
        ToDeallocate->enqueueBatch(Extracted);
      } else {
        // Pair does not fit in one batch; advance and try the next pair.
        // Note Current is deliberately NOT advanced after a merge, so several
        // successive batches can be folded into the same one.
        Current = Current->Next;
      }
    }
    subFromSize(ExtractedSize);
  }

  // Prints cumulative statistics over all batches. Like the allocator's
  // printStats, this assumes no concurrent mutation of the list.
  void getStats(ScopedString *Str) const {
    uptr BatchCount = 0;
    uptr TotalOverheadBytes = 0;
    uptr TotalBytes = 0;
    uptr TotalQuarantineChunks = 0;
    for (const QuarantineBatch &Batch : List) {
      BatchCount++;
      TotalBytes += Batch.Size;
      TotalOverheadBytes += Batch.Size - Batch.getQuarantinedSize();
      TotalQuarantineChunks += Batch.Count;
    }
    const uptr QuarantineChunksCapacity =
        BatchCount * QuarantineBatch::MaxCount;
    const uptr ChunksUsagePercent =
        (QuarantineChunksCapacity == 0)
            ? 0
            : TotalQuarantineChunks * 100 / QuarantineChunksCapacity;
    const uptr TotalQuarantinedBytes = TotalBytes - TotalOverheadBytes;
    const uptr MemoryOverheadPercent =
        (TotalQuarantinedBytes == 0)
            ? 0
            : TotalOverheadBytes * 100 / TotalQuarantinedBytes;
    Str->append(
        "Stats: Quarantine: batches: %zu; bytes: %zu (user: %zu); chunks: %zu "
        "(capacity: %zu); %zu%% chunks used; %zu%% memory overhead\n",
        BatchCount, TotalBytes, TotalQuarantinedBytes, TotalQuarantineChunks,
        QuarantineChunksCapacity, ChunksUsagePercent, MemoryOverheadPercent);
  }

private:
  SinglyLinkedList<QuarantineBatch> List;
  atomic_uptr Size = {};

  // NOTE(review): these are a relaxed load followed by a relaxed store, not an
  // atomic read-modify-write — presumably each cache is only mutated by its
  // owning thread or under an external lock (see GlobalQuarantine::CacheMutex);
  // confirm with callers before relying on concurrent mutation.
  void addToSize(uptr add) { atomic_store_relaxed(&Size, getSize() + add); }
  void subFromSize(uptr sub) { atomic_store_relaxed(&Size, getSize() - sub); }
};
1653cab2bb3Spatrick 
// The callback interface is:
// void Callback::recycle(Node *Ptr);
// void *Callback::allocate(uptr Size);
// void Callback::deallocate(void *Ptr);
template <typename Callback, typename Node> class GlobalQuarantine {
public:
  typedef QuarantineCache<Callback> CacheT;
  using ThisT = GlobalQuarantine<Callback, Node>;

  // Configures the global quarantine capacity (Size) and the per-thread cache
  // capacity (CacheSize). Expected to run once on a zero-initialized object,
  // hence the DCHECKs that all limits still read 0.
  void init(uptr Size, uptr CacheSize) {
    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    DCHECK_EQ(atomic_load_relaxed(&MaxSize), 0U);
    DCHECK_EQ(atomic_load_relaxed(&MinSize), 0U);
    DCHECK_EQ(atomic_load_relaxed(&MaxCacheSize), 0U);
    // Thread local quarantine size can be zero only when global quarantine size
    // is zero (it allows us to perform just one atomic read per put() call).
    CHECK((Size == 0 && CacheSize == 0) || CacheSize != 0);

    atomic_store_relaxed(&MaxSize, Size);
    atomic_store_relaxed(&MinSize, Size / 10 * 9); // 90% of max size.
    atomic_store_relaxed(&MaxCacheSize, CacheSize);

    Cache.init();
  }

  uptr getMaxSize() const { return atomic_load_relaxed(&MaxSize); }
  uptr getCacheSize() const { return atomic_load_relaxed(&MaxCacheSize); }

  // Quarantines Ptr through the thread-local cache C, draining the cache into
  // the global quarantine once it outgrows its limit.
  void put(CacheT *C, Callback Cb, Node *Ptr, uptr Size) {
    C->enqueue(Cb, Ptr, Size);
    if (C->getSize() > getCacheSize())
      drain(C, Cb);
  }

  // Moves the contents of C into the global cache, then recycles down to
  // MinSize when over the limit. Recycling is skipped if another thread
  // already holds RecycleMutex (tryLock failure), so drain() never blocks on
  // the recycling work itself.
  void NOINLINE drain(CacheT *C, Callback Cb) {
    {
      ScopedLock L(CacheMutex);
      Cache.transfer(C);
    }
    if (Cache.getSize() > getMaxSize() && RecycleMutex.tryLock())
      recycle(atomic_load_relaxed(&MinSize), Cb);
  }

  // Like drain(), but unconditionally recycles everything (target size 0),
  // blocking on RecycleMutex if needed.
  void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) {
    {
      ScopedLock L(CacheMutex);
      Cache.transfer(C);
    }
    RecycleMutex.lock();
    recycle(0, Cb);
  }

  // Appends quarantine statistics and the configured limits to Str.
  void getStats(ScopedString *Str) const {
    // It assumes that the world is stopped, just as the allocator's printStats.
    Cache.getStats(Str);
    Str->append("Quarantine limits: global: %zuK; thread local: %zuK\n",
                getMaxSize() >> 10, getCacheSize() >> 10);
  }

  void disable() {
    // RecycleMutex must be locked 1st since we grab CacheMutex within recycle.
    RecycleMutex.lock();
    CacheMutex.lock();
  }

  // Unlocks in the reverse order of disable().
  void enable() {
    CacheMutex.unlock();
    RecycleMutex.unlock();
  }

private:
  // Read-only data.
  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
  CacheT Cache;
  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
  atomic_uptr MinSize = {};
  atomic_uptr MaxSize = {};
  alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize = {};

  // Precondition: the caller holds RecycleMutex; it is released here before
  // the (slow) recycling of the extracted batches so other threads can
  // proceed.
  // NOTE(review): the MinSize parameter shadows the MinSize member; callers
  // pass the member's value (or 0) explicitly so behavior is correct, but the
  // shadowing is easy to misread.
  void NOINLINE recycle(uptr MinSize, Callback Cb) {
    CacheT Tmp;
    Tmp.init();
    {
      ScopedLock L(CacheMutex);
      // Go over the batches and merge partially filled ones to
      // save some memory, otherwise batches themselves (since the memory used
      // by them is counted against quarantine limit) can overcome the actual
      // user's quarantined chunks, which diminishes the purpose of the
      // quarantine.
      const uptr CacheSize = Cache.getSize();
      const uptr OverheadSize = Cache.getOverheadSize();
      DCHECK_GE(CacheSize, OverheadSize);
      // Do the merge only when overhead exceeds this predefined limit (might
      // require some tuning). It saves us merge attempt when the batch list
      // quarantine is unlikely to contain batches suitable for merge.
      constexpr uptr OverheadThresholdPercents = 100;
      if (CacheSize > OverheadSize &&
          OverheadSize * (100 + OverheadThresholdPercents) >
              CacheSize * OverheadThresholdPercents) {
        Cache.mergeBatches(&Tmp);
      }
      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined chunks.
      while (Cache.getSize() > MinSize)
        Tmp.enqueueBatch(Cache.dequeueBatch());
    }
    RecycleMutex.unlock();
    doRecycle(&Tmp, Cb);
  }

  // Shuffles then recycles every chunk of every batch in C, deallocating each
  // batch afterwards. No lock is held here; C is private to the caller.
  void NOINLINE doRecycle(CacheT *C, Callback Cb) {
    while (QuarantineBatch *B = C->dequeueBatch()) {
      // Seed derived from the batch and cache addresses; it only varies the
      // recycling order per batch.
      const u32 Seed = static_cast<u32>(
          (reinterpret_cast<uptr>(B) ^ reinterpret_cast<uptr>(C)) >> 4);
      B->shuffle(Seed);
      constexpr uptr NumberOfPrefetch = 8UL;
      CHECK(NumberOfPrefetch <= ARRAY_SIZE(B->Batch));
      // Prefetch a window ahead of the chunk being recycled.
      for (uptr I = 0; I < NumberOfPrefetch; I++)
        PREFETCH(B->Batch[I]);
      for (uptr I = 0, Count = B->Count; I < Count; I++) {
        if (I + NumberOfPrefetch < Count)
          PREFETCH(B->Batch[I + NumberOfPrefetch]);
        Cb.recycle(reinterpret_cast<Node *>(B->Batch[I]));
      }
      Cb.deallocate(B);
    }
  }
};
2943cab2bb3Spatrick 
2953cab2bb3Spatrick } // namespace scudo
2963cab2bb3Spatrick 
2973cab2bb3Spatrick #endif // SCUDO_QUARANTINE_H_
298