//===-- local_cache.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_LOCAL_CACHE_H_
#define SCUDO_LOCAL_CACHE_H_

#include "internal_defs.h"
#include "list.h"
#include "platform.h"
#include "report.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

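// The per-thread cache in front of a SizeClassAllocator (the primary
// allocator). It keeps a small per-size-class array of free blocks, stored as
// compact pointers, so that most allocations and deallocations complete
// without touching the shared SizeClassAllocator.
//
// A minimal usage sketch; the `Primary`, `GStats`, `PrimaryInstance` and
// `ClassId` names are illustrative and not defined in this file:
//
//   SizeClassAllocatorLocalCache<Primary> Cache;
//   Cache.init(&GStats, &PrimaryInstance); // Link stats, set the allocator.
//   void *P = Cache.allocate(ClassId);     // Fast path, refills on demand.
//   Cache.deallocate(ClassId, P);          // Fast path, drains when full.
//   Cache.destroy(&GStats);                // Drain everything, unlink stats.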
template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
  typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;

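  // A fixed-capacity array of compact pointers used to move blocks between
  // the local cache and the SizeClassAllocator. Batches are chained through
  // `Next` in a SinglyLinkedList.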
  struct TransferBatch {
    static const u16 MaxNumCached = SizeClassMap::MaxNumCachedHint;
    void setFromArray(CompactPtrT *Array, u16 N) {
      DCHECK_LE(N, MaxNumCached);
      Count = N;
      memcpy(Batch, Array, sizeof(Batch[0]) * Count);
    }
    void appendFromArray(CompactPtrT *Array, u16 N) {
      DCHECK_LE(N, MaxNumCached - Count);
      memcpy(Batch + Count, Array, sizeof(Batch[0]) * N);
      // u16 will be promoted to int by arithmetic type conversion.
      Count = static_cast<u16>(Count + N);
    }
    void appendFromTransferBatch(TransferBatch *B, u16 N) {
      DCHECK_LE(N, MaxNumCached - Count);
      DCHECK_GE(B->Count, N);
      // Append from the back of `B`.
      memcpy(Batch + Count, B->Batch + (B->Count - N), sizeof(Batch[0]) * N);
      // u16 will be promoted to int by arithmetic type conversion.
      Count = static_cast<u16>(Count + N);
      B->Count = static_cast<u16>(B->Count - N);
    }
    void clear() { Count = 0; }
    void add(CompactPtrT P) {
      DCHECK_LT(Count, MaxNumCached);
      Batch[Count++] = P;
    }
    void copyToArray(CompactPtrT *Array) const {
      memcpy(Array, Batch, sizeof(Batch[0]) * Count);
    }
    u16 getCount() const { return Count; }
    bool isEmpty() const { return Count == 0U; }
    CompactPtrT get(u16 I) const {
      DCHECK_LT(I, Count);
      return Batch[I];
    }
    static u16 getMaxCached(uptr Size) {
      return Min(MaxNumCached, SizeClassMap::getMaxCachedHint(Size));
    }
    TransferBatch *Next;

  private:
    CompactPtrT Batch[MaxNumCached];
    u16 Count;
  };

  // A BatchGroup is used to collect blocks. Each group has a group id that
  // identifies which memory group the blocks it contains belong to.
  struct BatchGroup {
    // `Next` is used by IntrusiveList.
    BatchGroup *Next;
    // The compact base address of the group.
    uptr CompactPtrGroupBase;
    // Cached value of TransferBatch::getMaxCached().
    u16 MaxCachedPerBatch;
    // Number of blocks pushed into this group. This is an increment-only
    // counter.
    uptr PushedBlocks;
    // This is used to track how many bytes are not in use since the last time
    // we tried to release pages.
    uptr BytesInBGAtLastCheckpoint;
    // Blocks are managed by TransferBatch in a list.
    SinglyLinkedList<TransferBatch> Batches;
  };

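  // Both TransferBatch and BatchGroup are backed by blocks of the batch size
  // class (see createBatch()/createGroup()), hence the size requirement below.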
  static_assert(sizeof(BatchGroup) <= sizeof(TransferBatch),
                "BatchGroup uses the same class size as TransferBatch");

  void init(GlobalStats *S, SizeClassAllocator *A) {
    DCHECK(isEmpty());
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
    Allocator = A;
  }

  void destroy(GlobalStats *S) {
    drain();
    if (LIKELY(S))
      S->unlink(&Stats);
  }

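  // Returns a block of the given class id from the local cache, refilling the
  // cache from the allocator if it is empty. Returns nullptr if the refill
  // fails.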
  void *allocate(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];
    if (C->Count == 0) {
      if (UNLIKELY(!refill(C, ClassId)))
        return nullptr;
      DCHECK_GT(C->Count, 0);
    }
    // We read ClassSize first before accessing Chunks because it's adjacent to
    // Count, while Chunks might be further off (depending on Count). That
    // keeps the memory accesses in close quarters.
    const uptr ClassSize = C->ClassSize;
    CompactPtrT CompactP = C->Chunks[--C->Count];
    Stats.add(StatAllocated, ClassSize);
    Stats.sub(StatFree, ClassSize);
    return Allocator->decompactPtr(ClassId, CompactP);
  }

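  // Puts the block back into the local cache. Returns true if the cache was
  // full and half of it had to be drained back to the allocator.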
  bool deallocate(uptr ClassId, void *P) {
    CHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];
    // We still have to initialize the cache in the event that the first heap
    // operation in a thread is a deallocation.
    initCacheMaybe(C);

    // If the cache is full, drain half of the blocks back to the main
    // allocator.
    const bool NeedToDrainCache = C->Count == C->MaxCount;
    if (NeedToDrainCache)
      drain(C, ClassId);
    // See comment in allocate() about memory accesses.
    const uptr ClassSize = C->ClassSize;
    C->Chunks[C->Count++] =
        Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
    Stats.sub(StatAllocated, ClassSize);
    Stats.add(StatFree, ClassSize);

    return NeedToDrainCache;
  }

  bool isEmpty() const {
    for (uptr I = 0; I < NumClasses; ++I)
      if (PerClassArray[I].Count)
        return false;
    return true;
  }

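  // Drains all the cached blocks of every class back to the allocator,
  // leaving the cache empty.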
  void drain() {
    // Drain BatchClassId last as createBatch can refill it.
    for (uptr I = 0; I < NumClasses; ++I) {
      if (I == BatchClassId)
        continue;
      while (PerClassArray[I].Count > 0)
        drain(&PerClassArray[I], I);
    }
    while (PerClassArray[BatchClassId].Count > 0)
      drain(&PerClassArray[BatchClassId], BatchClassId);
    DCHECK(isEmpty());
  }

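  // Returns a TransferBatch for `ClassId`. For a regular class the batch is
  // allocated from the batch class; for the batch class itself the provided
  // block `B` is reused as the batch. Reports OOM if no block is available.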
  TransferBatch *createBatch(uptr ClassId, void *B) {
    if (ClassId != BatchClassId)
      B = allocate(BatchClassId);
    if (UNLIKELY(!B))
      reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
    return reinterpret_cast<TransferBatch *>(B);
  }

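  // Allocates a BatchGroup from the batch class. Reports OOM on failure.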
  BatchGroup *createGroup() {
    void *Ptr = allocate(BatchClassId);
    if (UNLIKELY(!Ptr))
      reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
    return reinterpret_cast<BatchGroup *>(Ptr);
  }

  LocalStats &getStats() { return Stats; }

  void getStats(ScopedString *Str) {
    bool EmptyCache = true;
    for (uptr I = 0; I < NumClasses; ++I) {
      if (PerClassArray[I].Count == 0)
        continue;

      EmptyCache = false;
      // The size of BatchClass is set to 0 intentionally. See the comment in
      // initCache() for more details.
      const uptr ClassSize = I == BatchClassId
                                 ? SizeClassAllocator::getSizeByClassId(I)
                                 : PerClassArray[I].ClassSize;
      // Note that the string utils don't support printing u16, thus we cast it
      // to the commonly used type uptr.
      Str->append("    %02zu (%6zu): cached: %4zu max: %4zu\n", I, ClassSize,
                  static_cast<uptr>(PerClassArray[I].Count),
                  static_cast<uptr>(PerClassArray[I].MaxCount));
    }

    if (EmptyCache)
      Str->append("    No block is cached.\n");
  }

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr BatchClassId = SizeClassMap::BatchClassId;
  struct alignas(SCUDO_CACHE_LINE_SIZE) PerClass {
    u16 Count;
    u16 MaxCount;
    // Note: ClassSize is zero for the batch class.
    uptr ClassSize;
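    // Cached blocks, stored as compact pointers. At most MaxCount entries are
    // in use at any time.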
    CompactPtrT Chunks[2 * TransferBatch::MaxNumCached];
  };
  PerClass PerClassArray[NumClasses] = {};
  LocalStats Stats;
  SizeClassAllocator *Allocator = nullptr;

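  // Initializes the per-class state on the first use of the cache; a zero
  // MaxCount indicates that the cache has not been initialized yet.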
  ALWAYS_INLINE void initCacheMaybe(PerClass *C) {
    if (LIKELY(C->MaxCount))
      return;
    initCache();
    DCHECK_NE(C->MaxCount, 0U);
  }

  NOINLINE void initCache() {
    for (uptr I = 0; I < NumClasses; I++) {
      PerClass *P = &PerClassArray[I];
      const uptr Size = SizeClassAllocator::getSizeByClassId(I);
      P->MaxCount = static_cast<u16>(2 * TransferBatch::getMaxCached(Size));
      if (I != BatchClassId) {
        P->ClassSize = Size;
      } else {
        // ClassSize in this struct is only used for malloc/free stats, which
        // should only track user allocations, not internal movements.
        P->ClassSize = 0;
      }
    }
  }

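  // Releases the memory used by a TransferBatch back to the batch class. For
  // the batch class itself the batch is left alone.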
  void destroyBatch(uptr ClassId, void *B) {
    if (ClassId != BatchClassId)
      deallocate(BatchClassId, B);
  }

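  // Refills the per-class cache with the blocks of a TransferBatch popped
  // from the allocator, then releases the emptied batch via destroyBatch().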
  NOINLINE bool refill(PerClass *C, uptr ClassId) {
    initCacheMaybe(C);
    TransferBatch *B = Allocator->popBatch(this, ClassId);
    if (UNLIKELY(!B))
      return false;
    DCHECK_GT(B->getCount(), 0);
    C->Count = B->getCount();
    B->copyToArray(C->Chunks);
    B->clear();
    destroyBatch(ClassId, B);
    return true;
  }

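  // Pushes up to half the capacity (MaxCount / 2) of cached blocks of this
  // class back to the allocator and moves the remaining ones to the front of
  // Chunks.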
  NOINLINE void drain(PerClass *C, uptr ClassId) {
    const u16 Count = Min(static_cast<u16>(C->MaxCount / 2), C->Count);
    Allocator->pushBlocks(this, ClassId, &C->Chunks[0], Count);
    // u16 will be promoted to int by arithmetic type conversion.
    C->Count = static_cast<u16>(C->Count - Count);
    for (u16 I = 0; I < C->Count; I++)
      C->Chunks[I] = C->Chunks[I + Count];
  }
};

} // namespace scudo

#endif // SCUDO_LOCAL_CACHE_H_