//===-- local_cache.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_LOCAL_CACHE_H_
#define SCUDO_LOCAL_CACHE_H_

#include "internal_defs.h"
#include "list.h"
#include "platform.h"
#include "report.h"
#include "stats.h"

namespace scudo {

template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
  typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;

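  // A TransferBatch carries up to MaxNumCached compact block pointers and is
  // the unit used to move blocks between this local cache and the backing
  // size-class allocator.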
  struct TransferBatch {
    static const u16 MaxNumCached = SizeClassMap::MaxNumCachedHint;
    void setFromArray(CompactPtrT *Array, u16 N) {
      DCHECK_LE(N, MaxNumCached);
      Count = N;
      memcpy(Batch, Array, sizeof(Batch[0]) * Count);
    }
    void appendFromArray(CompactPtrT *Array, u16 N) {
      DCHECK_LE(N, MaxNumCached - Count);
      memcpy(Batch + Count, Array, sizeof(Batch[0]) * N);
      // u16 will be promoted to int by arithmetic type conversion.
      Count = static_cast<u16>(Count + N);
    }
    void clear() { Count = 0; }
    void add(CompactPtrT P) {
      DCHECK_LT(Count, MaxNumCached);
      Batch[Count++] = P;
    }
    void copyToArray(CompactPtrT *Array) const {
      memcpy(Array, Batch, sizeof(Batch[0]) * Count);
    }
    u16 getCount() const { return Count; }
    CompactPtrT get(u16 I) const {
      DCHECK_LT(I, Count);
      return Batch[I];
    }
    static u16 getMaxCached(uptr Size) {
      return Min(MaxNumCached, SizeClassMap::getMaxCachedHint(Size));
    }
    TransferBatch *Next;

  private:
    CompactPtrT Batch[MaxNumCached];
    u16 Count;
  };
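
  // Illustrative round trip through a TransferBatch (sketch only; `Ptrs`,
  // `Out` and `N` are hypothetical): pointers gathered into a batch on the
  // push side come back verbatim on the pop side.
  //
  //   TransferBatch TB;
  //   TB.setFromArray(Ptrs, N); // requires N <= TransferBatch::MaxNumCached
  //   TB.copyToArray(Out);      // Out now holds the same N compact pointers
  //   TB.clear();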
  // A BatchGroup is used to collect blocks. Each group has a group id that
  // identifies the group the contained blocks belong to.
  struct BatchGroup {
    // `Next` is used by IntrusiveList.
    BatchGroup *Next;
    // The identifier of the group.
    uptr GroupId;
    // Cached value of TransferBatch::getMaxCached().
    u16 MaxCachedPerBatch;
    // Number of blocks pushed into this group. This is an increment-only
    // counter.
    uptr PushedBlocks;
    // Tracks how many blocks have been pushed since the last time we checked
    // `PushedBlocks`. This is useful for page releasing, to determine the
    // usage of a BatchGroup.
    uptr PushedBlocksAtLastCheckpoint;
    // Blocks are managed in a list of TransferBatches.
    SinglyLinkedList<TransferBatch> Batches;
  };

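  // BatchGroups are allocated from the batch size class (see createGroup
  // below), hence a BatchGroup has to fit within a TransferBatch-sized block.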
  static_assert(sizeof(BatchGroup) <= sizeof(TransferBatch),
                "BatchGroup uses the same class size as TransferBatch");

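  // Illustrative lifecycle (sketch only; `Primary`, `PrimaryInstance`,
  // `GStats` and `ClassId` are hypothetical stand-ins for what an integration
  // would provide): a cache is linked to the global stats on creation and
  // drained on teardown.
  //
  //   SizeClassAllocatorLocalCache<Primary> Cache;
  //   Cache.init(&GStats, &PrimaryInstance);
  //   if (void *P = Cache.allocate(ClassId))
  //     Cache.deallocate(ClassId, P);
  //   Cache.destroy(&GStats);

  // Links the cache statistics into the global ones (if provided) and records
  // the backing allocator.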
  void init(GlobalStats *S, SizeClassAllocator *A) {
    DCHECK(isEmpty());
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
    Allocator = A;
  }

  void destroy(GlobalStats *S) {
    drain();
    if (LIKELY(S))
      S->unlink(&Stats);
  }

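  // Returns a block of the given class, refilling the per-class cache from
  // the allocator when it is empty. Returns nullptr if the refill fails.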
  void *allocate(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];
    if (C->Count == 0) {
      if (UNLIKELY(!refill(C, ClassId)))
        return nullptr;
      DCHECK_GT(C->Count, 0);
    }
    // We read ClassSize before accessing Chunks because it's adjacent to
    // Count, while Chunks might be further off (depending on Count). That
    // keeps the memory accesses in close quarters.
    const uptr ClassSize = C->ClassSize;
    CompactPtrT CompactP = C->Chunks[--C->Count];
    Stats.add(StatAllocated, ClassSize);
    Stats.sub(StatFree, ClassSize);
    return Allocator->decompactPtr(ClassId, CompactP);
  }

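  // Returns a block to the per-class cache. If the cache for this class is
  // full, half of it is flushed back to the allocator first.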
  void deallocate(uptr ClassId, void *P) {
    CHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];
    // We still have to initialize the cache in the event that the first heap
    // operation in a thread is a deallocation.
    initCacheMaybe(C);
    if (C->Count == C->MaxCount)
      drain(C, ClassId);
    // See comment in allocate() about memory accesses.
    const uptr ClassSize = C->ClassSize;
    C->Chunks[C->Count++] =
        Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
    Stats.sub(StatAllocated, ClassSize);
    Stats.add(StatFree, ClassSize);
  }

  bool isEmpty() const {
    for (uptr I = 0; I < NumClasses; ++I)
      if (PerClassArray[I].Count)
        return false;
    return true;
  }

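  // Flushes every cached block of every class back to the allocator, leaving
  // the cache empty.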
  void drain() {
    // Drain BatchClassId last as createBatch can refill it.
    for (uptr I = 0; I < NumClasses; ++I) {
      if (I == BatchClassId)
        continue;
      while (PerClassArray[I].Count > 0)
        drain(&PerClassArray[I], I);
    }
    while (PerClassArray[BatchClassId].Count > 0)
      drain(&PerClassArray[BatchClassId], BatchClassId);
    DCHECK(isEmpty());
  }

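  // Materializes a TransferBatch. For the batch class itself, the passed-in
  // block B doubles as the batch storage; for every other class, the batch
  // header is allocated from the batch class. Reports OOM on failure.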
  TransferBatch *createBatch(uptr ClassId, void *B) {
    if (ClassId != BatchClassId)
      B = allocate(BatchClassId);
    if (UNLIKELY(!B))
      reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
    return reinterpret_cast<TransferBatch *>(B);
  }

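  // Allocates a BatchGroup header from the batch class; reports OOM on
  // failure.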
  BatchGroup *createGroup() {
    void *Ptr = allocate(BatchClassId);
    if (UNLIKELY(!Ptr))
      reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
    return reinterpret_cast<BatchGroup *>(Ptr);
  }

  LocalStats &getStats() { return Stats; }

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr BatchClassId = SizeClassMap::BatchClassId;
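  // Per-class cache of compact pointers; aligned to the cache line size so
  // that adjacent entries of PerClassArray do not straddle cache lines.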
  struct alignas(SCUDO_CACHE_LINE_SIZE) PerClass {
    u16 Count;
    u16 MaxCount;
    // Note: ClassSize is zero for the transfer batch.
    uptr ClassSize;
    CompactPtrT Chunks[2 * TransferBatch::MaxNumCached];
  };
  PerClass PerClassArray[NumClasses] = {};
  LocalStats Stats;
  SizeClassAllocator *Allocator = nullptr;

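  // Lazily triggers the one-time cache initialization: a non-zero MaxCount
  // doubles as the "already initialized" marker, since it is never zero once
  // set.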
  ALWAYS_INLINE void initCacheMaybe(PerClass *C) {
    if (LIKELY(C->MaxCount))
      return;
    initCache();
    DCHECK_NE(C->MaxCount, 0U);
  }

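  // One-time, out-of-line computation of the per-class limits; kept NOINLINE
  // so that the fast paths only pay for the MaxCount check above.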
  NOINLINE void initCache() {
    for (uptr I = 0; I < NumClasses; I++) {
      PerClass *P = &PerClassArray[I];
      const uptr Size = SizeClassAllocator::getSizeByClassId(I);
      P->MaxCount = static_cast<u16>(2 * TransferBatch::getMaxCached(Size));
      if (I != BatchClassId) {
        P->ClassSize = Size;
      } else {
        // ClassSize in this struct is only used for malloc/free stats, which
        // should only track user allocations, not internal movements.
        P->ClassSize = 0;
      }
    }
  }

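  // Counterpart of createBatch(): returns a batch header to the batch class.
  // Batches of the batch class itself are left alone, as their storage is
  // managed by the allocator.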
  void destroyBatch(uptr ClassId, void *B) {
    if (ClassId != BatchClassId)
      deallocate(BatchClassId, B);
  }

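  // Refills an empty per-class cache from one TransferBatch popped off the
  // allocator, then releases the batch header itself.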
  NOINLINE bool refill(PerClass *C, uptr ClassId) {
    initCacheMaybe(C);
    TransferBatch *B = Allocator->popBatch(this, ClassId);
    if (UNLIKELY(!B))
      return false;
    DCHECK_GT(B->getCount(), 0);
    C->Count = B->getCount();
    B->copyToArray(C->Chunks);
    B->clear();
    destroyBatch(ClassId, B);
    return true;
  }

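  // Pushes up to half of the cache capacity back to the allocator and shifts
  // the remaining blocks to the front of the array.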
  NOINLINE void drain(PerClass *C, uptr ClassId) {
    const u16 Count = Min(static_cast<u16>(C->MaxCount / 2), C->Count);
    Allocator->pushBlocks(this, ClassId, &C->Chunks[0], Count);
    // u16 will be promoted to int by arithmetic type conversion.
    C->Count = static_cast<u16>(C->Count - Count);
    for (u16 I = 0; I < C->Count; I++)
      C->Chunks[I] = C->Chunks[I + Count];
  }
};

} // namespace scudo

#endif // SCUDO_LOCAL_CACHE_H_
235