//===-- primary32.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PRIMARY32_H_
#define SCUDO_PRIMARY32_H_

#include "bytemap.h"
#include "common.h"
#include "list.h"
#include "local_cache.h"
#include "release.h"
#include "report.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// SizeClassAllocator32 is an allocator for 32 or 64-bit address spaces.
//
// It maps Regions of 2^RegionSizeLog bytes aligned on a 2^RegionSizeLog bytes
// boundary, and keeps a bytemap of the mappable address space to track the
// size class the Regions are associated with.
//
// Mapped regions are split into equally sized Blocks according to the size
// class they belong to, and the associated pointers are shuffled to prevent
// any predictable address pattern (the predictability increases with the
// block size).
//
// Regions for size class 0 are special and used to hold TransferBatches,
// which allow the transfer of arrays of pointers from the global size class
// freelist to the thread specific freelist for said class, and back.
//
// Memory used by this allocator is never unmapped but can be partially
// reclaimed if the platform allows for it.

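// A typical instantiation might look as follows (the template parameters are
// purely illustrative, not prescriptive):
//   // 2^18 == 256KB regions with the default size class map.
//   typedef SizeClassAllocator32<DefaultSizeClassMap, 18U> Primary;
// Allocations are then serviced through a SizeClassAllocatorLocalCache
// (Primary::CacheT) bound to an instance of this class.
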
template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
public:
  typedef SizeClassMapT SizeClassMap;
  // Regions should be large enough to hold the largest Block.
  static_assert((1UL << RegionSizeLog) >= SizeClassMap::MaxSize, "");
  typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
  typedef typename CacheT::TransferBatch TransferBatch;

  static uptr getSizeByClassId(uptr ClassId) {
    return (ClassId == SizeClassMap::BatchClassId)
               ? sizeof(TransferBatch)
               : SizeClassMap::getSizeByClassId(ClassId);
  }

  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }

  void initLinkerInitialized(s32 ReleaseToOsInterval) {
    if (SCUDO_FUCHSIA)
      reportError("SizeClassAllocator32 is not supported on Fuchsia");

    PossibleRegions.initLinkerInitialized();
    MinRegionIndex = NumRegions; // MaxRegionIndex is already initialized to 0.

    u32 Seed;
    if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
      Seed =
          static_cast<u32>(getMonotonicTime() ^
                           (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
    const uptr PageSize = getPageSizeCached();
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      Sci->RandState = getRandomU32(&Seed);
      // See comment in the 64-bit primary about releasing smaller size classes.
      Sci->CanRelease = (ReleaseToOsInterval >= 0) &&
                        (I != SizeClassMap::BatchClassId) &&
                        (getSizeByClassId(I) >= (PageSize / 32));
    }
    ReleaseToOsIntervalMs = ReleaseToOsInterval;
  }
  void init(s32 ReleaseToOsInterval) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(ReleaseToOsInterval);
  }

  void unmapTestOnly() {
    while (NumberOfStashedRegions > 0)
      unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
            RegionSize);
    // TODO(kostyak): unmap the TransferBatch regions as well.
    for (uptr I = 0; I < NumRegions; I++)
      if (PossibleRegions[I])
        unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
    PossibleRegions.unmapTestOnly();
  }

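  // Pops a batch of blocks for ClassId off the freelist, populating the
  // freelist from a freshly mapped region first if it is empty. Returns
  // nullptr if the freelist could not be populated.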
  TransferBatch *popBatch(CacheT *C, uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    ScopedLock L(Sci->Mutex);
    TransferBatch *B = Sci->FreeList.front();
    if (B) {
      Sci->FreeList.pop_front();
    } else {
      B = populateFreeList(C, ClassId, Sci);
      if (UNLIKELY(!B))
        return nullptr;
    }
    DCHECK_GT(B->getCount(), 0);
    Sci->Stats.PoppedBlocks += B->getCount();
    return B;
  }

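  // Pushes a batch of freed blocks back onto the freelist for ClassId, and
  // opportunistically attempts to release memory back to the OS if the class
  // is eligible for release.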
  void pushBatch(uptr ClassId, TransferBatch *B) {
    DCHECK_LT(ClassId, NumClasses);
    DCHECK_GT(B->getCount(), 0);
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    ScopedLock L(Sci->Mutex);
    Sci->FreeList.push_front(B);
    Sci->Stats.PushedBlocks += B->getCount();
    if (Sci->CanRelease)
      releaseToOSMaybe(Sci, ClassId);
  }

  void disable() {
    // The BatchClassId must be locked last since other classes can use it.
    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
      if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
        continue;
      getSizeClassInfo(static_cast<uptr>(I))->Mutex.lock();
    }
    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
    RegionsStashMutex.lock();
    PossibleRegions.disable();
  }

  void enable() {
    PossibleRegions.enable();
    RegionsStashMutex.unlock();
    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      getSizeClassInfo(I)->Mutex.unlock();
    }
  }

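  // Invokes Callback on the address of every block of every region currently
  // assigned to a size class. Only the regions between the lowest and highest
  // allocated region indices are considered.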
  template <typename F> void iterateOverBlocks(F Callback) {
    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
      if (PossibleRegions[I]) {
        const uptr BlockSize = getSizeByClassId(PossibleRegions[I]);
        const uptr From = I * RegionSize;
        const uptr To = From + (RegionSize / BlockSize) * BlockSize;
        for (uptr Block = From; Block < To; Block += BlockSize)
          Callback(Block);
      }
  }

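  // Appends allocator-wide statistics, followed by a per-class breakdown, to
  // Str.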
  void getStats(ScopedString *Str) {
    // TODO(kostyak): get the RSS per region.
    uptr TotalMapped = 0;
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      TotalMapped += Sci->AllocatedUser;
      PoppedBlocks += Sci->Stats.PoppedBlocks;
      PushedBlocks += Sci->Stats.PushedBlocks;
    }
    Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
                "remains %zu\n",
                TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
    for (uptr I = 0; I < NumClasses; I++)
      getStats(Str, I, 0);
  }

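  // Forces the release of free blocks back to the OS for all the classes but
  // the batch class, returning the total number of bytes released.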
  uptr releaseToOS() {
    uptr TotalReleasedBytes = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      TotalReleasedBytes += releaseToOSMaybe(Sci, I, /*Force=*/true);
    }
    return TotalReleasedBytes;
  }

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr RegionSize = 1UL << RegionSizeLog;
  static const uptr NumRegions = SCUDO_MMAP_RANGE_SIZE >> RegionSizeLog;
#if SCUDO_WORDSIZE == 32U
  typedef FlatByteMap<NumRegions> ByteMap;
#else
  typedef TwoLevelByteMap<(NumRegions >> 12), 1UL << 12> ByteMap;
#endif

  struct SizeClassStats {
    uptr PoppedBlocks;
    uptr PushedBlocks;
  };

  struct ReleaseToOsInfo {
    uptr PushedBlocksAtLastRelease;
    uptr RangesReleased;
    uptr LastReleasedBytes;
    u64 LastReleaseAtNs;
  };

  struct ALIGNED(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
    HybridMutex Mutex;
    SinglyLinkedList<TransferBatch> FreeList;
    SizeClassStats Stats;
    bool CanRelease;
    u32 RandState;
    uptr AllocatedUser;
    ReleaseToOsInfo ReleaseInfo;
  };
  static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");

  uptr computeRegionId(uptr Mem) {
    const uptr Id = Mem >> RegionSizeLog;
    CHECK_LT(Id, NumRegions);
    return Id;
  }

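  // Maps twice the region size so that a region-aligned chunk can always be
  // carved out of it. If the mapping happens to already be aligned, the spare
  // second half is stashed for a future allocation rather than unmapped;
  // otherwise the misaligned head, and any unused tail, are unmapped.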
  uptr allocateRegionSlow() {
    uptr MapSize = 2 * RegionSize;
    const uptr MapBase = reinterpret_cast<uptr>(
        map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
    if (UNLIKELY(!MapBase))
      return 0;
    const uptr MapEnd = MapBase + MapSize;
    uptr Region = MapBase;
    if (isAligned(Region, RegionSize)) {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions < MaxStashedRegions)
        RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
      else
        MapSize = RegionSize;
    } else {
      Region = roundUpTo(MapBase, RegionSize);
      unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
      MapSize = RegionSize;
    }
    const uptr End = Region + MapSize;
    if (End != MapEnd)
      unmap(reinterpret_cast<void *>(End), MapEnd - End);
    return Region;
  }

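  // Returns a region for ClassId, taken from the stash if possible, freshly
  // mapped otherwise. For all the classes but class 0 (the batch class), the
  // region index and class id are recorded in the bytemap so that blocks can
  // later be attributed back to their size class.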
  uptr allocateRegion(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    uptr Region = 0;
    {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions > 0)
        Region = RegionsStash[--NumberOfStashedRegions];
    }
    if (!Region)
      Region = allocateRegionSlow();
    if (LIKELY(Region)) {
      if (ClassId) {
        const uptr RegionIndex = computeRegionId(Region);
        if (RegionIndex < MinRegionIndex)
          MinRegionIndex = RegionIndex;
        if (RegionIndex > MaxRegionIndex)
          MaxRegionIndex = RegionIndex;
        PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId));
      }
    }
    return Region;
  }

  SizeClassInfo *getSizeClassInfo(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    return &SizeClassInfoArray[ClassId];
  }

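  // Shuffles an array of blocks (unless they belong to the batch class) and
  // packs them into TransferBatches, pushing each full batch to the back of
  // the freelist. The last, potentially partial, batch is returned through
  // CurrentBatch instead of being pushed.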
  bool populateBatches(CacheT *C, SizeClassInfo *Sci, uptr ClassId,
                       TransferBatch **CurrentBatch, u32 MaxCount,
                       void **PointersArray, u32 Count) {
    if (ClassId != SizeClassMap::BatchClassId)
      shuffle(PointersArray, Count, &Sci->RandState);
    TransferBatch *B = *CurrentBatch;
    for (uptr I = 0; I < Count; I++) {
      if (B && B->getCount() == MaxCount) {
        Sci->FreeList.push_back(B);
        B = nullptr;
      }
      if (!B) {
        B = C->createBatch(ClassId, PointersArray[I]);
        if (UNLIKELY(!B))
          return false;
        B->clear();
      }
      B->add(PointersArray[I]);
    }
    *CurrentBatch = B;
    return true;
  }

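  // Maps a new region for ClassId, carves it into blocks, and fills the
  // freelist with shuffled batches of those blocks, returning one batch to
  // the caller.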
  NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
                                           SizeClassInfo *Sci) {
    const uptr Region = allocateRegion(ClassId);
    if (UNLIKELY(!Region))
      return nullptr;
    C->getStats().add(StatMapped, RegionSize);
    const uptr Size = getSizeByClassId(ClassId);
    const u32 MaxCount = TransferBatch::getMaxCached(Size);
    DCHECK_GT(MaxCount, 0);
    const uptr NumberOfBlocks = RegionSize / Size;
    DCHECK_GT(NumberOfBlocks, 0);
    TransferBatch *B = nullptr;
    constexpr u32 ShuffleArraySize = 8U * TransferBatch::MaxNumCached;
    void *ShuffleArray[ShuffleArraySize];
    u32 Count = 0;
    const uptr AllocatedUser = Size * NumberOfBlocks;
    for (uptr I = Region; I < Region + AllocatedUser; I += Size) {
      ShuffleArray[Count++] = reinterpret_cast<void *>(I);
      if (Count == ShuffleArraySize) {
        if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount,
                                      ShuffleArray, Count)))
          return nullptr;
        Count = 0;
      }
    }
    if (Count) {
      if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount, ShuffleArray,
                                    Count)))
        return nullptr;
    }
    DCHECK(B);
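    // B is the last batch created and is potentially partial. If other (full)
    // batches were pushed during population, queue B at the back and return
    // the front batch instead, keeping the partial batch last.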
    if (!Sci->FreeList.empty()) {
      Sci->FreeList.push_back(B);
      B = Sci->FreeList.front();
      Sci->FreeList.pop_front();
    }
    DCHECK_GT(B->getCount(), 0);

    C->getStats().add(StatFree, AllocatedUser);
    Sci->AllocatedUser += AllocatedUser;
    if (Sci->CanRelease)
      Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
    return B;
  }

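  // Appends the statistics of a single class to Str. Rss is not tracked per
  // region yet, and is always passed as 0 by the caller (see the TODO above).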
  void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    if (Sci->AllocatedUser == 0)
      return;
    const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
    const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
    Str->append("  %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
                "inuse: %6zu avail: %6zu rss: %6zuK\n",
                ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
                Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
                AvailableChunks, Rss >> 10);
  }

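  // Attempts to release the free blocks of a class back to the OS. Unless
  // Force is true, the release is skipped if too little memory was freed
  // since the last release, or if the release interval has not elapsed yet.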
  NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
                                 bool Force = false) {
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr PageSize = getPageSizeCached();

    CHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
    const uptr BytesInFreeList =
        Sci->AllocatedUser -
        (Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize;
    if (BytesInFreeList < PageSize)
      return 0; // No chance to release anything.
    if ((Sci->Stats.PushedBlocks - Sci->ReleaseInfo.PushedBlocksAtLastRelease) *
            BlockSize <
        PageSize) {
      return 0; // Nothing new to release.
    }

    if (!Force) {
      const s32 IntervalMs = ReleaseToOsIntervalMs;
      if (IntervalMs < 0)
        return 0;
      if (Sci->ReleaseInfo.LastReleaseAtNs +
              static_cast<uptr>(IntervalMs) * 1000000ULL >
          getMonotonicTime()) {
        return 0; // Memory was returned recently.
      }
    }

    // TODO(kostyak): currently not ideal as we loop over all regions and
    // iterate multiple times over the same freelist if a ClassId spans multiple
    // regions. But it will have to do for now.
    uptr TotalReleasedBytes = 0;
    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
      if (PossibleRegions[I] == ClassId) {
        ReleaseRecorder Recorder(I * RegionSize);
        releaseFreeMemoryToOS(Sci->FreeList, I * RegionSize,
                              RegionSize / PageSize, BlockSize, &Recorder);
        if (Recorder.getReleasedRangesCount() > 0) {
          Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
          Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
          Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
          TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
        }
      }
    }
    Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
    return TotalReleasedBytes;
  }

  SizeClassInfo SizeClassInfoArray[NumClasses];

  ByteMap PossibleRegions;
  // Keep track of the lowest & highest regions allocated to avoid looping
  // through the whole NumRegions.
  uptr MinRegionIndex;
  uptr MaxRegionIndex;
  s32 ReleaseToOsIntervalMs;
  // Unless several threads request regions simultaneously from different size
  // classes, the stash rarely contains more than 1 entry.
  static constexpr uptr MaxStashedRegions = 4;
  HybridMutex RegionsStashMutex;
  uptr NumberOfStashedRegions;
  uptr RegionsStash[MaxStashedRegions];
};

} // namespace scudo

#endif // SCUDO_PRIMARY32_H_