//===-- primary32.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PRIMARY32_H_
#define SCUDO_PRIMARY32_H_

#include "bytemap.h"
#include "common.h"
#include "list.h"
#include "local_cache.h"
#include "options.h"
#include "release.h"
#include "report.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {
// SizeClassAllocator32 is an allocator for 32- or 64-bit address spaces.
//
// It maps Regions of 2^RegionSizeLog bytes aligned on a 2^RegionSizeLog bytes
// boundary, and keeps a bytemap of the mappable address space to track the
// size class each Region is associated with.
//
// Mapped Regions are split into equally sized Blocks according to the size
// class they belong to, and the associated pointers are shuffled to prevent
// any predictable address pattern (the predictability increases with the
// block size).
//
// Regions for size class 0 are special and used to hold TransferBatches, which
// allow transferring arrays of pointers from the global size class freelist to
// the thread-specific freelist for said class, and back.
//
// Memory used by this allocator is never unmapped but can be partially
// reclaimed if the platform allows for it.
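//
// A minimal usage sketch follows; ExampleConfig is hypothetical and only
// lists the members this file actually reads (real configurations live
// elsewhere in the allocator and carry additional fields):
//
//   struct ExampleConfig {
//     typedef DefaultSizeClassMap SizeClassMap;
//     // Each Region would be 2^18 = 256KB.
//     static const uptr PrimaryRegionSizeLog = 18U;
//     static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
//     static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
//   };
//   typedef SizeClassAllocator32<ExampleConfig> Primary;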

template <typename Config> class SizeClassAllocator32 {
public:
  typedef typename Config::SizeClassMap SizeClassMap;
  // The bytemap can only track UINT8_MAX - 1 classes.
  static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
  // Regions should be large enough to hold the largest Block.
  static_assert((1UL << Config::PrimaryRegionSizeLog) >= SizeClassMap::MaxSize,
                "");
  typedef SizeClassAllocator32<Config> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
  typedef typename CacheT::TransferBatch TransferBatch;

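  // Blocks of the batch class hold TransferBatch objects themselves, so their
  // size is sizeof(TransferBatch) rather than a SizeClassMap size.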
  static uptr getSizeByClassId(uptr ClassId) {
    return (ClassId == SizeClassMap::BatchClassId)
               ? sizeof(TransferBatch)
               : SizeClassMap::getSizeByClassId(ClassId);
  }

  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }

  void initLinkerInitialized(s32 ReleaseToOsInterval) {
    if (SCUDO_FUCHSIA)
      reportError("SizeClassAllocator32 is not supported on Fuchsia");

    PossibleRegions.initLinkerInitialized();

    u32 Seed;
    const u64 Time = getMonotonicTime();
    if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
      Seed = static_cast<u32>(
          Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      Sci->RandState = getRandomU32(&Seed);
      // Sci->MaxRegionIndex is already initialized to 0.
      Sci->MinRegionIndex = NumRegions;
      Sci->ReleaseInfo.LastReleaseAtNs = Time;
    }
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }
  void init(s32 ReleaseToOsInterval) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(ReleaseToOsInterval);
  }

  void unmapTestOnly() {
    while (NumberOfStashedRegions > 0)
      unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
            RegionSize);
    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      if (Sci->MinRegionIndex < MinRegionIndex)
        MinRegionIndex = Sci->MinRegionIndex;
      if (Sci->MaxRegionIndex > MaxRegionIndex)
        MaxRegionIndex = Sci->MaxRegionIndex;
    }
    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
      if (PossibleRegions[I])
        unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
    PossibleRegions.unmapTestOnly();
  }

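  // popBatch and pushBatch are the interface used by the per-thread
  // SizeClassAllocatorLocalCache (CacheT) to refill from, and drain back to,
  // the global freelist of a given class.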
  TransferBatch *popBatch(CacheT *C, uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    ScopedLock L(Sci->Mutex);
    TransferBatch *B = Sci->FreeList.front();
    if (B) {
      Sci->FreeList.pop_front();
    } else {
      B = populateFreeList(C, ClassId, Sci);
      if (UNLIKELY(!B))
        return nullptr;
    }
    DCHECK_GT(B->getCount(), 0);
    Sci->Stats.PoppedBlocks += B->getCount();
    return B;
  }

  void pushBatch(uptr ClassId, TransferBatch *B) {
    DCHECK_LT(ClassId, NumClasses);
    DCHECK_GT(B->getCount(), 0);
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    ScopedLock L(Sci->Mutex);
    Sci->FreeList.push_front(B);
    Sci->Stats.PushedBlocks += B->getCount();
    if (ClassId != SizeClassMap::BatchClassId)
      releaseToOSMaybe(Sci, ClassId);
  }

  void disable() {
    // The BatchClassId must be locked last since other classes can use it.
    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
      if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
        continue;
      getSizeClassInfo(static_cast<uptr>(I))->Mutex.lock();
    }
    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
    RegionsStashMutex.lock();
    PossibleRegions.disable();
  }

  void enable() {
    PossibleRegions.enable();
    RegionsStashMutex.unlock();
    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      getSizeClassInfo(I)->Mutex.unlock();
    }
  }

  template <typename F> void iterateOverBlocks(F Callback) {
    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      if (Sci->MinRegionIndex < MinRegionIndex)
        MinRegionIndex = Sci->MinRegionIndex;
      if (Sci->MaxRegionIndex > MaxRegionIndex)
        MaxRegionIndex = Sci->MaxRegionIndex;
    }
    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
      if (PossibleRegions[I] &&
          (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
        const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
        const uptr From = I * RegionSize;
        const uptr To = From + (RegionSize / BlockSize) * BlockSize;
        for (uptr Block = From; Block < To; Block += BlockSize)
          Callback(Block);
      }
  }

  void getStats(ScopedString *Str) {
    // TODO(kostyak): get the RSS per region.
    uptr TotalMapped = 0;
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      TotalMapped += Sci->AllocatedUser;
      PoppedBlocks += Sci->Stats.PoppedBlocks;
      PushedBlocks += Sci->Stats.PushedBlocks;
    }
    Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
                "remains %zu\n",
                TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
    for (uptr I = 0; I < NumClasses; I++)
      getStats(Str, I, 0);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), Config::PrimaryMaxReleaseToOsIntervalMs),
          Config::PrimaryMinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    // Not supported by the Primary, but not an error either.
    return true;
  }

  uptr releaseToOS() {
    uptr TotalReleasedBytes = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      TotalReleasedBytes += releaseToOSMaybe(Sci, I, /*Force=*/true);
    }
    return TotalReleasedBytes;
  }

  const char *getRegionInfoArrayAddress() const { return nullptr; }
  static uptr getRegionInfoArraySize() { return 0; }

  static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
                                    UNUSED uptr Ptr) {
    return {};
  }

  AtomicOptions Options;

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr RegionSize = 1UL << Config::PrimaryRegionSizeLog;
  static const uptr NumRegions =
      SCUDO_MMAP_RANGE_SIZE >> Config::PrimaryRegionSizeLog;
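  // Caps how many TransferBatches populateFreeList() fills per call; it also
  // bounds the size of the ShuffleArray used there.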
  static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
  typedef FlatByteMap<NumRegions> ByteMap;

  struct SizeClassStats {
    uptr PoppedBlocks;
    uptr PushedBlocks;
  };

  struct ReleaseToOsInfo {
    uptr PushedBlocksAtLastRelease;
    uptr RangesReleased;
    uptr LastReleasedBytes;
    u64 LastReleaseAtNs;
  };

  struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
    HybridMutex Mutex;
    SinglyLinkedList<TransferBatch> FreeList;
    uptr CurrentRegion;
    uptr CurrentRegionAllocated;
    SizeClassStats Stats;
    u32 RandState;
    uptr AllocatedUser;
    // Lowest & highest region index allocated for this size class, to avoid
    // looping through the whole NumRegions.
    uptr MinRegionIndex;
    uptr MaxRegionIndex;
    ReleaseToOsInfo ReleaseInfo;
  };
  static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");

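  // Maps an address to the index of the Region containing it. For example,
  // with a hypothetical PrimaryRegionSizeLog of 18 (256KB Regions), addresses
  // in [0, 256KB) map to index 0, [256KB, 512KB) to index 1, and so on.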
  uptr computeRegionId(uptr Mem) {
    const uptr Id = Mem >> Config::PrimaryRegionSizeLog;
    CHECK_LT(Id, NumRegions);
    return Id;
  }

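  // Maps twice the Region size so that a RegionSize-aligned Region can always
  // be carved out of it. If the mapping happens to be aligned, the surplus
  // second Region is stashed for a later allocateRegion() call (or trimmed if
  // the stash is full); otherwise the unaligned head and tail are unmapped.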
  uptr allocateRegionSlow() {
    uptr MapSize = 2 * RegionSize;
    const uptr MapBase = reinterpret_cast<uptr>(
        map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
    if (!MapBase)
      return 0;
    const uptr MapEnd = MapBase + MapSize;
    uptr Region = MapBase;
    if (isAligned(Region, RegionSize)) {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions < MaxStashedRegions)
        RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
      else
        MapSize = RegionSize;
    } else {
      Region = roundUpTo(MapBase, RegionSize);
      unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
      MapSize = RegionSize;
    }
    const uptr End = Region + MapSize;
    if (End != MapEnd)
      unmap(reinterpret_cast<void *>(End), MapEnd - End);
    return Region;
  }

  uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    uptr Region = 0;
    {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions > 0)
        Region = RegionsStash[--NumberOfStashedRegions];
    }
    if (!Region)
      Region = allocateRegionSlow();
    if (LIKELY(Region)) {
      // Sci->Mutex is held by the caller, so updating the Min/Max is safe.
      const uptr RegionIndex = computeRegionId(Region);
      if (RegionIndex < Sci->MinRegionIndex)
        Sci->MinRegionIndex = RegionIndex;
      if (RegionIndex > Sci->MaxRegionIndex)
        Sci->MaxRegionIndex = RegionIndex;
      PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
    }
    return Region;
  }

  SizeClassInfo *getSizeClassInfo(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    return &SizeClassInfoArray[ClassId];
  }

  NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
                                           SizeClassInfo *Sci) {
    uptr Region;
    uptr Offset;
    // If the size-class currently has a region associated with it, use it. The
    // newly created blocks will be located after the currently allocated memory
    // for that region (up to RegionSize). Otherwise, create a new region, where
    // the new blocks will be carved from the beginning.
    if (Sci->CurrentRegion) {
      Region = Sci->CurrentRegion;
      DCHECK_GT(Sci->CurrentRegionAllocated, 0U);
      Offset = Sci->CurrentRegionAllocated;
    } else {
      DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
      Region = allocateRegion(Sci, ClassId);
      if (UNLIKELY(!Region))
        return nullptr;
      C->getStats().add(StatMapped, RegionSize);
      Sci->CurrentRegion = Region;
      Offset = 0;
    }

    const uptr Size = getSizeByClassId(ClassId);
    const u32 MaxCount = TransferBatch::getMaxCached(Size);
    DCHECK_GT(MaxCount, 0U);
    // The maximum number of blocks we should carve in the region is dictated
    // by the maximum number of batches we want to fill, and the amount of
    // memory left in the current region (we use the lowest of the two). This
    // will not be 0 as we ensure that a region can at least hold one block (via
    // static_assert and at the end of this function).
    const u32 NumberOfBlocks =
        Min(MaxNumBatches * MaxCount,
            static_cast<u32>((RegionSize - Offset) / Size));
    DCHECK_GT(NumberOfBlocks, 0U);
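    // For example, with a hypothetical 64-byte class where MaxCount is 13 and
    // MaxNumBatches is 8, at most 104 blocks are carved here, further capped
    // by the space left in the current region.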

    constexpr u32 ShuffleArraySize =
        MaxNumBatches * TransferBatch::MaxNumCached;
    // Fill the transfer batches and put them in the size-class freelist. We
    // need to randomize the blocks for security purposes, so we first fill a
    // local array that we then shuffle before populating the batches.
    void *ShuffleArray[ShuffleArraySize];
    DCHECK_LE(NumberOfBlocks, ShuffleArraySize);

    uptr P = Region + Offset;
    for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
      ShuffleArray[I] = reinterpret_cast<void *>(P);
    // No need to shuffle the blocks of the batch size class.
    if (ClassId != SizeClassMap::BatchClassId)
      shuffle(ShuffleArray, NumberOfBlocks, &Sci->RandState);
    for (u32 I = 0; I < NumberOfBlocks;) {
      TransferBatch *B = C->createBatch(ClassId, ShuffleArray[I]);
      if (UNLIKELY(!B))
        return nullptr;
      const u32 N = Min(MaxCount, NumberOfBlocks - I);
      B->setFromArray(&ShuffleArray[I], N);
      Sci->FreeList.push_back(B);
      I += N;
    }
    TransferBatch *B = Sci->FreeList.front();
    Sci->FreeList.pop_front();
    DCHECK(B);
    DCHECK_GT(B->getCount(), 0);

    const uptr AllocatedUser = Size * NumberOfBlocks;
    C->getStats().add(StatFree, AllocatedUser);
    DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize);
    // If there is not enough room left in the currently associated region to
    // fit more blocks, deassociate the region by resetting CurrentRegion and
    // CurrentRegionAllocated. Otherwise, update the allocated amount.
    if (RegionSize - (Sci->CurrentRegionAllocated + AllocatedUser) < Size) {
      Sci->CurrentRegion = 0;
      Sci->CurrentRegionAllocated = 0;
    } else {
      Sci->CurrentRegionAllocated += AllocatedUser;
    }
    Sci->AllocatedUser += AllocatedUser;

    return B;
  }

  void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    if (Sci->AllocatedUser == 0)
      return;
    const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
    const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
    Str->append("  %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
                "inuse: %6zu avail: %6zu rss: %6zuK releases: %6zu\n",
                ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
                Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
                AvailableChunks, Rss >> 10, Sci->ReleaseInfo.RangesReleased);
  }

  NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
                                 bool Force = false) {
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr PageSize = getPageSizeCached();

    DCHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
    const uptr BytesInFreeList =
        Sci->AllocatedUser -
        (Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize;
    if (BytesInFreeList < PageSize)
      return 0; // No chance to release anything.
    const uptr BytesPushed =
        (Sci->Stats.PushedBlocks - Sci->ReleaseInfo.PushedBlocksAtLastRelease) *
        BlockSize;
    if (BytesPushed < PageSize)
      return 0; // Nothing new to release.

    // Releasing smaller blocks is expensive, so we want to make sure that a
    // significant number of bytes are free, and that a good number of blocks
    // have been pushed to the freelist before attempting to release.
    if (BlockSize < PageSize / 16U) {
      if (!Force && BytesPushed < Sci->AllocatedUser / 16U)
        return 0;
      // We want 8x% to 9x% free bytes (the larger the block, the lower the %).
      if ((BytesInFreeList * 100U) / Sci->AllocatedUser <
          (100U - 1U - BlockSize / 16U))
        return 0;
    }
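    // For example, a hypothetical 32-byte class with 4KB pages falls in the
    // branch above (32 < 256), and a release is only attempted once at least
    // 97% (100 - 1 - 32 / 16) of its allocated bytes sit in the freelist.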

    if (!Force) {
      const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
      if (IntervalMs < 0)
        return 0;
      if (Sci->ReleaseInfo.LastReleaseAtNs +
              static_cast<u64>(IntervalMs) * 1000000 >
          getMonotonicTime()) {
        return 0; // Memory was returned recently.
      }
    }

    const uptr First = Sci->MinRegionIndex;
    const uptr Last = Sci->MaxRegionIndex;
    DCHECK_NE(Last, 0U);
    DCHECK_LE(First, Last);
    uptr TotalReleasedBytes = 0;
    const uptr Base = First * RegionSize;
    const uptr NumberOfRegions = Last - First + 1U;
    ReleaseRecorder Recorder(Base);
    auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
      return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
    };
    releaseFreeMemoryToOS(Sci->FreeList, Base, RegionSize, NumberOfRegions,
                          BlockSize, &Recorder, SkipRegion);
    if (Recorder.getReleasedRangesCount() > 0) {
      Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
      Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
      Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
      TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
    }
    Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();

    return TotalReleasedBytes;
  }

  SizeClassInfo SizeClassInfoArray[NumClasses];

  // Tracks the regions in use: 0 means unused, otherwise ClassId + 1 is stored.
  ByteMap PossibleRegions;
  atomic_s32 ReleaseToOsIntervalMs;
  // Unless several threads request regions simultaneously from different size
  // classes, the stash rarely contains more than 1 entry.
  static constexpr uptr MaxStashedRegions = 4;
  HybridMutex RegionsStashMutex;
  uptr NumberOfStashedRegions;
  uptr RegionsStash[MaxStashedRegions];
};

} // namespace scudo

#endif // SCUDO_PRIMARY32_H_