1 //===-- primary32.h ---------------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #ifndef SCUDO_PRIMARY32_H_
10 #define SCUDO_PRIMARY32_H_
11 
12 #include "bytemap.h"
13 #include "common.h"
14 #include "list.h"
15 #include "local_cache.h"
16 #include "release.h"
17 #include "report.h"
18 #include "stats.h"
19 #include "string_utils.h"
20 
21 namespace scudo {
22 
// SizeClassAllocator32 is an allocator for 32- or 64-bit address spaces.
24 //
// It maps Regions of 2^RegionSizeLog bytes, aligned on a 2^RegionSizeLog-byte
// boundary, and keeps a bytemap of the mappable address space to track the
// size class each region is associated with.
28 //
29 // Mapped regions are split into equally sized Blocks according to the size
30 // class they belong to, and the associated pointers are shuffled to prevent any
31 // predictable address pattern (the predictability increases with the block
32 // size).
33 //
// Regions for size class 0 are special and used to hold TransferBatches,
// which allow transferring arrays of pointers from the global size-class
// freelist to the thread-specific freelist for said class, and back.
37 //
38 // Memory used by this allocator is never unmapped but can be partially
39 // reclaimed if the platform allows for it.
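//
// A typical instantiation, given for illustration only (the size class map
// and region size log actually used come from the allocator configuration),
// would be:
//   SizeClassAllocator32<DefaultSizeClassMap, 18U> Primary;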
40 
41 template <class SizeClassMapT, uptr RegionSizeLog,
42           s32 MinReleaseToOsIntervalMs = INT32_MIN,
43           s32 MaxReleaseToOsIntervalMs = INT32_MAX>
44 class SizeClassAllocator32 {
45 public:
46   typedef SizeClassMapT SizeClassMap;
  // The bytemap stores ClassId + 1 (with 0 meaning unused), so it can only
  // track UINT8_MAX - 1 classes.
48   static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
49   // Regions should be large enough to hold the largest Block.
50   static_assert((1UL << RegionSizeLog) >= SizeClassMap::MaxSize, "");
51   typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog,
52                                MinReleaseToOsIntervalMs,
53                                MaxReleaseToOsIntervalMs>
54       ThisT;
55   typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
56   typedef typename CacheT::TransferBatch TransferBatch;
57   static const bool SupportsMemoryTagging = false;
58 
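  // Returns the block size for a class. The batch class is special: its
  // blocks are sized to hold a TransferBatch rather than user data.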
59   static uptr getSizeByClassId(uptr ClassId) {
60     return (ClassId == SizeClassMap::BatchClassId)
61                ? sizeof(TransferBatch)
62                : SizeClassMap::getSizeByClassId(ClassId);
63   }
64 
65   static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
66 
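  // Initializes the allocator, assuming the object is zero-initialized (as a
  // linker-initialized object would be). Seeds the per-class shuffling state
  // and determines which classes are eligible for release to the OS.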
67   void initLinkerInitialized(s32 ReleaseToOsInterval) {
68     if (SCUDO_FUCHSIA)
69       reportError("SizeClassAllocator32 is not supported on Fuchsia");
70 
71     PossibleRegions.initLinkerInitialized();
72     MinRegionIndex = NumRegions; // MaxRegionIndex is already initialized to 0.
73 
74     u32 Seed;
75     const u64 Time = getMonotonicTime();
76     if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
77       Seed = static_cast<u32>(
78           Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
79     const uptr PageSize = getPageSizeCached();
80     for (uptr I = 0; I < NumClasses; I++) {
81       SizeClassInfo *Sci = getSizeClassInfo(I);
82       Sci->RandState = getRandomU32(&Seed);
83       // See comment in the 64-bit primary about releasing smaller size classes.
84       Sci->CanRelease = (I != SizeClassMap::BatchClassId) &&
85                         (getSizeByClassId(I) >= (PageSize / 32));
86       if (Sci->CanRelease)
87         Sci->ReleaseInfo.LastReleaseAtNs = Time;
88     }
89     setReleaseToOsIntervalMs(ReleaseToOsInterval);
90   }
91   void init(s32 ReleaseToOsInterval) {
92     memset(this, 0, sizeof(*this));
93     initLinkerInitialized(ReleaseToOsInterval);
94   }
95 
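  // Unmaps all the regions created by the allocator, whether stashed or in
  // use. Only meant to be used by tests.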
96   void unmapTestOnly() {
97     while (NumberOfStashedRegions > 0)
98       unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
99             RegionSize);
100     for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
101       if (PossibleRegions[I])
102         unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
103     PossibleRegions.unmapTestOnly();
104   }
105 
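  // Pops a TransferBatch of blocks for ClassId from the freelist, refilling
  // it from a region if it is empty. Returns nullptr on failure (e.g., if no
  // additional memory could be mapped).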
106   TransferBatch *popBatch(CacheT *C, uptr ClassId) {
107     DCHECK_LT(ClassId, NumClasses);
108     SizeClassInfo *Sci = getSizeClassInfo(ClassId);
109     ScopedLock L(Sci->Mutex);
110     TransferBatch *B = Sci->FreeList.front();
111     if (B) {
112       Sci->FreeList.pop_front();
113     } else {
114       B = populateFreeList(C, ClassId, Sci);
115       if (UNLIKELY(!B))
116         return nullptr;
117     }
118     DCHECK_GT(B->getCount(), 0);
119     Sci->Stats.PoppedBlocks += B->getCount();
120     return B;
121   }
122 
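  // Pushes a TransferBatch of freed blocks back to the freelist for ClassId,
  // potentially triggering a release of unused memory to the OS.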
123   void pushBatch(uptr ClassId, TransferBatch *B) {
124     DCHECK_LT(ClassId, NumClasses);
125     DCHECK_GT(B->getCount(), 0);
126     SizeClassInfo *Sci = getSizeClassInfo(ClassId);
127     ScopedLock L(Sci->Mutex);
128     Sci->FreeList.push_front(B);
129     Sci->Stats.PushedBlocks += B->getCount();
130     if (Sci->CanRelease)
131       releaseToOSMaybe(Sci, ClassId);
132   }
133 
134   void disable() {
135     // The BatchClassId must be locked last since other classes can use it.
136     for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
137       if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
138         continue;
139       getSizeClassInfo(static_cast<uptr>(I))->Mutex.lock();
140     }
141     getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
142     RegionsStashMutex.lock();
143     PossibleRegions.disable();
144   }
145 
146   void enable() {
147     PossibleRegions.enable();
148     RegionsStashMutex.unlock();
149     getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
150     for (uptr I = 0; I < NumClasses; I++) {
151       if (I == SizeClassMap::BatchClassId)
152         continue;
153       getSizeClassInfo(I)->Mutex.unlock();
154     }
155   }
156 
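  // Calls Callback on the address of every block of every region that is not
  // associated with the batch class. No locks are taken, so this is meant to
  // be used while the allocator is disabled.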
157   template <typename F> void iterateOverBlocks(F Callback) {
158     for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
159       if (PossibleRegions[I] &&
160           (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
161         const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
162         const uptr From = I * RegionSize;
163         const uptr To = From + (RegionSize / BlockSize) * BlockSize;
164         for (uptr Block = From; Block < To; Block += BlockSize)
165           Callback(Block);
166       }
167   }
168 
169   void getStats(ScopedString *Str) {
170     // TODO(kostyak): get the RSS per region.
171     uptr TotalMapped = 0;
172     uptr PoppedBlocks = 0;
173     uptr PushedBlocks = 0;
174     for (uptr I = 0; I < NumClasses; I++) {
175       SizeClassInfo *Sci = getSizeClassInfo(I);
176       TotalMapped += Sci->AllocatedUser;
177       PoppedBlocks += Sci->Stats.PoppedBlocks;
178       PushedBlocks += Sci->Stats.PushedBlocks;
179     }
180     Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
181                 "remains %zu\n",
182                 TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
183     for (uptr I = 0; I < NumClasses; I++)
184       getStats(Str, I, 0);
185   }
186 
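  // Sets the memory release interval, clamped to the template bounds
  // [MinReleaseToOsIntervalMs, MaxReleaseToOsIntervalMs]. A negative interval
  // disables the interval-based (non-forced) release.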
187   void setReleaseToOsIntervalMs(s32 Interval) {
188     if (Interval >= MaxReleaseToOsIntervalMs) {
189       Interval = MaxReleaseToOsIntervalMs;
190     } else if (Interval <= MinReleaseToOsIntervalMs) {
191       Interval = MinReleaseToOsIntervalMs;
192     }
193     atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
194   }
195 
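  // Forces a release attempt on every size class, and returns the total
  // number of bytes released to the OS.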
196   uptr releaseToOS() {
197     uptr TotalReleasedBytes = 0;
198     for (uptr I = 0; I < NumClasses; I++) {
199       SizeClassInfo *Sci = getSizeClassInfo(I);
200       ScopedLock L(Sci->Mutex);
201       TotalReleasedBytes += releaseToOSMaybe(Sci, I, /*Force=*/true);
202     }
203     return TotalReleasedBytes;
204   }
205 
206   bool useMemoryTagging() { return false; }
207   void disableMemoryTagging() {}
208 
209   const char *getRegionInfoArrayAddress() const { return nullptr; }
210   static uptr getRegionInfoArraySize() { return 0; }
211 
212   static BlockInfo findNearestBlock(const char *RegionInfoData, uptr Ptr) {
213     (void)RegionInfoData;
214     (void)Ptr;
215     return {};
216   }
217 
218 private:
219   static const uptr NumClasses = SizeClassMap::NumClasses;
220   static const uptr RegionSize = 1UL << RegionSizeLog;
221   static const uptr NumRegions = SCUDO_MMAP_RANGE_SIZE >> RegionSizeLog;
222   static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
223   typedef FlatByteMap<NumRegions> ByteMap;
224 
225   struct SizeClassStats {
226     uptr PoppedBlocks;
227     uptr PushedBlocks;
228   };
229 
230   struct ReleaseToOsInfo {
231     uptr PushedBlocksAtLastRelease;
232     uptr RangesReleased;
233     uptr LastReleasedBytes;
234     u64 LastReleaseAtNs;
235   };
236 
237   struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
238     HybridMutex Mutex;
239     SinglyLinkedList<TransferBatch> FreeList;
240     uptr CurrentRegion;
241     uptr CurrentRegionAllocated;
242     SizeClassStats Stats;
243     bool CanRelease;
244     u32 RandState;
245     uptr AllocatedUser;
246     ReleaseToOsInfo ReleaseInfo;
247   };
248   static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
249 
250   uptr computeRegionId(uptr Mem) {
251     const uptr Id = Mem >> RegionSizeLog;
252     CHECK_LT(Id, NumRegions);
253     return Id;
254   }
255 
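  // Maps a new region when the stash is empty. We over-map 2 * RegionSize so
  // that a RegionSize-aligned region can always be carved out: if the base of
  // the mapping happens to be aligned, the second region is stashed for later
  // use (or unmapped if the stash is full); otherwise the unaligned leading
  // and trailing portions are unmapped.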
256   uptr allocateRegionSlow() {
257     uptr MapSize = 2 * RegionSize;
258     const uptr MapBase = reinterpret_cast<uptr>(
259         map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
260     if (UNLIKELY(!MapBase))
261       return 0;
262     const uptr MapEnd = MapBase + MapSize;
263     uptr Region = MapBase;
264     if (isAligned(Region, RegionSize)) {
265       ScopedLock L(RegionsStashMutex);
266       if (NumberOfStashedRegions < MaxStashedRegions)
267         RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
268       else
269         MapSize = RegionSize;
270     } else {
271       Region = roundUpTo(MapBase, RegionSize);
272       unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
273       MapSize = RegionSize;
274     }
275     const uptr End = Region + MapSize;
276     if (End != MapEnd)
277       unmap(reinterpret_cast<void *>(End), MapEnd - End);
278     return Region;
279   }
280 
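  // Returns a region for ClassId, taken from the stash if possible, mapped
  // otherwise. On success, the class is recorded in the bytemap and the
  // Min/MaxRegionIndex bounds are updated.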
281   uptr allocateRegion(uptr ClassId) {
282     DCHECK_LT(ClassId, NumClasses);
283     uptr Region = 0;
284     {
285       ScopedLock L(RegionsStashMutex);
286       if (NumberOfStashedRegions > 0)
287         Region = RegionsStash[--NumberOfStashedRegions];
288     }
289     if (!Region)
290       Region = allocateRegionSlow();
291     if (LIKELY(Region)) {
292       const uptr RegionIndex = computeRegionId(Region);
293       if (RegionIndex < MinRegionIndex)
294         MinRegionIndex = RegionIndex;
295       if (RegionIndex > MaxRegionIndex)
296         MaxRegionIndex = RegionIndex;
297       PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
298     }
299     return Region;
300   }
301 
302   SizeClassInfo *getSizeClassInfo(uptr ClassId) {
303     DCHECK_LT(ClassId, NumClasses);
304     return &SizeClassInfoArray[ClassId];
305   }
306 
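  // Fills TransferBatches from an array of block pointers, pushing the full
  // ones to the freelist and leaving the last (possibly partial) batch in
  // *CurrentBatch. The pointers are shuffled beforehand, except for the batch
  // class whose blocks never hold user data.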
307   bool populateBatches(CacheT *C, SizeClassInfo *Sci, uptr ClassId,
308                        TransferBatch **CurrentBatch, u32 MaxCount,
309                        void **PointersArray, u32 Count) {
310     if (ClassId != SizeClassMap::BatchClassId)
311       shuffle(PointersArray, Count, &Sci->RandState);
312     TransferBatch *B = *CurrentBatch;
313     for (uptr I = 0; I < Count; I++) {
314       if (B && B->getCount() == MaxCount) {
315         Sci->FreeList.push_back(B);
316         B = nullptr;
317       }
318       if (!B) {
319         B = C->createBatch(ClassId, PointersArray[I]);
320         if (UNLIKELY(!B))
321           return false;
322         B->clear();
323       }
324       B->add(PointersArray[I]);
325     }
326     *CurrentBatch = B;
327     return true;
328   }
329 
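  // Carves new blocks out of the current region for ClassId (or out of a
  // newly allocated one), fills TransferBatches with them, and returns one
  // non-empty batch, pushing the rest to the freelist.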
330   NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
331                                            SizeClassInfo *Sci) {
332     uptr Region;
333     uptr Offset;
    // If the size-class currently has a region associated with it, use it.
    // The newly created blocks will be located after the currently allocated
    // memory for that region (up to RegionSize). Otherwise, create a new
    // region, where the new blocks will be carved from the beginning.
338     if (Sci->CurrentRegion) {
339       Region = Sci->CurrentRegion;
340       DCHECK_GT(Sci->CurrentRegionAllocated, 0U);
341       Offset = Sci->CurrentRegionAllocated;
342     } else {
343       DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
344       Region = allocateRegion(ClassId);
345       if (UNLIKELY(!Region))
346         return nullptr;
347       C->getStats().add(StatMapped, RegionSize);
348       Sci->CurrentRegion = Region;
349       Offset = 0;
350     }
351 
352     const uptr Size = getSizeByClassId(ClassId);
353     const u32 MaxCount = TransferBatch::getMaxCached(Size);
354     DCHECK_GT(MaxCount, 0U);
    // The maximum number of blocks we should carve in the region is dictated
    // by the maximum number of batches we want to fill, and the amount of
    // memory left in the current region (we use the lower of the two). This
    // will not be 0 as we ensure that a region can hold at least one block
    // (via the static_assert and the check at the end of this function).
360     const u32 NumberOfBlocks =
361         Min(MaxNumBatches * MaxCount,
362             static_cast<u32>((RegionSize - Offset) / Size));
363     DCHECK_GT(NumberOfBlocks, 0U);
364 
365     TransferBatch *B = nullptr;
366     constexpr u32 ShuffleArraySize =
367         MaxNumBatches * TransferBatch::MaxNumCached;
368     // Fill the transfer batches and put them in the size-class freelist. We
369     // need to randomize the blocks for security purposes, so we first fill a
370     // local array that we then shuffle before populating the batches.
371     void *ShuffleArray[ShuffleArraySize];
372     u32 Count = 0;
373     const uptr AllocatedUser = Size * NumberOfBlocks;
374     for (uptr I = Region + Offset; I < Region + Offset + AllocatedUser;
375          I += Size) {
376       ShuffleArray[Count++] = reinterpret_cast<void *>(I);
377       if (Count == ShuffleArraySize) {
378         if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount,
379                                       ShuffleArray, Count)))
380           return nullptr;
381         Count = 0;
382       }
383     }
384     if (Count) {
385       if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount, ShuffleArray,
386                                     Count)))
387         return nullptr;
388     }
389     DCHECK(B);
390     if (!Sci->FreeList.empty()) {
391       Sci->FreeList.push_back(B);
392       B = Sci->FreeList.front();
393       Sci->FreeList.pop_front();
394     }
395     DCHECK_GT(B->getCount(), 0);
396 
397     C->getStats().add(StatFree, AllocatedUser);
398     DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize);
    // If there is not enough room left in the currently associated region to
    // fit more blocks, deassociate the region by resetting CurrentRegion and
    // CurrentRegionAllocated. Otherwise, update the allocated amount.
402     if (RegionSize - (Sci->CurrentRegionAllocated + AllocatedUser) < Size) {
403       Sci->CurrentRegion = 0;
404       Sci->CurrentRegionAllocated = 0;
405     } else {
406       Sci->CurrentRegionAllocated += AllocatedUser;
407     }
408     Sci->AllocatedUser += AllocatedUser;
409 
410     return B;
411   }
412 
413   void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
414     SizeClassInfo *Sci = getSizeClassInfo(ClassId);
415     if (Sci->AllocatedUser == 0)
416       return;
417     const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
418     const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
419     Str->append("  %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
420                 "inuse: %6zu avail: %6zu rss: %6zuK releases: %6zu\n",
421                 ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
422                 Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
423                 AvailableChunks, Rss >> 10, Sci->ReleaseInfo.RangesReleased);
424   }
425 
426   s32 getReleaseToOsIntervalMs() {
427     return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed);
428   }
429 
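  // Attempts to release the unused pages of the regions associated with
  // ClassId back to the OS. The attempt is skipped if too little memory has
  // been freed since the last release; unless Force is true, it is also
  // rate-limited by the release interval. Returns the number of bytes
  // released.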
430   NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
431                                  bool Force = false) {
432     const uptr BlockSize = getSizeByClassId(ClassId);
433     const uptr PageSize = getPageSizeCached();
434 
435     CHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
436     const uptr BytesInFreeList =
437         Sci->AllocatedUser -
438         (Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize;
439     if (BytesInFreeList < PageSize)
440       return 0; // No chance to release anything.
441     const uptr BytesPushed =
442         (Sci->Stats.PushedBlocks - Sci->ReleaseInfo.PushedBlocksAtLastRelease) *
443         BlockSize;
444     if (BytesPushed < PageSize)
445       return 0; // Nothing new to release.
446 
447     if (!Force) {
448       const s32 IntervalMs = getReleaseToOsIntervalMs();
449       if (IntervalMs < 0)
450         return 0;
451       if (Sci->ReleaseInfo.LastReleaseAtNs +
452               static_cast<u64>(IntervalMs) * 1000000 >
453           getMonotonicTime()) {
454         return 0; // Memory was returned recently.
455       }
456     }
457 
458     // TODO(kostyak): currently not ideal as we loop over all regions and
459     // iterate multiple times over the same freelist if a ClassId spans multiple
460     // regions. But it will have to do for now.
461     uptr TotalReleasedBytes = 0;
462     const uptr MaxSize = (RegionSize / BlockSize) * BlockSize;
463     for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
464       if (PossibleRegions[I] - 1U == ClassId) {
465         const uptr Region = I * RegionSize;
        // If the region is the one currently associated with the size class,
        // we only need to release up to CurrentRegionAllocated; otherwise up
        // to MaxSize.
468         const uptr Size = (Region == Sci->CurrentRegion)
469                               ? Sci->CurrentRegionAllocated
470                               : MaxSize;
471         ReleaseRecorder Recorder(Region);
472         releaseFreeMemoryToOS(Sci->FreeList, Region, Size, BlockSize,
473                               &Recorder);
474         if (Recorder.getReleasedRangesCount() > 0) {
475           Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
476           Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
477           Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
478           TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
479         }
480       }
481     }
482     Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
483     return TotalReleasedBytes;
484   }
485 
486   SizeClassInfo SizeClassInfoArray[NumClasses];
487 
  // Track the regions in use: 0 means the region is unused, otherwise the
  // value stored is ClassId + 1.
489   ByteMap PossibleRegions;
  // Keep track of the lowest & highest region indices allocated, to avoid
  // looping over all NumRegions entries.
492   uptr MinRegionIndex;
493   uptr MaxRegionIndex;
494   atomic_s32 ReleaseToOsIntervalMs;
495   // Unless several threads request regions simultaneously from different size
496   // classes, the stash rarely contains more than 1 entry.
497   static constexpr uptr MaxStashedRegions = 4;
498   HybridMutex RegionsStashMutex;
499   uptr NumberOfStashedRegions;
500   uptr RegionsStash[MaxStashedRegions];
501 };
502 
503 } // namespace scudo
504 
505 #endif // SCUDO_PRIMARY32_H_
506