//===-- primary32.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PRIMARY32_H_
#define SCUDO_PRIMARY32_H_

#include "bytemap.h"
#include "common.h"
#include "list.h"
#include "local_cache.h"
#include "options.h"
#include "release.h"
#include "report.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// SizeClassAllocator32 is an allocator for 32 or 64-bit address space.
//
// It maps Regions of 2^RegionSizeLog bytes aligned on a 2^RegionSizeLog bytes
// boundary, and keeps a bytemap of the mappable address space to track the
// size class each region is associated with.
//
// Mapped regions are split into equally sized Blocks according to the size
// class they belong to, and the associated pointers are shuffled to prevent
// any predictable address pattern (the predictability increases with the
// block size).
//
// Regions for size class 0 are special and used to hold TransferBatches,
// which allow transferring arrays of pointers from the global size class
// freelist to the thread-specific freelist for said class, and back.
//
// Memory used by this allocator is never unmapped but can be partially
// reclaimed if the platform allows for it.
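//
// The allocator is parameterized by a Config type. A minimal sketch of the
// fields this header relies on is given below; the field names mirror the
// Config::... references used in this file, while the SizeClassMap type and
// the concrete values are purely illustrative and come from the actual
// allocator configuration in practice.
//
//   struct ExampleConfig {
//     typedef DefaultSizeClassMap SizeClassMap;  // illustrative choice
//     typedef uptr PrimaryCompactPtrT;           // illustrative choice
//     static const uptr PrimaryRegionSizeLog = 18U;  // illustrative value
//     static const uptr PrimaryGroupSizeLog = 18U;   // illustrative value
//     static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
//     static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
//   };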

template <typename Config> class SizeClassAllocator32 {
public:
  typedef typename Config::PrimaryCompactPtrT CompactPtrT;
  typedef typename Config::SizeClassMap SizeClassMap;
  static const uptr GroupSizeLog = Config::PrimaryGroupSizeLog;
  // The bytemap can only track UINT8_MAX - 1 classes.
  static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
  // Regions should be large enough to hold the largest Block.
  static_assert((1UL << Config::PrimaryRegionSizeLog) >= SizeClassMap::MaxSize,
                "");
  typedef SizeClassAllocator32<Config> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
  typedef typename CacheT::TransferBatch TransferBatch;
  typedef typename CacheT::BatchGroup BatchGroup;

  static uptr getSizeByClassId(uptr ClassId) {
    return (ClassId == SizeClassMap::BatchClassId)
               ? sizeof(TransferBatch)
               : SizeClassMap::getSizeByClassId(ClassId);
  }

  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }

  void init(s32 ReleaseToOsInterval) {
    if (SCUDO_FUCHSIA)
      reportError("SizeClassAllocator32 is not supported on Fuchsia");

    if (SCUDO_TRUSTY)
      reportError("SizeClassAllocator32 is not supported on Trusty");

    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    PossibleRegions.init();
    u32 Seed;
    const u64 Time = getMonotonicTime();
    if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
      Seed = static_cast<u32>(
          Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      Sci->RandState = getRandomU32(&Seed);
      // Sci->MaxRegionIndex is already initialized to 0.
      Sci->MinRegionIndex = NumRegions;
      Sci->ReleaseInfo.LastReleaseAtNs = Time;
    }
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

  void unmapTestOnly() {
    while (NumberOfStashedRegions > 0)
      unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
            RegionSize);
    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      if (Sci->MinRegionIndex < MinRegionIndex)
        MinRegionIndex = Sci->MinRegionIndex;
      if (Sci->MaxRegionIndex > MaxRegionIndex)
        MaxRegionIndex = Sci->MaxRegionIndex;
      *Sci = {};
    }
    for (uptr I = MinRegionIndex; I < MaxRegionIndex; I++)
      if (PossibleRegions[I])
        unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
    PossibleRegions.unmapTestOnly();
  }

  CompactPtrT compactPtr(UNUSED uptr ClassId, uptr Ptr) const {
    return static_cast<CompactPtrT>(Ptr);
  }

  void *decompactPtr(UNUSED uptr ClassId, CompactPtrT CompactPtr) const {
    return reinterpret_cast<void *>(static_cast<uptr>(CompactPtr));
  }

  uptr compactPtrGroup(CompactPtrT CompactPtr) {
    return CompactPtr >> GroupSizeLog;
  }
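
  // For example (illustrative numbers): with GroupSizeLog == 18, the compact
  // pointers 0x00000 and 0x3ffff both map to group 0 while 0x40000 maps to
  // group 1, i.e. blocks within the same 2^GroupSizeLog-byte aligned window
  // share a BatchGroup.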

  TransferBatch *popBatch(CacheT *C, uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    ScopedLock L(Sci->Mutex);
    TransferBatch *B = popBatchImpl(C, ClassId);
    if (UNLIKELY(!B)) {
      if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
        return nullptr;
      B = popBatchImpl(C, ClassId);
      // If `populateFreeList` succeeded, we are supposed to get free blocks.
      DCHECK_NE(B, nullptr);
    }
    Sci->Stats.PoppedBlocks += B->getCount();
    return B;
  }

  // Push the array of free blocks to the designated batch group.
  void pushBlocks(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size) {
    DCHECK_LT(ClassId, NumClasses);
    DCHECK_GT(Size, 0);

    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    if (ClassId == SizeClassMap::BatchClassId) {
      ScopedLock L(Sci->Mutex);
      // Constructing a batch group in the free list will use two blocks in
      // BatchClassId. If we are pushing BatchClassId blocks, we will use the
      // blocks in the array directly (we can't delegate to the local cache,
      // which would cause a recursive allocation). However, the number of
      // free blocks may be less than two. Therefore, populate the free list
      // before inserting the blocks.
      if (Size == 1 && !populateFreeList(C, ClassId, Sci))
        return;
      pushBlocksImpl(C, ClassId, Array, Size);
      Sci->Stats.PushedBlocks += Size;
      return;
    }

    // TODO(chiahungduan): Consider not doing grouping if the group size is
    // not greater than the block size with a certain scale.

    // Sort the blocks so that blocks belonging to the same group can be
    // pushed together.
    bool SameGroup = true;
    for (u32 I = 1; I < Size; ++I) {
      if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I]))
        SameGroup = false;
      CompactPtrT Cur = Array[I];
      u32 J = I;
      while (J > 0 && compactPtrGroup(Cur) < compactPtrGroup(Array[J - 1])) {
        Array[J] = Array[J - 1];
        --J;
      }
      Array[J] = Cur;
    }

    ScopedLock L(Sci->Mutex);
    pushBlocksImpl(C, ClassId, Array, Size, SameGroup);

    Sci->Stats.PushedBlocks += Size;
    if (ClassId != SizeClassMap::BatchClassId)
      releaseToOSMaybe(Sci, ClassId);
  }

  void disable() {
    // The BatchClassId must be locked last since other classes can use it.
    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
      if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
        continue;
      getSizeClassInfo(static_cast<uptr>(I))->Mutex.lock();
    }
    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
    RegionsStashMutex.lock();
    PossibleRegions.disable();
  }

  void enable() {
    PossibleRegions.enable();
    RegionsStashMutex.unlock();
    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      getSizeClassInfo(I)->Mutex.unlock();
    }
  }

  template <typename F> void iterateOverBlocks(F Callback) {
    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      if (Sci->MinRegionIndex < MinRegionIndex)
        MinRegionIndex = Sci->MinRegionIndex;
      if (Sci->MaxRegionIndex > MaxRegionIndex)
        MaxRegionIndex = Sci->MaxRegionIndex;
    }
    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
      if (PossibleRegions[I] &&
          (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
        const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
        const uptr From = I * RegionSize;
        const uptr To = From + (RegionSize / BlockSize) * BlockSize;
        for (uptr Block = From; Block < To; Block += BlockSize)
          Callback(Block);
      }
  }

  void getStats(ScopedString *Str) {
    // TODO(kostyak): get the RSS per region.
    uptr TotalMapped = 0;
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      TotalMapped += Sci->AllocatedUser;
      PoppedBlocks += Sci->Stats.PoppedBlocks;
      PushedBlocks += Sci->Stats.PushedBlocks;
    }
    Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
                "remains %zu\n",
                TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
    for (uptr I = 0; I < NumClasses; I++)
      getStats(Str, I, 0);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), Config::PrimaryMaxReleaseToOsIntervalMs),
          Config::PrimaryMinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    // Not supported by the Primary, but not an error either.
    return true;
  }

  uptr releaseToOS() {
    uptr TotalReleasedBytes = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      TotalReleasedBytes += releaseToOSMaybe(Sci, I, /*Force=*/true);
    }
    return TotalReleasedBytes;
  }

  const char *getRegionInfoArrayAddress() const { return nullptr; }
  static uptr getRegionInfoArraySize() { return 0; }

  static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
                                    UNUSED uptr Ptr) {
    return {};
  }

  AtomicOptions Options;

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr RegionSize = 1UL << Config::PrimaryRegionSizeLog;
  static const uptr NumRegions =
      SCUDO_MMAP_RANGE_SIZE >> Config::PrimaryRegionSizeLog;
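  // Illustrative sizing: assuming PrimaryRegionSizeLog == 18 and a 32-bit
  // mappable range of 2^32 bytes, each Region is 256 KiB and NumRegions is
  // 2^14 == 16384, which keeps the PossibleRegions bytemap small.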
  static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
  typedef FlatByteMap<NumRegions> ByteMap;

  struct SizeClassStats {
    uptr PoppedBlocks;
    uptr PushedBlocks;
  };

  struct ReleaseToOsInfo {
    uptr PushedBlocksAtLastRelease;
    uptr RangesReleased;
    uptr LastReleasedBytes;
    u64 LastReleaseAtNs;
  };

  struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
    HybridMutex Mutex;
    SinglyLinkedList<BatchGroup> FreeList;
    uptr CurrentRegion;
    uptr CurrentRegionAllocated;
    SizeClassStats Stats;
    u32 RandState;
    uptr AllocatedUser;
    // Lowest & highest region index allocated for this size class, to avoid
    // looping through the whole NumRegions.
    uptr MinRegionIndex;
    uptr MaxRegionIndex;
    ReleaseToOsInfo ReleaseInfo;
  };
  static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");

  uptr computeRegionId(uptr Mem) {
    const uptr Id = Mem >> Config::PrimaryRegionSizeLog;
    CHECK_LT(Id, NumRegions);
    return Id;
  }
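
  // Slow path of region allocation: map twice the region size so that a
  // RegionSize-aligned region can always be carved out of the mapping. The
  // unused half is stashed for a future request when possible, otherwise
  // unmapped.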
  uptr allocateRegionSlow() {
    uptr MapSize = 2 * RegionSize;
    const uptr MapBase = reinterpret_cast<uptr>(
        map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
    if (!MapBase)
      return 0;
    const uptr MapEnd = MapBase + MapSize;
    uptr Region = MapBase;
    if (isAligned(Region, RegionSize)) {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions < MaxStashedRegions)
        RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
      else
        MapSize = RegionSize;
    } else {
      Region = roundUpTo(MapBase, RegionSize);
      unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
      MapSize = RegionSize;
    }
    const uptr End = Region + MapSize;
    if (End != MapEnd)
      unmap(reinterpret_cast<void *>(End), MapEnd - End);
    return Region;
  }

  uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    uptr Region = 0;
    {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions > 0)
        Region = RegionsStash[--NumberOfStashedRegions];
    }
    if (!Region)
      Region = allocateRegionSlow();
    if (LIKELY(Region)) {
      // Sci->Mutex is held by the caller, updating the Min/Max is safe.
      const uptr RegionIndex = computeRegionId(Region);
      if (RegionIndex < Sci->MinRegionIndex)
        Sci->MinRegionIndex = RegionIndex;
      if (RegionIndex > Sci->MaxRegionIndex)
        Sci->MaxRegionIndex = RegionIndex;
      PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
    }
    return Region;
  }

  SizeClassInfo *getSizeClassInfo(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    return &SizeClassInfoArray[ClassId];
  }

  // Push the blocks to their batch group. The layout will be like:
  //
  // FreeList -> BG -> BG -> BG
  //             |     |     |
  //             v     v     v
  //             TB    TB    TB
  //             |
  //             v
  //             TB
  //
  // Each BatchGroup (BG) is associated with a unique group id and the free
  // blocks in it are managed by a list of TransferBatches (TB). To reduce the
  // cost of inserting blocks, the BGs are kept sorted and the input `Array`
  // is supposed to be sorted as well, so that the sorted property can be
  // maintained cheaply.
  // Use `SameGroup=true` to indicate that all blocks in the array are from
  // the same group; the group id of each block is then not checked
  // individually.
  //
  // Note that this aims at a better management of dirty pages, i.e., the RSS
  // usage won't grow indefinitely. There's an exception: we may not put a
  // block into its associated group. While populating new blocks, a run may
  // cross different groups. However, most such runs fall into a single group
  // and the blocks are supposed to be popped soon, so it's not worth sorting
  // an almost-sorted array; we pass `SameGroup=true` instead.
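  //
  // For instance (illustrative): pushing a sorted Array whose blocks fall
  // into groups {g0, g0, g1, g1, g1} with SameGroup == false inserts a run of
  // two blocks into the BatchGroup with GroupId g0 and a run of three blocks
  // into the one with GroupId g1, creating either BatchGroup on the fly if it
  // is not in the FreeList yet.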
  //
  // The region mutex needs to be held while calling this method.
  void pushBlocksImpl(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size,
                      bool SameGroup = false) {
    DCHECK_GT(Size, 0U);
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);

    auto CreateGroup = [&](uptr GroupId) {
      BatchGroup *BG = nullptr;
      TransferBatch *TB = nullptr;
      if (ClassId == SizeClassMap::BatchClassId) {
        DCHECK_GE(Size, 2U);
        BG = reinterpret_cast<BatchGroup *>(
            decompactPtr(ClassId, Array[Size - 1]));
        BG->Batches.clear();

        TB = reinterpret_cast<TransferBatch *>(
            decompactPtr(ClassId, Array[Size - 2]));
        TB->clear();
      } else {
        BG = C->createGroup();
        BG->Batches.clear();

        TB = C->createBatch(ClassId, nullptr);
        TB->clear();
      }

      BG->GroupId = GroupId;
      BG->Batches.push_front(TB);
      BG->PushedBlocks = 0;
      BG->PushedBlocksAtLastCheckpoint = 0;
      BG->MaxCachedPerBatch =
          TransferBatch::getMaxCached(getSizeByClassId(ClassId));

      return BG;
    };

    auto InsertBlocks = [&](BatchGroup *BG, CompactPtrT *Array, u32 Size) {
      SinglyLinkedList<TransferBatch> &Batches = BG->Batches;
      TransferBatch *CurBatch = Batches.front();
      DCHECK_NE(CurBatch, nullptr);

      for (u32 I = 0; I < Size;) {
        DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
        u16 UnusedSlots =
            static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
        if (UnusedSlots == 0) {
          CurBatch = C->createBatch(
              ClassId,
              reinterpret_cast<void *>(decompactPtr(ClassId, Array[I])));
          CurBatch->clear();
          Batches.push_front(CurBatch);
          UnusedSlots = BG->MaxCachedPerBatch;
        }
        // `UnusedSlots` is u16 so the result will also fit in u16.
        u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
        CurBatch->appendFromArray(&Array[I], AppendSize);
        I += AppendSize;
      }

      BG->PushedBlocks += Size;
    };

    BatchGroup *Cur = Sci->FreeList.front();

    if (ClassId == SizeClassMap::BatchClassId) {
      if (Cur == nullptr) {
        // Don't need to classify BatchClassId.
        Cur = CreateGroup(/*GroupId=*/0);
        Sci->FreeList.push_front(Cur);
      }
      InsertBlocks(Cur, Array, Size);
      return;
    }

    // In the following, `Cur` always points to the BatchGroup for blocks that
    // will be pushed next. `Prev` is the element right before `Cur`.
    BatchGroup *Prev = nullptr;

    while (Cur != nullptr && compactPtrGroup(Array[0]) > Cur->GroupId) {
      Prev = Cur;
      Cur = Cur->Next;
    }

    if (Cur == nullptr || compactPtrGroup(Array[0]) != Cur->GroupId) {
      Cur = CreateGroup(compactPtrGroup(Array[0]));
      if (Prev == nullptr)
        Sci->FreeList.push_front(Cur);
      else
        Sci->FreeList.insert(Prev, Cur);
    }

    // All the blocks are from the same group, just push without checking the
    // group id.
    if (SameGroup) {
      InsertBlocks(Cur, Array, Size);
      return;
    }

    // The blocks are sorted by group id. Determine the runs of blocks that
    // belong to the same group and push each run to its group together.
    u32 Count = 1;
    for (u32 I = 1; I < Size; ++I) {
      if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I])) {
        DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->GroupId);
        InsertBlocks(Cur, Array + I - Count, Count);

        while (Cur != nullptr && compactPtrGroup(Array[I]) > Cur->GroupId) {
          Prev = Cur;
          Cur = Cur->Next;
        }

        if (Cur == nullptr || compactPtrGroup(Array[I]) != Cur->GroupId) {
          Cur = CreateGroup(compactPtrGroup(Array[I]));
          DCHECK_NE(Prev, nullptr);
          Sci->FreeList.insert(Prev, Cur);
        }

        Count = 1;
      } else {
        ++Count;
      }
    }

    InsertBlocks(Cur, Array + Size - Count, Count);
  }

  // Pop one TransferBatch from a BatchGroup. The BatchGroup with the smallest
  // group id will be considered first.
  //
  // The region mutex needs to be held while calling this method.
  TransferBatch *popBatchImpl(CacheT *C, uptr ClassId) {
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    if (Sci->FreeList.empty())
      return nullptr;

    SinglyLinkedList<TransferBatch> &Batches = Sci->FreeList.front()->Batches;
    DCHECK(!Batches.empty());

    TransferBatch *B = Batches.front();
    Batches.pop_front();
    DCHECK_NE(B, nullptr);
    DCHECK_GT(B->getCount(), 0U);

    if (Batches.empty()) {
      BatchGroup *BG = Sci->FreeList.front();
      Sci->FreeList.pop_front();

      // We don't keep a BatchGroup with zero blocks, to avoid empty-checking
      // while allocating. Note that the block used to construct the
      // BatchGroup is recorded as a free block in the last element of
      // BatchGroup::Batches; this means that once we pop the last
      // TransferBatch, that block is implicitly deallocated.
      if (ClassId != SizeClassMap::BatchClassId)
        C->deallocate(SizeClassMap::BatchClassId, BG);
    }

    return B;
  }
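
  // Refill path for an empty freelist: carves up to MaxNumBatches batches'
  // worth of new blocks from the current region (mapping a new one if
  // needed), shuffles them (except for the batch class itself) and pushes
  // them to the freelist. Returns false if no region could be obtained.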
  NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci) {
    uptr Region;
    uptr Offset;
    // If the size-class currently has a region associated to it, use it. The
    // newly created blocks will be located after the currently allocated
    // memory for that region (up to RegionSize). Otherwise, create a new
    // region, where the new blocks will be carved from the beginning.
    if (Sci->CurrentRegion) {
      Region = Sci->CurrentRegion;
      DCHECK_GT(Sci->CurrentRegionAllocated, 0U);
      Offset = Sci->CurrentRegionAllocated;
    } else {
      DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
      Region = allocateRegion(Sci, ClassId);
      if (UNLIKELY(!Region))
        return false;
      C->getStats().add(StatMapped, RegionSize);
      Sci->CurrentRegion = Region;
      Offset = 0;
    }

    const uptr Size = getSizeByClassId(ClassId);
    const u16 MaxCount = TransferBatch::getMaxCached(Size);
    DCHECK_GT(MaxCount, 0U);
    // The maximum number of blocks we should carve in the region is dictated
    // by the maximum number of batches we want to fill, and the amount of
    // memory left in the current region (we use the lowest of the two). This
    // will not be 0 as we ensure that a region can hold at least one block
    // (via static_assert and at the end of this function).
    const u32 NumberOfBlocks =
        Min(MaxNumBatches * MaxCount,
            static_cast<u32>((RegionSize - Offset) / Size));
    DCHECK_GT(NumberOfBlocks, 0U);

    constexpr u32 ShuffleArraySize =
        MaxNumBatches * TransferBatch::MaxNumCached;
    // Fill the transfer batches and put them in the size-class freelist. We
    // need to randomize the blocks for security purposes, so we first fill a
    // local array that we then shuffle before populating the batches.
    CompactPtrT ShuffleArray[ShuffleArraySize];
    DCHECK_LE(NumberOfBlocks, ShuffleArraySize);

    uptr P = Region + Offset;
    for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
      ShuffleArray[I] = reinterpret_cast<CompactPtrT>(P);
    // No need to shuffle the batches size class.
    if (ClassId != SizeClassMap::BatchClassId)
      shuffle(ShuffleArray, NumberOfBlocks, &Sci->RandState);
    for (u32 I = 0; I < NumberOfBlocks;) {
      // `MaxCount` is u16 so the result will also fit in u16.
      const u16 N = static_cast<u16>(Min<u32>(MaxCount, NumberOfBlocks - I));
      // Note that the N blocks here may have different group ids; that only
      // happens when the run crosses a group-size boundary. Instead of
      // sorting them, treat them as belonging to the same group here to avoid
      // sorting the almost-sorted blocks.
      pushBlocksImpl(C, ClassId, &ShuffleArray[I], N, /*SameGroup=*/true);
      I += N;
    }

    const uptr AllocatedUser = Size * NumberOfBlocks;
    C->getStats().add(StatFree, AllocatedUser);
    DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize);
    // If there is not enough room in the region currently associated to fit
    // more blocks, we deassociate the region by resetting CurrentRegion and
    // CurrentRegionAllocated. Otherwise, update the allocated amount.
    if (RegionSize - (Sci->CurrentRegionAllocated + AllocatedUser) < Size) {
      Sci->CurrentRegion = 0;
      Sci->CurrentRegionAllocated = 0;
    } else {
      Sci->CurrentRegionAllocated += AllocatedUser;
    }
    Sci->AllocatedUser += AllocatedUser;

    return true;
  }

  void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    if (Sci->AllocatedUser == 0)
      return;
    const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
    const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
    Str->append("  %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
                "inuse: %6zu avail: %6zu rss: %6zuK releases: %6zu\n",
                ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
                Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
                AvailableChunks, Rss >> 10, Sci->ReleaseInfo.RangesReleased);
  }

  NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
                                 bool Force = false) {
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr PageSize = getPageSizeCached();

    DCHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
    const uptr BytesInFreeList =
        Sci->AllocatedUser -
        (Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize;
    if (BytesInFreeList < PageSize)
      return 0; // No chance to release anything.
    const uptr BytesPushed =
        (Sci->Stats.PushedBlocks - Sci->ReleaseInfo.PushedBlocksAtLastRelease) *
        BlockSize;
    if (BytesPushed < PageSize)
      return 0; // Nothing new to release.
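
    // For example (illustrative): with a 4 KiB page, CheckDensity below is
    // true for blocks smaller than 256 bytes, which are the expensive ones to
    // release.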
    const bool CheckDensity = BlockSize < PageSize / 16U;
    // Releasing smaller blocks is expensive, so we want to make sure that a
    // significant amount of bytes are free, and that there has been a good
    // amount of batches pushed to the freelist before attempting to release.
    if (CheckDensity) {
      if (!Force && BytesPushed < Sci->AllocatedUser / 16U)
        return 0;
    }

    if (!Force) {
      const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
      if (IntervalMs < 0)
        return 0;
      if (Sci->ReleaseInfo.LastReleaseAtNs +
              static_cast<u64>(IntervalMs) * 1000000 >
          getMonotonicTime()) {
        return 0; // Memory was returned recently.
      }
    }

    const uptr First = Sci->MinRegionIndex;
    const uptr Last = Sci->MaxRegionIndex;
    DCHECK_NE(Last, 0U);
    DCHECK_LE(First, Last);
    uptr TotalReleasedBytes = 0;
    const uptr Base = First * RegionSize;
    const uptr NumberOfRegions = Last - First + 1U;
    const uptr GroupSize = (1U << GroupSizeLog);
    const uptr CurRegionGroupId =
        compactPtrGroup(compactPtr(ClassId, Sci->CurrentRegion));

    ReleaseRecorder Recorder(Base);
    PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions);

    auto DecompactPtr = [](CompactPtrT CompactPtr) {
      return reinterpret_cast<uptr>(CompactPtr);
    };
    for (BatchGroup &BG : Sci->FreeList) {
      const uptr PushedBytesDelta =
          BG.PushedBlocks - BG.PushedBlocksAtLastCheckpoint;
      if (PushedBytesDelta * BlockSize < PageSize)
        continue;

      uptr AllocatedGroupSize = BG.GroupId == CurRegionGroupId
                                    ? Sci->CurrentRegionAllocated
                                    : GroupSize;
      if (AllocatedGroupSize == 0)
        continue;

      // TransferBatches are pushed in front of BG.Batches. The first one may
      // not have all caches used.
      const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
                             BG.Batches.front()->getCount();
      const uptr BytesInBG = NumBlocks * BlockSize;
      // Given the randomness property, we try to release the pages only if
      // the bytes used by free blocks exceed a certain proportion of the
      // allocated space.
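      // For example (illustrative): with BlockSize == 32 the threshold below
      // evaluates to 100 - 1 - 32 / 16 == 97, i.e. roughly 97% of the group's
      // allocated bytes must sit in the freelist before its pages are
      // considered for release.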
      if (CheckDensity && (BytesInBG * 100U) / AllocatedGroupSize <
                              (100U - 1U - BlockSize / 16U)) {
        continue;
      }

      BG.PushedBlocksAtLastCheckpoint = BG.PushedBlocks;
      // Note that we don't always visit blocks in each BatchGroup, so we may
      // miss the chance of releasing certain pages that cross BatchGroups.
      Context.markFreeBlocks(BG.Batches, DecompactPtr, Base);
    }

    if (!Context.hasBlockMarked())
      return 0;

    auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
      return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
    };
    releaseFreeMemoryToOS(Context, Recorder, SkipRegion);

    if (Recorder.getReleasedRangesCount() > 0) {
      Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
      Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
      Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
      TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
    }
    Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();

    return TotalReleasedBytes;
  }

  SizeClassInfo SizeClassInfoArray[NumClasses] = {};

  // Track the regions in use, 0 is unused, otherwise store ClassId + 1.
  ByteMap PossibleRegions = {};
  atomic_s32 ReleaseToOsIntervalMs = {};
  // Unless several threads request regions simultaneously from different size
  // classes, the stash rarely contains more than 1 entry.
  static constexpr uptr MaxStashedRegions = 4;
  HybridMutex RegionsStashMutex;
  uptr NumberOfStashedRegions = 0;
  uptr RegionsStash[MaxStashedRegions] = {};
};

} // namespace scudo

#endif // SCUDO_PRIMARY32_H_