10b57cec5SDimitry Andric //===-- primary64.h ---------------------------------------------*- C++ -*-===//
20b57cec5SDimitry Andric //
30b57cec5SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
40b57cec5SDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
50b57cec5SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
60b57cec5SDimitry Andric //
70b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
80b57cec5SDimitry Andric 
90b57cec5SDimitry Andric #ifndef SCUDO_PRIMARY64_H_
100b57cec5SDimitry Andric #define SCUDO_PRIMARY64_H_
110b57cec5SDimitry Andric 
125f757f3fSDimitry Andric #include "allocator_common.h"
130b57cec5SDimitry Andric #include "bytemap.h"
140b57cec5SDimitry Andric #include "common.h"
150b57cec5SDimitry Andric #include "list.h"
160b57cec5SDimitry Andric #include "local_cache.h"
1706c3fb27SDimitry Andric #include "mem_map.h"
185ffd83dbSDimitry Andric #include "memtag.h"
19e8d8bef9SDimitry Andric #include "options.h"
200b57cec5SDimitry Andric #include "release.h"
210b57cec5SDimitry Andric #include "stats.h"
220b57cec5SDimitry Andric #include "string_utils.h"
2306c3fb27SDimitry Andric #include "thread_annotations.h"
240b57cec5SDimitry Andric 
255f757f3fSDimitry Andric #include "condition_variable.h"
265f757f3fSDimitry Andric 
270b57cec5SDimitry Andric namespace scudo {
280b57cec5SDimitry Andric 
290b57cec5SDimitry Andric // SizeClassAllocator64 is an allocator tuned for 64-bit address space.
300b57cec5SDimitry Andric //
310b57cec5SDimitry Andric // It starts by reserving NumClasses * 2^RegionSizeLog bytes, equally divided in
320b57cec5SDimitry Andric // Regions, specific to each size class. Note that the base of that mapping is
// random (based on the platform specific map() capabilities). If
// PrimaryEnableRandomOffset is set, each Region actually starts at a random
// offset from its base.
360b57cec5SDimitry Andric //
370b57cec5SDimitry Andric // Regions are mapped incrementally on demand to fulfill allocation requests,
380b57cec5SDimitry Andric // those mappings being split into equally sized Blocks based on the size class
390b57cec5SDimitry Andric // they belong to. The Blocks created are shuffled to prevent predictable
400b57cec5SDimitry Andric // address patterns (the predictability increases with the size of the Blocks).
410b57cec5SDimitry Andric //
420b57cec5SDimitry Andric // The 1st Region (for size class 0) holds the TransferBatches. This is a
430b57cec5SDimitry Andric // structure used to transfer arrays of available pointers from the class size
440b57cec5SDimitry Andric // freelist to the thread specific freelist, and back.
450b57cec5SDimitry Andric //
460b57cec5SDimitry Andric // The memory used by this allocator is never unmapped, but can be partially
4768d75effSDimitry Andric // released if the platform allows for it.
480b57cec5SDimitry Andric 
49e8d8bef9SDimitry Andric template <typename Config> class SizeClassAllocator64 {
500b57cec5SDimitry Andric public:
5106c3fb27SDimitry Andric   typedef typename Config::Primary::CompactPtrT CompactPtrT;
5206c3fb27SDimitry Andric   typedef typename Config::Primary::SizeClassMap SizeClassMap;
535f757f3fSDimitry Andric   typedef typename ConditionVariableState<
545f757f3fSDimitry Andric       typename Config::Primary>::ConditionVariableT ConditionVariableT;
5506c3fb27SDimitry Andric   static const uptr CompactPtrScale = Config::Primary::CompactPtrScale;
565f757f3fSDimitry Andric   static const uptr RegionSizeLog = Config::Primary::RegionSizeLog;
5706c3fb27SDimitry Andric   static const uptr GroupSizeLog = Config::Primary::GroupSizeLog;
585f757f3fSDimitry Andric   static_assert(RegionSizeLog >= GroupSizeLog,
595f757f3fSDimitry Andric                 "Group size shouldn't be greater than the region size");
6006c3fb27SDimitry Andric   static const uptr GroupScale = GroupSizeLog - CompactPtrScale;
61e8d8bef9SDimitry Andric   typedef SizeClassAllocator64<Config> ThisT;
620b57cec5SDimitry Andric   typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
635f757f3fSDimitry Andric   typedef TransferBatch<ThisT> TransferBatchT;
645f757f3fSDimitry Andric   typedef BatchGroup<ThisT> BatchGroupT;
655f757f3fSDimitry Andric 
665f757f3fSDimitry Andric   static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
675f757f3fSDimitry Andric                 "BatchGroupT uses the same class size as TransferBatchT");
680b57cec5SDimitry Andric 
getSizeByClassId(uptr ClassId)690b57cec5SDimitry Andric   static uptr getSizeByClassId(uptr ClassId) {
700b57cec5SDimitry Andric     return (ClassId == SizeClassMap::BatchClassId)
715f757f3fSDimitry Andric                ? roundUp(sizeof(TransferBatchT), 1U << CompactPtrScale)
720b57cec5SDimitry Andric                : SizeClassMap::getSizeByClassId(ClassId);
730b57cec5SDimitry Andric   }
740b57cec5SDimitry Andric 
canAllocate(uptr Size)750b57cec5SDimitry Andric   static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
760b57cec5SDimitry Andric 
conditionVariableEnabled()775f757f3fSDimitry Andric   static bool conditionVariableEnabled() {
785f757f3fSDimitry Andric     return ConditionVariableState<typename Config::Primary>::enabled();
795f757f3fSDimitry Andric   }
805f757f3fSDimitry Andric 
  // One-time initialization: reserves the whole primary address range, carves
  // it into per-class regions (with an optional random page offset), seeds the
  // per-region PRNG state, shuffles the region order, and sets the initial
  // release-to-OS interval.
  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));

    const uptr PageSize = getPageSizeCached();
    const uptr GroupSize = (1UL << GroupSizeLog);
    const uptr PagesInGroup = GroupSize / PageSize;
    const uptr MinSizeClass = getSizeByClassId(1);
    // When trying to release pages back to memory, visiting smaller size
    // classes is expensive. Therefore, we only try to release smaller size
    // classes when the amount of free blocks goes over a certain threshold (see
    // the comment in releaseToOSMaybe() for more details). For example, for
    // size class 32, we only do the release when the size of free blocks is
    // greater than 97% of pages in a group. However, this may introduce another
    // issue: the number of free blocks may bounce between 97% ~ 100%, which
    // means we may attempt many page releases but only release very few pages
    // (less than 3% in a group). Even though `ReleaseToOsIntervalMs` slightly
    // reduces the frequency of these calls, it is better to have another guard
    // to mitigate this issue.
    //
    // Here we add another constraint on the minimum size requirement. The
    // constraint is determined by the size of in-use blocks in the minimal size
    // class. Take size class 32 as an example,
    //
    //   +-     one memory group      -+
    //   +----------------------+------+
    //   |  97% of free blocks  |      |
    //   +----------------------+------+
    //                           \    /
    //                      3% in-use blocks
    //
    //   * The release size threshold is 97%.
    //
    // The 3% size in a group is about 7 pages. For two consecutive
    // releaseToOSMaybe(), we require the difference between `PushedBlocks`
    // to be greater than 7 pages. This mitigates the page releasing
    // thrashing which is caused by memory usage bouncing around the threshold.
    // The smallest size class takes longest time to do the page release so we
    // use its size of in-use blocks as a heuristic.
    SmallerBlockReleasePageDelta =
        PagesInGroup * (1 + MinSizeClass / 16U) / 100;

    // Reserve the space required for the Primary.
    CHECK(ReservedMemory.create(/*Addr=*/0U, PrimarySize,
                                "scudo:primary_reserve"));
    PrimaryBase = ReservedMemory.getBase();
    DCHECK_NE(PrimaryBase, 0U);

    u32 Seed;
    const u64 Time = getMonotonicTimeFast();
    // Fall back to a time/address-derived seed if the system RNG fails.
    if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
      Seed = static_cast<u32>(Time ^ (PrimaryBase >> 12));

    for (uptr I = 0; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);

      // The actual start of a region is offset by a random number of pages
      // (1..16) when Config::Primary::EnableRandomOffset is set.
      Region->RegionBeg = (PrimaryBase + (I << RegionSizeLog)) +
                          (Config::Primary::EnableRandomOffset
                               ? ((getRandomModN(&Seed, 16) + 1) * PageSize)
                               : 0);
      Region->RandState = getRandomU32(&Seed);
      // Releasing small blocks is expensive, set a higher threshold to avoid
      // frequent page releases.
      if (isSmallBlock(getSizeByClassId(I)))
        Region->TryReleaseThreshold = PageSize * SmallerBlockReleasePageDelta;
      else
        Region->TryReleaseThreshold = PageSize;
      Region->ReleaseInfo.LastReleaseAtNs = Time;

      // Hand each region its RegionSize slice of the reserved range.
      Region->MemMapInfo.MemMap = ReservedMemory.dispatch(
          PrimaryBase + (I << RegionSizeLog), RegionSize);
      CHECK(Region->MemMapInfo.MemMap.isAllocated());
    }
    shuffle(RegionInfoArray, NumClasses, &Seed);

    // The binding should be done after region shuffling so that it won't bind
    // the FLLock from the wrong region.
    for (uptr I = 0; I < NumClasses; I++)
      getRegionInfo(I)->FLLockCV.bindTestOnly(getRegionInfo(I)->FLLock);

    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }
1640b57cec5SDimitry Andric 
unmapTestOnly()16506c3fb27SDimitry Andric   void unmapTestOnly() NO_THREAD_SAFETY_ANALYSIS {
166fe6060f1SDimitry Andric     for (uptr I = 0; I < NumClasses; I++) {
167fe6060f1SDimitry Andric       RegionInfo *Region = getRegionInfo(I);
168fe6060f1SDimitry Andric       *Region = {};
169fe6060f1SDimitry Andric     }
17081ad6265SDimitry Andric     if (PrimaryBase)
17106c3fb27SDimitry Andric       ReservedMemory.release();
172fe6060f1SDimitry Andric     PrimaryBase = 0U;
1730b57cec5SDimitry Andric   }
1740b57cec5SDimitry Andric 
  // When all blocks are freed, it has to be the same size as `AllocatedUser`.
  // Test-only consistency check: verifies that, once everything has been
  // freed, the free lists account for every block carved out of each region.
  void verifyAllBlocksAreReleasedTestOnly() {
    // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
    uptr BatchClassUsedInFreeLists = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      // We have to count BatchClassUsedInFreeLists in other regions first,
      // because the BatchClass region is verified last (below) and its block
      // count depends on how many batches the other classes consumed.
      if (I == SizeClassMap::BatchClassId)
        continue;
      RegionInfo *Region = getRegionInfo(I);
      ScopedLock ML(Region->MMLock);
      ScopedLock FL(Region->FLLock);
      const uptr BlockSize = getSizeByClassId(I);
      uptr TotalBlocks = 0;
      for (BatchGroupT &BG : Region->FreeListInfo.BlockList) {
        // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
        BatchClassUsedInFreeLists += BG.Batches.size() + 1;
        for (const auto &It : BG.Batches)
          TotalBlocks += It.getCount();
      }

      // Every block ever carved from this region must be back on a free list.
      DCHECK_EQ(TotalBlocks, Region->MemMapInfo.AllocatedUser / BlockSize);
      DCHECK_EQ(Region->FreeListInfo.PushedBlocks,
                Region->FreeListInfo.PoppedBlocks);
    }

    // Now verify the BatchClass region itself.
    RegionInfo *Region = getRegionInfo(SizeClassMap::BatchClassId);
    ScopedLock ML(Region->MMLock);
    ScopedLock FL(Region->FLLock);
    const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
    uptr TotalBlocks = 0;
    for (BatchGroupT &BG : Region->FreeListInfo.BlockList) {
      if (LIKELY(!BG.Batches.empty())) {
        for (const auto &It : BG.Batches)
          TotalBlocks += It.getCount();
      } else {
        // `BatchGroup` with empty freelist doesn't have `TransferBatch` record
        // itself.
        ++TotalBlocks;
      }
    }
    // BatchClass blocks are either free, or in use holding other classes'
    // free-list metadata (counted in BatchClassUsedInFreeLists above).
    DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
              Region->MemMapInfo.AllocatedUser / BlockSize);
    DCHECK_GE(Region->FreeListInfo.PoppedBlocks,
              Region->FreeListInfo.PushedBlocks);
    const uptr BlocksInUse =
        Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
    DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
  }
22306c3fb27SDimitry Andric 
2245f757f3fSDimitry Andric   // Note that the `MaxBlockCount` will be used when we support arbitrary blocks
2255f757f3fSDimitry Andric   // count. Now it's the same as the number of blocks stored in the
2265f757f3fSDimitry Andric   // `TransferBatch`.
popBlocks(CacheT * C,uptr ClassId,CompactPtrT * ToArray,UNUSED const u16 MaxBlockCount)2275f757f3fSDimitry Andric   u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
2285f757f3fSDimitry Andric                 UNUSED const u16 MaxBlockCount) {
2295f757f3fSDimitry Andric     TransferBatchT *B = popBatch(C, ClassId);
2305f757f3fSDimitry Andric     if (!B)
2315f757f3fSDimitry Andric       return 0;
2325f757f3fSDimitry Andric 
2335f757f3fSDimitry Andric     const u16 Count = B->getCount();
2345f757f3fSDimitry Andric     DCHECK_GT(Count, 0U);
2355f757f3fSDimitry Andric     B->moveToArray(ToArray);
2365f757f3fSDimitry Andric 
2375f757f3fSDimitry Andric     if (ClassId != SizeClassMap::BatchClassId)
2385f757f3fSDimitry Andric       C->deallocate(SizeClassMap::BatchClassId, B);
2395f757f3fSDimitry Andric 
2405f757f3fSDimitry Andric     return Count;
2415f757f3fSDimitry Andric   }
2425f757f3fSDimitry Andric 
  // Pops a TransferBatch for `ClassId`, refilling the region's free list from
  // newly mapped pages if needed. Returns nullptr if the region is exhausted.
  TransferBatchT *popBatch(CacheT *C, uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    RegionInfo *Region = getRegionInfo(ClassId);

    // Fast path: try the free list first under FLLock only.
    {
      ScopedLock L(Region->FLLock);
      TransferBatchT *B = popBatchImpl(C, ClassId, Region);
      if (LIKELY(B))
        return B;
    }

    bool ReportRegionExhausted = false;
    TransferBatchT *B = nullptr;

    if (conditionVariableEnabled()) {
      // Wait on the region's condition variable instead of racing on MMLock.
      B = popBatchWithCV(C, ClassId, Region, ReportRegionExhausted);
    } else {
      while (true) {
        // When two threads compete for `Region->MMLock`, we only want one of
        // them to call populateFreeListAndPopBatch(). To avoid both of them
        // doing that, always check the freelist before mapping new pages.
        ScopedLock ML(Region->MMLock);
        {
          ScopedLock FL(Region->FLLock);
          if ((B = popBatchImpl(C, ClassId, Region)))
            break;
        }

        // Only populate if the region wasn't already known to be exhausted;
        // report only on the transition to exhausted.
        const bool RegionIsExhausted = Region->Exhausted;
        if (!RegionIsExhausted)
          B = populateFreeListAndPopBatch(C, ClassId, Region);
        ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
        break;
      }
    }

    if (UNLIKELY(ReportRegionExhausted)) {
      Printf("Can't populate more pages for size class %zu.\n",
             getSizeByClassId(ClassId));

      // Theoretically, BatchClass shouldn't be used up. Abort immediately  when
      // it happens.
      if (ClassId == SizeClassMap::BatchClassId)
        reportOutOfBatchClass();
    }

    return B;
  }
2910b57cec5SDimitry Andric 
  // Push the array of free blocks to the designated batch group.
  // Blocks are sorted by their group (derived from their compact pointer) so
  // that pushBlocksImpl() can insert runs of same-group blocks efficiently.
  void pushBlocks(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size) {
    DCHECK_LT(ClassId, NumClasses);
    DCHECK_GT(Size, 0);

    RegionInfo *Region = getRegionInfo(ClassId);
    // BatchClass blocks take a dedicated path: they hold free-list metadata
    // themselves and are pushed without grouping/sorting.
    if (ClassId == SizeClassMap::BatchClassId) {
      ScopedLock L(Region->FLLock);
      pushBatchClassBlocks(Region, Array, Size);
      if (conditionVariableEnabled())
        Region->FLLockCV.notifyAll(Region->FLLock);
      return;
    }

    // TODO(chiahungduan): Consider not doing grouping if the group size is not
    // greater than the block size with a certain scale.

    bool SameGroup = true;
    if (GroupSizeLog < RegionSizeLog) {
      // Sort the blocks so that blocks belonging to the same group can be
      // pushed together. (Insertion sort: Size is small, and it also detects
      // whether all blocks already share one group via SameGroup.)
      for (u32 I = 1; I < Size; ++I) {
        if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I]))
          SameGroup = false;
        CompactPtrT Cur = Array[I];
        u32 J = I;
        while (J > 0 && compactPtrGroup(Cur) < compactPtrGroup(Array[J - 1])) {
          Array[J] = Array[J - 1];
          --J;
        }
        Array[J] = Cur;
      }
    }

    {
      ScopedLock L(Region->FLLock);
      pushBlocksImpl(C, ClassId, Region, Array, Size, SameGroup);
      if (conditionVariableEnabled())
        Region->FLLockCV.notifyAll(Region->FLLock);
    }
  }
3330b57cec5SDimitry Andric 
  // Locks every region (MMLock then FLLock) so the allocator state can be
  // inspected safely, e.g. by iterateOverBlocks(). Paired with enable().
  void disable() NO_THREAD_SAFETY_ANALYSIS {
    // The BatchClassId must be locked last since other classes can use it.
    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
      if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
        continue;
      getRegionInfo(static_cast<uptr>(I))->MMLock.lock();
      getRegionInfo(static_cast<uptr>(I))->FLLock.lock();
    }
    getRegionInfo(SizeClassMap::BatchClassId)->MMLock.lock();
    getRegionInfo(SizeClassMap::BatchClassId)->FLLock.lock();
  }
3450b57cec5SDimitry Andric 
  // Unlocks every region in the exact reverse order of disable(): BatchClass
  // first, then the other classes in ascending order.
  void enable() NO_THREAD_SAFETY_ANALYSIS {
    getRegionInfo(SizeClassMap::BatchClassId)->FLLock.unlock();
    getRegionInfo(SizeClassMap::BatchClassId)->MMLock.unlock();
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      getRegionInfo(I)->FLLock.unlock();
      getRegionInfo(I)->MMLock.unlock();
    }
  }
3560b57cec5SDimitry Andric 
iterateOverBlocks(F Callback)3575ffd83dbSDimitry Andric   template <typename F> void iterateOverBlocks(F Callback) {
35868d75effSDimitry Andric     for (uptr I = 0; I < NumClasses; I++) {
35968d75effSDimitry Andric       if (I == SizeClassMap::BatchClassId)
36068d75effSDimitry Andric         continue;
36106c3fb27SDimitry Andric       RegionInfo *Region = getRegionInfo(I);
36206c3fb27SDimitry Andric       // TODO: The call of `iterateOverBlocks` requires disabling
36306c3fb27SDimitry Andric       // SizeClassAllocator64. We may consider locking each region on demand
36406c3fb27SDimitry Andric       // only.
36506c3fb27SDimitry Andric       Region->FLLock.assertHeld();
36606c3fb27SDimitry Andric       Region->MMLock.assertHeld();
3670b57cec5SDimitry Andric       const uptr BlockSize = getSizeByClassId(I);
3680b57cec5SDimitry Andric       const uptr From = Region->RegionBeg;
36906c3fb27SDimitry Andric       const uptr To = From + Region->MemMapInfo.AllocatedUser;
3700b57cec5SDimitry Andric       for (uptr Block = From; Block < To; Block += BlockSize)
3710b57cec5SDimitry Andric         Callback(Block);
3720b57cec5SDimitry Andric     }
3730b57cec5SDimitry Andric   }
3740b57cec5SDimitry Andric 
  // Appends allocator-wide and per-class statistics to `Str`.
  void getStats(ScopedString *Str) {
    // TODO(kostyak): get the RSS per region.
    uptr TotalMapped = 0;
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);
      // Each counter is read under the lock that guards it; the two scopes
      // are separate so MMLock and FLLock are never held together here.
      {
        ScopedLock L(Region->MMLock);
        TotalMapped += Region->MemMapInfo.MappedUser;
      }
      {
        ScopedLock L(Region->FLLock);
        PoppedBlocks += Region->FreeListInfo.PoppedBlocks;
        PushedBlocks += Region->FreeListInfo.PushedBlocks;
      }
    }
    Str->append("Stats: SizeClassAllocator64: %zuM mapped (%uM rss) in %zu "
                "allocations; remains %zu\n",
                TotalMapped >> 20, 0U, PoppedBlocks,
                PoppedBlocks - PushedBlocks);

    // Per-class detail lines, with both locks held for a consistent snapshot.
    for (uptr I = 0; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);
      ScopedLock L1(Region->MMLock);
      ScopedLock L2(Region->FLLock);
      getStats(Str, I, Region);
    }
  }
4040b57cec5SDimitry Andric 
getFragmentationInfo(ScopedString * Str)4055f757f3fSDimitry Andric   void getFragmentationInfo(ScopedString *Str) {
4065f757f3fSDimitry Andric     Str->append(
4075f757f3fSDimitry Andric         "Fragmentation Stats: SizeClassAllocator64: page size = %zu bytes\n",
4085f757f3fSDimitry Andric         getPageSizeCached());
4095f757f3fSDimitry Andric 
4105f757f3fSDimitry Andric     for (uptr I = 1; I < NumClasses; I++) {
4115f757f3fSDimitry Andric       RegionInfo *Region = getRegionInfo(I);
4125f757f3fSDimitry Andric       ScopedLock L(Region->MMLock);
4135f757f3fSDimitry Andric       getRegionFragmentationInfo(Region, I, Str);
4145f757f3fSDimitry Andric     }
4155f757f3fSDimitry Andric   }
4165f757f3fSDimitry Andric 
setOption(Option O,sptr Value)417e8d8bef9SDimitry Andric   bool setOption(Option O, sptr Value) {
418e8d8bef9SDimitry Andric     if (O == Option::ReleaseInterval) {
41906c3fb27SDimitry Andric       const s32 Interval = Max(Min(static_cast<s32>(Value),
42006c3fb27SDimitry Andric                                    Config::Primary::MaxReleaseToOsIntervalMs),
42106c3fb27SDimitry Andric                                Config::Primary::MinReleaseToOsIntervalMs);
422e8d8bef9SDimitry Andric       atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
423e8d8bef9SDimitry Andric       return true;
4245ffd83dbSDimitry Andric     }
425e8d8bef9SDimitry Andric     // Not supported by the Primary, but not an error either.
426e8d8bef9SDimitry Andric     return true;
4275ffd83dbSDimitry Andric   }
4285ffd83dbSDimitry Andric 
tryReleaseToOS(uptr ClassId,ReleaseToOS ReleaseType)42906c3fb27SDimitry Andric   uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
43006c3fb27SDimitry Andric     RegionInfo *Region = getRegionInfo(ClassId);
43106c3fb27SDimitry Andric     // Note that the tryLock() may fail spuriously, given that it should rarely
43206c3fb27SDimitry Andric     // happen and page releasing is fine to skip, we don't take certain
43306c3fb27SDimitry Andric     // approaches to ensure one page release is done.
43406c3fb27SDimitry Andric     if (Region->MMLock.tryLock()) {
43506c3fb27SDimitry Andric       uptr BytesReleased = releaseToOSMaybe(Region, ClassId, ReleaseType);
43606c3fb27SDimitry Andric       Region->MMLock.unlock();
43706c3fb27SDimitry Andric       return BytesReleased;
43806c3fb27SDimitry Andric     }
43906c3fb27SDimitry Andric     return 0;
44006c3fb27SDimitry Andric   }
44106c3fb27SDimitry Andric 
releaseToOS(ReleaseToOS ReleaseType)44206c3fb27SDimitry Andric   uptr releaseToOS(ReleaseToOS ReleaseType) {
44368d75effSDimitry Andric     uptr TotalReleasedBytes = 0;
4440b57cec5SDimitry Andric     for (uptr I = 0; I < NumClasses; I++) {
445e8d8bef9SDimitry Andric       if (I == SizeClassMap::BatchClassId)
446e8d8bef9SDimitry Andric         continue;
4470b57cec5SDimitry Andric       RegionInfo *Region = getRegionInfo(I);
44806c3fb27SDimitry Andric       ScopedLock L(Region->MMLock);
44906c3fb27SDimitry Andric       TotalReleasedBytes += releaseToOSMaybe(Region, I, ReleaseType);
4500b57cec5SDimitry Andric     }
45168d75effSDimitry Andric     return TotalReleasedBytes;
4520b57cec5SDimitry Andric   }
4530b57cec5SDimitry Andric 
getRegionInfoArrayAddress()4545ffd83dbSDimitry Andric   const char *getRegionInfoArrayAddress() const {
4555ffd83dbSDimitry Andric     return reinterpret_cast<const char *>(RegionInfoArray);
4565ffd83dbSDimitry Andric   }
4575ffd83dbSDimitry Andric 
getRegionInfoArraySize()458e8d8bef9SDimitry Andric   static uptr getRegionInfoArraySize() { return sizeof(RegionInfoArray); }
4595ffd83dbSDimitry Andric 
getCompactPtrBaseByClassId(uptr ClassId)460fe6060f1SDimitry Andric   uptr getCompactPtrBaseByClassId(uptr ClassId) {
461fe6060f1SDimitry Andric     return getRegionInfo(ClassId)->RegionBeg;
462fe6060f1SDimitry Andric   }
463fe6060f1SDimitry Andric 
compactPtr(uptr ClassId,uptr Ptr)464fe6060f1SDimitry Andric   CompactPtrT compactPtr(uptr ClassId, uptr Ptr) {
465fe6060f1SDimitry Andric     DCHECK_LE(ClassId, SizeClassMap::LargestClassId);
466fe6060f1SDimitry Andric     return compactPtrInternal(getCompactPtrBaseByClassId(ClassId), Ptr);
467fe6060f1SDimitry Andric   }
468fe6060f1SDimitry Andric 
decompactPtr(uptr ClassId,CompactPtrT CompactPtr)469fe6060f1SDimitry Andric   void *decompactPtr(uptr ClassId, CompactPtrT CompactPtr) {
470fe6060f1SDimitry Andric     DCHECK_LE(ClassId, SizeClassMap::LargestClassId);
471fe6060f1SDimitry Andric     return reinterpret_cast<void *>(
472fe6060f1SDimitry Andric         decompactPtrInternal(getCompactPtrBaseByClassId(ClassId), CompactPtr));
473fe6060f1SDimitry Andric   }
474fe6060f1SDimitry Andric 
  // Given a snapshot of the RegionInfo array (as exposed by
  // getRegionInfoArrayAddress()), find the block containing — or nearest to —
  // `Ptr`. Returns a zero-initialized BlockInfo when no region lies within
  // 8192 bytes of `Ptr`.
  static BlockInfo findNearestBlock(const char *RegionInfoData,
                                    uptr Ptr) NO_THREAD_SAFETY_ANALYSIS {
    const RegionInfo *RegionInfoArray =
        reinterpret_cast<const RegionInfo *>(RegionInfoData);

    // Scan all the regions for the one closest to `Ptr`. `ClassId` is only
    // meaningful when `MinDistance` was updated below.
    uptr ClassId;
    uptr MinDistance = -1UL;
    for (uptr I = 0; I != NumClasses; ++I) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      uptr Begin = RegionInfoArray[I].RegionBeg;
      // TODO(chiahungduan): In fact, We need to lock the RegionInfo::MMLock.
      // However, the RegionInfoData is passed with const qualifier and lock the
      // mutex requires modifying RegionInfoData, which means we need to remove
      // the const qualifier. This may lead to another undefined behavior (The
      // first one is accessing `AllocatedUser` without locking. It's better to
      // pass `RegionInfoData` as `void *` then we can lock the mutex properly.
      uptr End = Begin + RegionInfoArray[I].MemMapInfo.AllocatedUser;
      // Skip regions that are empty or not yet populated with one block.
      if (Begin > End || End - Begin < SizeClassMap::getSizeByClassId(I))
        continue;
      // Distance is 0 when Ptr falls inside [Begin, End).
      uptr RegionDistance;
      if (Begin <= Ptr) {
        if (Ptr < End)
          RegionDistance = 0;
        else
          RegionDistance = Ptr - End;
      } else {
        RegionDistance = Begin - Ptr;
      }

      if (RegionDistance < MinDistance) {
        MinDistance = RegionDistance;
        ClassId = I;
      }
    }

    BlockInfo B = {};
    // 8192 bytes is the cut-off beyond which `Ptr` is considered unrelated to
    // any region; past it the zeroed BlockInfo is returned.
    if (MinDistance <= 8192) {
      B.RegionBegin = RegionInfoArray[ClassId].RegionBeg;
      B.RegionEnd =
          B.RegionBegin + RegionInfoArray[ClassId].MemMapInfo.AllocatedUser;
      B.BlockSize = SizeClassMap::getSizeByClassId(ClassId);
      // Round down to the block boundary; signed arithmetic handles a Ptr
      // that precedes RegionBegin.
      B.BlockBegin =
          B.RegionBegin + uptr(sptr(Ptr - B.RegionBegin) / sptr(B.BlockSize) *
                               sptr(B.BlockSize));
      // Clamp the result into [RegionBegin, RegionEnd - BlockSize].
      while (B.BlockBegin < B.RegionBegin)
        B.BlockBegin += B.BlockSize;
      while (B.RegionEnd < B.BlockBegin + B.BlockSize)
        B.BlockBegin -= B.BlockSize;
    }
    return B;
  }
5275ffd83dbSDimitry Andric 
  // Runtime-tunable allocator options, shared atomically across threads.
  AtomicOptions Options;
529e8d8bef9SDimitry Andric 
private:
  // Virtual address span of a single size-class region.
  static const uptr RegionSize = 1UL << RegionSizeLog;
  static const uptr NumClasses = SizeClassMap::NumClasses;
  // Total address space reserved by the primary: one region per class.
  static const uptr PrimarySize = RegionSize * NumClasses;

  // Granularity at which a region's user mapping is grown on demand.
  static const uptr MapSizeIncrement = Config::Primary::MapSizeIncrement;
  // Fill at most this number of batches from the newly map'd memory.
  static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
5380b57cec5SDimitry Andric 
  // Per-region bookkeeping for the release-to-OS machinery.
  struct ReleaseToOsInfo {
    // Bytes sitting in the freelist at the last release checkpoint.
    uptr BytesInFreeListAtLastCheckpoint;
    // Cumulative count of page ranges handed back to the OS.
    uptr RangesReleased;
    // Bytes released by the most recent release operation.
    uptr LastReleasedBytes;
    // Timestamp (ns) of the last release attempt.
    u64 LastReleaseAtNs;
  };
5450b57cec5SDimitry Andric 
  // Freelist state of a region: the list of batch groups plus running
  // counters of blocks handed out (popped) and returned (pushed).
  struct BlocksInfo {
    SinglyLinkedList<BatchGroupT> BlockList = {};
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
  };
55106c3fb27SDimitry Andric 
  // Memory-mapping state of a region.
  struct PagesInfo {
    MemMapT MemMap = {};
    // Bytes mapped for user memory.
    uptr MappedUser = 0;
    // Bytes allocated for user memory.
    uptr AllocatedUser = 0;
  };
55906c3fb27SDimitry Andric 
  // Per-size-class region state, split into two locking domains: FLLock
  // guards the freelist, MMLock guards the memory mapping. MMLock must be
  // acquired before FLLock when both are needed.
  struct UnpaddedRegionInfo {
    // Mutex for operations on freelist
    HybridMutex FLLock;
    // Signaled when the freelist gains blocks or refilling finishes; see
    // popBatchWithCV().
    ConditionVariableT FLLockCV GUARDED_BY(FLLock);
    // Mutex for memmap operations
    HybridMutex MMLock ACQUIRED_BEFORE(FLLock);
    // `RegionBeg` is initialized before thread creation and won't be changed.
    uptr RegionBeg = 0;
    // PRNG state (presumably seeds block shuffling — confirm at use site).
    u32 RandState GUARDED_BY(MMLock) = 0;
    BlocksInfo FreeListInfo GUARDED_BY(FLLock);
    PagesInfo MemMapInfo GUARDED_BY(MMLock);
    // The minimum size of pushed blocks to trigger page release.
    uptr TryReleaseThreshold GUARDED_BY(MMLock) = 0;
    ReleaseToOsInfo ReleaseInfo GUARDED_BY(MMLock) = {};
    // Set once the region can no longer grow (address space used up).
    bool Exhausted GUARDED_BY(MMLock) = false;
    // True while one thread refills the freelist; other threads wait on
    // FLLockCV rather than racing the refill.
    bool isPopulatingFreeList GUARDED_BY(FLLock) = false;
  };
  // Pad each region to a cache-line multiple to avoid false sharing between
  // adjacent regions' hot state.
  struct RegionInfo : UnpaddedRegionInfo {
    char Padding[SCUDO_CACHE_LINE_SIZE -
                 (sizeof(UnpaddedRegionInfo) % SCUDO_CACHE_LINE_SIZE)] = {};
  };
  static_assert(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
5820b57cec5SDimitry Andric 
getRegionInfo(uptr ClassId)5835ffd83dbSDimitry Andric   RegionInfo *getRegionInfo(uptr ClassId) {
5840b57cec5SDimitry Andric     DCHECK_LT(ClassId, NumClasses);
5850b57cec5SDimitry Andric     return &RegionInfoArray[ClassId];
5860b57cec5SDimitry Andric   }
5870b57cec5SDimitry Andric 
getRegionBaseByClassId(uptr ClassId)58806c3fb27SDimitry Andric   uptr getRegionBaseByClassId(uptr ClassId) {
58906c3fb27SDimitry Andric     return roundDown(getRegionInfo(ClassId)->RegionBeg - PrimaryBase,
59006c3fb27SDimitry Andric                      RegionSize) +
59106c3fb27SDimitry Andric            PrimaryBase;
5920b57cec5SDimitry Andric   }
5930b57cec5SDimitry Andric 
compactPtrInternal(uptr Base,uptr Ptr)594fe6060f1SDimitry Andric   static CompactPtrT compactPtrInternal(uptr Base, uptr Ptr) {
595fe6060f1SDimitry Andric     return static_cast<CompactPtrT>((Ptr - Base) >> CompactPtrScale);
596fe6060f1SDimitry Andric   }
597fe6060f1SDimitry Andric 
decompactPtrInternal(uptr Base,CompactPtrT CompactPtr)598fe6060f1SDimitry Andric   static uptr decompactPtrInternal(uptr Base, CompactPtrT CompactPtr) {
599fe6060f1SDimitry Andric     return Base + (static_cast<uptr>(CompactPtr) << CompactPtrScale);
600fe6060f1SDimitry Andric   }
601fe6060f1SDimitry Andric 
compactPtrGroup(CompactPtrT CompactPtr)602bdd1243dSDimitry Andric   static uptr compactPtrGroup(CompactPtrT CompactPtr) {
60306c3fb27SDimitry Andric     const uptr Mask = (static_cast<uptr>(1) << GroupScale) - 1;
60406c3fb27SDimitry Andric     return static_cast<uptr>(CompactPtr) & ~Mask;
605bdd1243dSDimitry Andric   }
decompactGroupBase(uptr Base,uptr CompactPtrGroupBase)60606c3fb27SDimitry Andric   static uptr decompactGroupBase(uptr Base, uptr CompactPtrGroupBase) {
60706c3fb27SDimitry Andric     DCHECK_EQ(CompactPtrGroupBase % (static_cast<uptr>(1) << (GroupScale)), 0U);
60806c3fb27SDimitry Andric     return Base + (CompactPtrGroupBase << CompactPtrScale);
60906c3fb27SDimitry Andric   }
61006c3fb27SDimitry Andric 
isSmallBlock(uptr BlockSize)61106c3fb27SDimitry Andric   ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) {
61206c3fb27SDimitry Andric     const uptr PageSize = getPageSizeCached();
61306c3fb27SDimitry Andric     return BlockSize < PageSize / 16U;
61406c3fb27SDimitry Andric   }
61506c3fb27SDimitry Andric 
isLargeBlock(uptr BlockSize)61606c3fb27SDimitry Andric   ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) {
61706c3fb27SDimitry Andric     const uptr PageSize = getPageSizeCached();
61806c3fb27SDimitry Andric     return BlockSize > PageSize;
61906c3fb27SDimitry Andric   }
62006c3fb27SDimitry Andric 
  // Push `Size` blocks (compact pointers in `Array`) onto the freelist of the
  // special BatchClassId region. The BatchGroup/TransferBatch metadata for
  // this class is carved out of the pushed blocks themselves — see the long
  // comment below for the self-containment scheme. Caller holds FLLock.
  void pushBatchClassBlocks(RegionInfo *Region, CompactPtrT *Array, u32 Size)
      REQUIRES(Region->FLLock) {
    DCHECK_EQ(Region, getRegionInfo(SizeClassMap::BatchClassId));

    // Free blocks are recorded by TransferBatch in freelist for all
    // size-classes. In addition, TransferBatch is allocated from BatchClassId.
    // In order not to use additional block to record the free blocks in
    // BatchClassId, they are self-contained. I.e., A TransferBatch records the
    // block address of itself. See the figure below:
    //
    // TransferBatch at 0xABCD
    // +----------------------------+
    // | Free blocks' addr          |
    // | +------+------+------+     |
    // | |0xABCD|...   |...   |     |
    // | +------+------+------+     |
    // +----------------------------+
    //
    // When we allocate all the free blocks in the TransferBatch, the block used
    // by TransferBatch is also free for use. We don't need to recycle the
    // TransferBatch. Note that the correctness is maintained by the invariant,
    //
    //   The unit of each popBatch() request is entire TransferBatch. Return
    //   part of the blocks in a TransferBatch is invalid.
    //
    // This ensures that TransferBatch won't leak the address itself while it's
    // still holding other valid data.
    //
    // Besides, BatchGroup is also allocated from BatchClassId and has its
    // address recorded in the TransferBatch too. To maintain the correctness,
    //
    //   The address of BatchGroup is always recorded in the last TransferBatch
    //   in the freelist (also imply that the freelist should only be
    //   updated with push_front). Once the last TransferBatch is popped,
    //   the block used by BatchGroup is also free for use.
    //
    // With this approach, the blocks used by BatchGroup and TransferBatch are
    // reusable and don't need additional space for them.

    Region->FreeListInfo.PushedBlocks += Size;
    BatchGroupT *BG = Region->FreeListInfo.BlockList.front();

    if (BG == nullptr) {
      // Construct `BatchGroup` on the last element.
      BG = reinterpret_cast<BatchGroupT *>(
          decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
      --Size;
      BG->Batches.clear();
      // BatchClass hasn't enabled memory group. Use `0` to indicate there's no
      // memory group here.
      BG->CompactPtrGroupBase = 0;
      // `BG` is also the block of BatchClassId. Note that this is different
      // from `CreateGroup` in `pushBlocksImpl`
      BG->PushedBlocks = 1;
      BG->BytesInBGAtLastCheckpoint = 0;
      BG->MaxCachedPerBatch =
          CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));

      Region->FreeListInfo.BlockList.push_front(BG);
    }

    // All the pushed blocks may have been consumed constructing the group.
    if (UNLIKELY(Size == 0))
      return;

    // This happens under 2 cases.
    //   1. just allocated a new `BatchGroup`.
    //   2. Only 1 block is pushed when the freelist is empty.
    if (BG->Batches.empty()) {
      // Construct the `TransferBatch` on the last element.
      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
          decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
      TB->clear();
      // As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
      // recorded in the TransferBatch.
      TB->add(Array[Size - 1]);
      TB->add(
          compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
      --Size;
      DCHECK_EQ(BG->PushedBlocks, 1U);
      // `TB` is also the block of BatchClassId.
      BG->PushedBlocks += 1;
      BG->Batches.push_front(TB);
    }

    TransferBatchT *CurBatch = BG->Batches.front();
    DCHECK_NE(CurBatch, nullptr);

    // Append the remaining blocks, opening a new (self-contained) batch each
    // time the current one fills up.
    for (u32 I = 0; I < Size;) {
      u16 UnusedSlots =
          static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
      if (UnusedSlots == 0) {
        CurBatch = reinterpret_cast<TransferBatchT *>(
            decompactPtr(SizeClassMap::BatchClassId, Array[I]));
        CurBatch->clear();
        // Self-contained
        CurBatch->add(Array[I]);
        ++I;
        // TODO(chiahungduan): Avoid the use of push_back() in `Batches` of
        // BatchClassId.
        BG->Batches.push_front(CurBatch);
        UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1);
      }
      // `UnusedSlots` is u16 so the result will be also fit in u16.
      const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
      CurBatch->appendFromArray(&Array[I], AppendSize);
      I += AppendSize;
    }

    BG->PushedBlocks += Size;
  }
731bdd1243dSDimitry Andric 
  // Push the blocks to their batch group. The layout will be like,
  //
  // FreeListInfo.BlockList - > BG -> BG -> BG
  //                            |     |     |
  //                            v     v     v
  //                            TB    TB    TB
  //                            |
  //                            v
  //                            TB
  //
  // Each BlockGroup(BG) will associate with unique group id and the free blocks
  // are managed by a list of TransferBatch(TB). To reduce the time of inserting
  // blocks, BGs are sorted and the input `Array` are supposed to be sorted so
  // that we can get better performance of maintaining sorted property.
  // Use `SameGroup=true` to indicate that all blocks in the array are from the
  // same group then we will skip checking the group id of each block.
  //
  // Caller holds FLLock; the BatchClassId class must use
  // pushBatchClassBlocks() instead.
  void pushBlocksImpl(CacheT *C, uptr ClassId, RegionInfo *Region,
                      CompactPtrT *Array, u32 Size, bool SameGroup = false)
      REQUIRES(Region->FLLock) {
    DCHECK_NE(ClassId, SizeClassMap::BatchClassId);
    DCHECK_GT(Size, 0U);

    // Allocate and initialize a new BatchGroup (and its first, empty
    // TransferBatch) for the given group base.
    auto CreateGroup = [&](uptr CompactPtrGroupBase) {
      BatchGroupT *BG =
          reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
      BG->Batches.clear();
      TransferBatchT *TB =
          reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
      TB->clear();

      BG->CompactPtrGroupBase = CompactPtrGroupBase;
      BG->Batches.push_front(TB);
      BG->PushedBlocks = 0;
      BG->BytesInBGAtLastCheckpoint = 0;
      BG->MaxCachedPerBatch = CacheT::getMaxCached(getSizeByClassId(ClassId));

      return BG;
    };

    // Append `Size` blocks to BG's batches, opening new TransferBatches as
    // the current one fills up.
    auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
      SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
      TransferBatchT *CurBatch = Batches.front();
      DCHECK_NE(CurBatch, nullptr);

      for (u32 I = 0; I < Size;) {
        DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
        u16 UnusedSlots =
            static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
        if (UnusedSlots == 0) {
          CurBatch =
              reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
          CurBatch->clear();
          Batches.push_front(CurBatch);
          UnusedSlots = BG->MaxCachedPerBatch;
        }
        // `UnusedSlots` is u16 so the result will be also fit in u16.
        u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
        CurBatch->appendFromArray(&Array[I], AppendSize);
        I += AppendSize;
      }

      BG->PushedBlocks += Size;
    };

    Region->FreeListInfo.PushedBlocks += Size;
    BatchGroupT *Cur = Region->FreeListInfo.BlockList.front();

    // In the following, `Cur` always points to the BatchGroup for blocks that
    // will be pushed next. `Prev` is the element right before `Cur`.
    BatchGroupT *Prev = nullptr;

    // Advance to the first group whose id is >= that of Array[0].
    while (Cur != nullptr &&
           compactPtrGroup(Array[0]) > Cur->CompactPtrGroupBase) {
      Prev = Cur;
      Cur = Cur->Next;
    }

    // No group matches Array[0]'s id: create one at the sorted position.
    if (Cur == nullptr ||
        compactPtrGroup(Array[0]) != Cur->CompactPtrGroupBase) {
      Cur = CreateGroup(compactPtrGroup(Array[0]));
      if (Prev == nullptr)
        Region->FreeListInfo.BlockList.push_front(Cur);
      else
        Region->FreeListInfo.BlockList.insert(Prev, Cur);
    }

    // All the blocks are from the same group, just push without checking group
    // id.
    if (SameGroup) {
      for (u32 I = 0; I < Size; ++I)
        DCHECK_EQ(compactPtrGroup(Array[I]), Cur->CompactPtrGroupBase);

      InsertBlocks(Cur, Array, Size);
      return;
    }

    // The blocks are sorted by group id. Determine the segment of group and
    // push them to their group together.
    u32 Count = 1;
    for (u32 I = 1; I < Size; ++I) {
      if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I])) {
        DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->CompactPtrGroupBase);
        InsertBlocks(Cur, Array + I - Count, Count);

        // Locate (or create) the group for the next segment, continuing the
        // scan from the current position since input is sorted.
        while (Cur != nullptr &&
               compactPtrGroup(Array[I]) > Cur->CompactPtrGroupBase) {
          Prev = Cur;
          Cur = Cur->Next;
        }

        if (Cur == nullptr ||
            compactPtrGroup(Array[I]) != Cur->CompactPtrGroupBase) {
          Cur = CreateGroup(compactPtrGroup(Array[I]));
          DCHECK_NE(Prev, nullptr);
          Region->FreeListInfo.BlockList.insert(Prev, Cur);
        }

        Count = 1;
      } else {
        ++Count;
      }
    }

    // Flush the final segment.
    InsertBlocks(Cur, Array + Size - Count, Count);
  }
857bdd1243dSDimitry Andric 
  // Pop a TransferBatch, coordinating concurrent callers through the region's
  // condition variable: exactly one thread performs the freelist refill while
  // the others sleep on FLLockCV until blocks show up. `ReportRegionExhausted`
  // is set when this call is the one that discovered region exhaustion.
  // Returns nullptr if the region is exhausted.
  TransferBatchT *popBatchWithCV(CacheT *C, uptr ClassId, RegionInfo *Region,
                                 bool &ReportRegionExhausted) {
    TransferBatchT *B = nullptr;

    while (true) {
      // We only expect one thread doing the freelist refillment and other
      // threads will be waiting for either the completion of the
      // `populateFreeListAndPopBatch()` or `pushBlocks()` called by other
      // threads.
      bool PopulateFreeList = false;
      {
        ScopedLock FL(Region->FLLock);
        if (!Region->isPopulatingFreeList) {
          Region->isPopulatingFreeList = true;
          PopulateFreeList = true;
        }
      }

      if (PopulateFreeList) {
        // This thread won the race: do the refill under MMLock.
        ScopedLock ML(Region->MMLock);

        const bool RegionIsExhausted = Region->Exhausted;
        if (!RegionIsExhausted)
          B = populateFreeListAndPopBatch(C, ClassId, Region);
        ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;

        {
          // Before reacquiring the `FLLock`, the freelist may be used up again
          // and some threads are waiting for the freelist refillment by the
          // current thread. It's important to set
          // `Region->isPopulatingFreeList` to false so the threads about to
          // sleep will notice the status change.
          ScopedLock FL(Region->FLLock);
          Region->isPopulatingFreeList = false;
          Region->FLLockCV.notifyAll(Region->FLLock);
        }

        break;
      }

      // At here, there are two preconditions to be met before waiting,
      //   1. The freelist is empty.
      //   2. Region->isPopulatingFreeList == true, i.e, someone is still doing
      //   `populateFreeListAndPopBatch()`.
      //
      // Note that it has the chance that freelist is empty but
      // Region->isPopulatingFreeList == false because all the new populated
      // blocks were used up right after the refillment. Therefore, we have to
      // check if someone is still populating the freelist.
      ScopedLock FL(Region->FLLock);
      if (LIKELY(B = popBatchImpl(C, ClassId, Region)))
        break;

      if (!Region->isPopulatingFreeList)
        continue;

      // Now the freelist is empty and someone's doing the refillment. We will
      // wait until anyone refills the freelist or someone finishes doing
      // `populateFreeListAndPopBatch()`. The refillment can be done by
      // `populateFreeListAndPopBatch()`, `pushBlocks()`,
      // `pushBatchClassBlocks()` and `mergeGroupsToReleaseBack()`.
      Region->FLLockCV.wait(Region->FLLock);

      if (LIKELY(B = popBatchImpl(C, ClassId, Region)))
        break;
    }

    return B;
  }
9275f757f3fSDimitry Andric 
  // Pop one TransferBatch from a BatchGroup. The BatchGroup with the smallest
  // group id will be considered first.
  //
  // Returns nullptr when the freelist is empty.
  //
  // The region mutex needs to be held while calling this method.
  TransferBatchT *popBatchImpl(CacheT *C, uptr ClassId, RegionInfo *Region)
      REQUIRES(Region->FLLock) {
    if (Region->FreeListInfo.BlockList.empty())
      return nullptr;

    SinglyLinkedList<TransferBatchT> &Batches =
        Region->FreeListInfo.BlockList.front()->Batches;

    // An empty Batches list can only occur for BatchClassId, whose BatchGroup
    // block is itself the last free block (see pushBatchClassBlocks()).
    if (Batches.empty()) {
      DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
      BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
      Region->FreeListInfo.BlockList.pop_front();

      // Block used by `BatchGroup` is from BatchClassId. Turn the block into
      // `TransferBatch` with single block.
      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
      TB->clear();
      TB->add(
          compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB)));
      Region->FreeListInfo.PoppedBlocks += 1;
      return TB;
    }

    TransferBatchT *B = Batches.front();
    Batches.pop_front();
    DCHECK_NE(B, nullptr);
    DCHECK_GT(B->getCount(), 0U);

    if (Batches.empty()) {
      BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
      Region->FreeListInfo.BlockList.pop_front();

      // We don't keep BatchGroup with zero blocks to avoid empty-checking while
      // allocating. Note that block used by constructing BatchGroup is recorded
      // as free blocks in the last element of BatchGroup::Batches. Which means,
      // once we pop the last TransferBatch, the block is implicitly
      // deallocated.
      if (ClassId != SizeClassMap::BatchClassId)
        C->deallocate(SizeClassMap::BatchClassId, BG);
    }

    Region->FreeListInfo.PoppedBlocks += B->getCount();

    return B;
  }
977bdd1243dSDimitry Andric 
  // Refill the freelist of `Region` and return one batch.
  //
  // Carves up to MaxNumBatches * MaxCount fresh blocks out of the region's
  // mapped space (remapping more memory first if the currently mapped range is
  // insufficient), pushes them onto the region freelist grouped by memory
  // group, then pops one TransferBatch back out for the caller.
  //
  // Returns nullptr if the region is exhausted (no address space left) or if
  // the remap fails; otherwise a non-null TransferBatch.
  NOINLINE TransferBatchT *populateFreeListAndPopBatch(CacheT *C, uptr ClassId,
                                                       RegionInfo *Region)
      REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
    const uptr Size = getSizeByClassId(ClassId);
    const u16 MaxCount = CacheT::getMaxCached(Size);

    const uptr RegionBeg = Region->RegionBeg;
    const uptr MappedUser = Region->MemMapInfo.MappedUser;
    const uptr TotalUserBytes =
        Region->MemMapInfo.AllocatedUser + MaxCount * Size;
    // Map more space for blocks, if necessary.
    if (TotalUserBytes > MappedUser) {
      // Do the mmap for the user memory.
      const uptr MapSize =
          roundUp(TotalUserBytes - MappedUser, MapSizeIncrement);
      const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
      if (UNLIKELY(RegionBase + MappedUser + MapSize > RegionSize)) {
        // Growing would step past the end of this class's region: mark it
        // exhausted so callers stop trying to populate it.
        Region->Exhausted = true;
        return nullptr;
      }

      if (UNLIKELY(!Region->MemMapInfo.MemMap.remap(
              RegionBeg + MappedUser, MapSize, "scudo:primary",
              MAP_ALLOWNOMEM | MAP_RESIZABLE |
                  (useMemoryTagging<Config>(Options.load()) ? MAP_MEMTAG
                                                            : 0)))) {
        return nullptr;
      }
      Region->MemMapInfo.MappedUser += MapSize;
      C->getStats().add(StatMapped, MapSize);
    }

    // Number of blocks we can hand out this round: capped both by the batch
    // budget and by how many whole blocks fit in the mapped-but-unallocated
    // tail of the region.
    const u32 NumberOfBlocks =
        Min(MaxNumBatches * MaxCount,
            static_cast<u32>((Region->MemMapInfo.MappedUser -
                              Region->MemMapInfo.AllocatedUser) /
                             Size));
    DCHECK_GT(NumberOfBlocks, 0);

    constexpr u32 ShuffleArraySize =
        MaxNumBatches * TransferBatchT::MaxNumCached;
    CompactPtrT ShuffleArray[ShuffleArraySize];
    DCHECK_LE(NumberOfBlocks, ShuffleArraySize);

    // Fill the array with the compacted addresses of the new blocks, laid out
    // contiguously from the current allocation frontier.
    const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
    uptr P = RegionBeg + Region->MemMapInfo.AllocatedUser;
    for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
      ShuffleArray[I] = compactPtrInternal(CompactPtrBase, P);

    ScopedLock L(Region->FLLock);

    if (ClassId != SizeClassMap::BatchClassId) {
      // Blocks are contiguous, so each run of same-group blocks is a single
      // span of the array: shuffle each run independently and push it with
      // SameGroup=true.
      u32 N = 1;
      uptr CurGroup = compactPtrGroup(ShuffleArray[0]);
      for (u32 I = 1; I < NumberOfBlocks; I++) {
        if (UNLIKELY(compactPtrGroup(ShuffleArray[I]) != CurGroup)) {
          shuffle(ShuffleArray + I - N, N, &Region->RandState);
          pushBlocksImpl(C, ClassId, Region, ShuffleArray + I - N, N,
                         /*SameGroup=*/true);
          N = 1;
          CurGroup = compactPtrGroup(ShuffleArray[I]);
        } else {
          ++N;
        }
      }

      // Flush the trailing run that the loop above didn't push.
      shuffle(ShuffleArray + NumberOfBlocks - N, N, &Region->RandState);
      pushBlocksImpl(C, ClassId, Region, &ShuffleArray[NumberOfBlocks - N], N,
                     /*SameGroup=*/true);
    } else {
      // The BatchClass freelist has its own push path (its blocks back the
      // batch metadata itself).
      pushBatchClassBlocks(Region, ShuffleArray, NumberOfBlocks);
    }

    TransferBatchT *B = popBatchImpl(C, ClassId, Region);
    DCHECK_NE(B, nullptr);

    // Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
    // the requests from `PushBlocks` and `PopBatch` which are external
    // interfaces. `populateFreeListAndPopBatch` is the internal interface so we
    // should set the values back to avoid incorrectly setting the stats.
    Region->FreeListInfo.PushedBlocks -= NumberOfBlocks;

    const uptr AllocatedUser = Size * NumberOfBlocks;
    C->getStats().add(StatFree, AllocatedUser);
    Region->MemMapInfo.AllocatedUser += AllocatedUser;

    return B;
  }
10670b57cec5SDimitry Andric 
getStats(ScopedString * Str,uptr ClassId,RegionInfo * Region)106806c3fb27SDimitry Andric   void getStats(ScopedString *Str, uptr ClassId, RegionInfo *Region)
106906c3fb27SDimitry Andric       REQUIRES(Region->MMLock, Region->FLLock) {
107006c3fb27SDimitry Andric     if (Region->MemMapInfo.MappedUser == 0)
10710b57cec5SDimitry Andric       return;
107206c3fb27SDimitry Andric     const uptr BlockSize = getSizeByClassId(ClassId);
10735f757f3fSDimitry Andric     const uptr InUseBlocks =
107406c3fb27SDimitry Andric         Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
107506c3fb27SDimitry Andric     const uptr BytesInFreeList =
10765f757f3fSDimitry Andric         Region->MemMapInfo.AllocatedUser - InUseBlocks * BlockSize;
107706c3fb27SDimitry Andric     uptr RegionPushedBytesDelta = 0;
107806c3fb27SDimitry Andric     if (BytesInFreeList >=
107906c3fb27SDimitry Andric         Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
108006c3fb27SDimitry Andric       RegionPushedBytesDelta =
108106c3fb27SDimitry Andric           BytesInFreeList - Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
108206c3fb27SDimitry Andric     }
108306c3fb27SDimitry Andric     const uptr TotalChunks = Region->MemMapInfo.AllocatedUser / BlockSize;
108406c3fb27SDimitry Andric     Str->append(
108506c3fb27SDimitry Andric         "%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
108606c3fb27SDimitry Andric         "inuse: %6zu total: %6zu releases: %6zu last "
108706c3fb27SDimitry Andric         "released: %6zuK latest pushed bytes: %6zuK region: 0x%zx (0x%zx)\n",
10885f757f3fSDimitry Andric         Region->Exhausted ? "E" : " ", ClassId, getSizeByClassId(ClassId),
108906c3fb27SDimitry Andric         Region->MemMapInfo.MappedUser >> 10, Region->FreeListInfo.PoppedBlocks,
10905f757f3fSDimitry Andric         Region->FreeListInfo.PushedBlocks, InUseBlocks, TotalChunks,
109106c3fb27SDimitry Andric         Region->ReleaseInfo.RangesReleased,
109206c3fb27SDimitry Andric         Region->ReleaseInfo.LastReleasedBytes >> 10,
109306c3fb27SDimitry Andric         RegionPushedBytesDelta >> 10, Region->RegionBeg,
10940b57cec5SDimitry Andric         getRegionBaseByClassId(ClassId));
10950b57cec5SDimitry Andric   }
10960b57cec5SDimitry Andric 
  // Append fragmentation statistics for one size class to `Str`: how many
  // pages would stay resident if every free block were released right now,
  // and the resulting utilization percentage (live-block bytes vs resident
  // page bytes).
  //
  // NOTE(review): this runs a release pass with a FragmentationRecorder,
  // which (from its use here) only counts releasable pages — presumably a
  // measurement-only recorder rather than one that unmaps; confirm against
  // release.h.
  void getRegionFragmentationInfo(RegionInfo *Region, uptr ClassId,
                                  ScopedString *Str) REQUIRES(Region->MMLock) {
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr AllocatedUserEnd =
        Region->MemMapInfo.AllocatedUser + Region->RegionBeg;

    // Detach the whole freelist under FLLock so the marking below can run
    // without holding the lock.
    SinglyLinkedList<BatchGroupT> GroupsToRelease;
    {
      ScopedLock L(Region->FLLock);
      GroupsToRelease = Region->FreeListInfo.BlockList;
      Region->FreeListInfo.BlockList.clear();
    }

    FragmentationRecorder Recorder;
    if (!GroupsToRelease.empty()) {
      PageReleaseContext Context =
          markFreeBlocks(Region, BlockSize, AllocatedUserEnd,
                         getCompactPtrBaseByClassId(ClassId), GroupsToRelease);
      auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
      releaseFreeMemoryToOS(Context, Recorder, SkipRegion);

      // Put the detached groups back on the region freelist.
      mergeGroupsToReleaseBack(Region, GroupsToRelease);
    }

    // Re-acquire FLLock to read a consistent snapshot of the counters.
    ScopedLock L(Region->FLLock);
    const uptr PageSize = getPageSizeCached();
    const uptr TotalBlocks = Region->MemMapInfo.AllocatedUser / BlockSize;
    const uptr InUseBlocks =
        Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
    const uptr AllocatedPagesCount =
        roundUp(Region->MemMapInfo.AllocatedUser, PageSize) / PageSize;
    DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount());
    const uptr InUsePages =
        AllocatedPagesCount - Recorder.getReleasedPagesCount();
    const uptr InUseBytes = InUsePages * PageSize;

    uptr Integral;
    uptr Fractional;
    // Utilization = bytes occupied by live blocks / bytes of pages that must
    // remain resident.
    computePercentage(BlockSize * InUseBlocks, InUsePages * PageSize, &Integral,
                      &Fractional);
    Str->append("  %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
                "pages: %6zu/%6zu inuse bytes: %6zuK util: %3zu.%02zu%%\n",
                ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages,
                AllocatedPagesCount, InUseBytes >> 10, Integral, Fractional);
  }
11425f757f3fSDimitry Andric 
114368d75effSDimitry Andric   NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
114406c3fb27SDimitry Andric                                  ReleaseToOS ReleaseType = ReleaseToOS::Normal)
114506c3fb27SDimitry Andric       REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
11465f757f3fSDimitry Andric     const uptr BlockSize = getSizeByClassId(ClassId);
11475f757f3fSDimitry Andric     uptr BytesInFreeList;
11485f757f3fSDimitry Andric     const uptr AllocatedUserEnd =
11495f757f3fSDimitry Andric         Region->MemMapInfo.AllocatedUser + Region->RegionBeg;
11505f757f3fSDimitry Andric     SinglyLinkedList<BatchGroupT> GroupsToRelease;
11515f757f3fSDimitry Andric 
11525f757f3fSDimitry Andric     {
115306c3fb27SDimitry Andric       ScopedLock L(Region->FLLock);
115406c3fb27SDimitry Andric 
11555f757f3fSDimitry Andric       BytesInFreeList = Region->MemMapInfo.AllocatedUser -
11565f757f3fSDimitry Andric                         (Region->FreeListInfo.PoppedBlocks -
115706c3fb27SDimitry Andric                          Region->FreeListInfo.PushedBlocks) *
115806c3fb27SDimitry Andric                             BlockSize;
115906c3fb27SDimitry Andric       if (UNLIKELY(BytesInFreeList == 0))
116006c3fb27SDimitry Andric         return false;
116106c3fb27SDimitry Andric 
11625f757f3fSDimitry Andric       // ==================================================================== //
116306c3fb27SDimitry Andric       // 1. Check if we have enough free blocks and if it's worth doing a page
116406c3fb27SDimitry Andric       //    release.
11655f757f3fSDimitry Andric       // ==================================================================== //
116606c3fb27SDimitry Andric       if (ReleaseType != ReleaseToOS::ForceAll &&
116706c3fb27SDimitry Andric           !hasChanceToReleasePages(Region, BlockSize, BytesInFreeList,
116806c3fb27SDimitry Andric                                    ReleaseType)) {
116906c3fb27SDimitry Andric         return 0;
117006c3fb27SDimitry Andric       }
117106c3fb27SDimitry Andric 
11725f757f3fSDimitry Andric       // ==================================================================== //
117306c3fb27SDimitry Andric       // 2. Determine which groups can release the pages. Use a heuristic to
117406c3fb27SDimitry Andric       //    gather groups that are candidates for doing a release.
11755f757f3fSDimitry Andric       // ==================================================================== //
117606c3fb27SDimitry Andric       if (ReleaseType == ReleaseToOS::ForceAll) {
117706c3fb27SDimitry Andric         GroupsToRelease = Region->FreeListInfo.BlockList;
117806c3fb27SDimitry Andric         Region->FreeListInfo.BlockList.clear();
117906c3fb27SDimitry Andric       } else {
11805f757f3fSDimitry Andric         GroupsToRelease =
11815f757f3fSDimitry Andric             collectGroupsToRelease(Region, BlockSize, AllocatedUserEnd,
11825f757f3fSDimitry Andric                                    getCompactPtrBaseByClassId(ClassId));
118306c3fb27SDimitry Andric       }
118406c3fb27SDimitry Andric       if (GroupsToRelease.empty())
118506c3fb27SDimitry Andric         return 0;
118606c3fb27SDimitry Andric     }
118706c3fb27SDimitry Andric 
118806c3fb27SDimitry Andric     // Note that we have extracted the `GroupsToRelease` from region freelist.
118906c3fb27SDimitry Andric     // It's safe to let pushBlocks()/popBatches() access the remaining region
119006c3fb27SDimitry Andric     // freelist. In the steps 3 and 4, we will temporarily release the FLLock
119106c3fb27SDimitry Andric     // and lock it again before step 5.
119206c3fb27SDimitry Andric 
119306c3fb27SDimitry Andric     // ==================================================================== //
11945f757f3fSDimitry Andric     // 3. Mark the free blocks in `GroupsToRelease` in the `PageReleaseContext`.
11955f757f3fSDimitry Andric     //    Then we can tell which pages are in-use by querying
11965f757f3fSDimitry Andric     //    `PageReleaseContext`.
119706c3fb27SDimitry Andric     // ==================================================================== //
11985f757f3fSDimitry Andric     PageReleaseContext Context =
11995f757f3fSDimitry Andric         markFreeBlocks(Region, BlockSize, AllocatedUserEnd,
12005f757f3fSDimitry Andric                        getCompactPtrBaseByClassId(ClassId), GroupsToRelease);
120106c3fb27SDimitry Andric     if (UNLIKELY(!Context.hasBlockMarked())) {
120206c3fb27SDimitry Andric       mergeGroupsToReleaseBack(Region, GroupsToRelease);
120306c3fb27SDimitry Andric       return 0;
120406c3fb27SDimitry Andric     }
120506c3fb27SDimitry Andric 
120606c3fb27SDimitry Andric     // ==================================================================== //
120706c3fb27SDimitry Andric     // 4. Release the unused physical pages back to the OS.
120806c3fb27SDimitry Andric     // ==================================================================== //
120906c3fb27SDimitry Andric     RegionReleaseRecorder<MemMapT> Recorder(&Region->MemMapInfo.MemMap,
121006c3fb27SDimitry Andric                                             Region->RegionBeg,
121106c3fb27SDimitry Andric                                             Context.getReleaseOffset());
121206c3fb27SDimitry Andric     auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
121306c3fb27SDimitry Andric     releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
121406c3fb27SDimitry Andric     if (Recorder.getReleasedRangesCount() > 0) {
121506c3fb27SDimitry Andric       Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
121606c3fb27SDimitry Andric       Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
121706c3fb27SDimitry Andric       Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
121806c3fb27SDimitry Andric     }
121906c3fb27SDimitry Andric     Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
122006c3fb27SDimitry Andric 
122106c3fb27SDimitry Andric     // ====================================================================== //
122206c3fb27SDimitry Andric     // 5. Merge the `GroupsToRelease` back to the freelist.
122306c3fb27SDimitry Andric     // ====================================================================== //
122406c3fb27SDimitry Andric     mergeGroupsToReleaseBack(Region, GroupsToRelease);
122506c3fb27SDimitry Andric 
12265f757f3fSDimitry Andric     return Recorder.getReleasedBytes();
122706c3fb27SDimitry Andric   }
122806c3fb27SDimitry Andric 
  // Heuristically decide whether a page-release attempt on `Region` is likely
  // to be profitable. Both MMLock and FLLock must be held.
  //
  // Returns false when:
  //  - fewer bytes than one page were pushed since the last checkpoint;
  //  - the blocks are small, release is `Normal`, and the pushed-bytes delta
  //    hasn't reached `Region->TryReleaseThreshold`;
  //  - release is `Normal` and the configured interval hasn't elapsed yet
  //    (unless bypassed for large blocks, see below) or the interval is
  //    negative (release disabled).
  // As a side effect, lowers `BytesInFreeListAtLastCheckpoint` when the
  // current freelist is smaller than the recorded checkpoint.
  bool hasChanceToReleasePages(RegionInfo *Region, uptr BlockSize,
                               uptr BytesInFreeList, ReleaseToOS ReleaseType)
      REQUIRES(Region->MMLock, Region->FLLock) {
    DCHECK_GE(Region->FreeListInfo.PoppedBlocks,
              Region->FreeListInfo.PushedBlocks);
    const uptr PageSize = getPageSizeCached();

    // Always update `BytesInFreeListAtLastCheckpoint` with the smallest value
    // so that we won't underestimate the releasable pages. For example, the
    // following is the region usage,
    //
    //  BytesInFreeListAtLastCheckpoint   AllocatedUser
    //                v                         v
    //  |--------------------------------------->
    //         ^                   ^
    //  BytesInFreeList     ReleaseThreshold
    //
    // In general, if we have collected enough bytes and the amount of free
    // bytes meets the ReleaseThreshold, we will try to do page release. If we
    // don't update `BytesInFreeListAtLastCheckpoint` when the current
    // `BytesInFreeList` is smaller, we may take longer time to wait for enough
    // freed blocks because we miss the bytes between
    // (BytesInFreeListAtLastCheckpoint - BytesInFreeList).
    if (BytesInFreeList <=
        Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
      Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
    }

    // Bytes newly pushed to the freelist since the last checkpoint; anything
    // under a page can't possibly free a page.
    const uptr RegionPushedBytesDelta =
        BytesInFreeList - Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
    if (RegionPushedBytesDelta < PageSize)
      return false;

    // Releasing smaller blocks is expensive, so we want to make sure that a
    // significant amount of bytes are free, and that there has been a good
    // amount of batches pushed to the freelist before attempting to release.
    if (isSmallBlock(BlockSize) && ReleaseType == ReleaseToOS::Normal)
      if (RegionPushedBytesDelta < Region->TryReleaseThreshold)
        return false;

    if (ReleaseType == ReleaseToOS::Normal) {
      const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
      // A negative interval means interval-based release is disabled.
      if (IntervalMs < 0)
        return false;

      // The constant 8 here is selected from profiling some apps and the number
      // of unreleased pages in the large size classes is around 16 pages or
      // more. Choose half of it as a heuristic and which also avoids page
      // release every time for every pushBlocks() attempt by large blocks.
      const bool ByPassReleaseInterval =
          isLargeBlock(BlockSize) && RegionPushedBytesDelta > 8 * PageSize;
      if (!ByPassReleaseInterval) {
        if (Region->ReleaseInfo.LastReleaseAtNs +
                static_cast<u64>(IntervalMs) * 1000000 >
            getMonotonicTimeFast()) {
          // Memory was returned recently.
          return false;
        }
      }
    } // if (ReleaseType == ReleaseToOS::Normal)

    return true;
  }
12920b57cec5SDimitry Andric 
12935f757f3fSDimitry Andric   SinglyLinkedList<BatchGroupT>
collectGroupsToRelease(RegionInfo * Region,const uptr BlockSize,const uptr AllocatedUserEnd,const uptr CompactPtrBase)129406c3fb27SDimitry Andric   collectGroupsToRelease(RegionInfo *Region, const uptr BlockSize,
129506c3fb27SDimitry Andric                          const uptr AllocatedUserEnd, const uptr CompactPtrBase)
129606c3fb27SDimitry Andric       REQUIRES(Region->MMLock, Region->FLLock) {
12975f757f3fSDimitry Andric     const uptr GroupSize = (1UL << GroupSizeLog);
129806c3fb27SDimitry Andric     const uptr PageSize = getPageSizeCached();
12995f757f3fSDimitry Andric     SinglyLinkedList<BatchGroupT> GroupsToRelease;
1300bdd1243dSDimitry Andric 
130106c3fb27SDimitry Andric     // We are examining each group and will take the minimum distance to the
130206c3fb27SDimitry Andric     // release threshold as the next Region::TryReleaseThreshold(). Note that if
130306c3fb27SDimitry Andric     // the size of free blocks has reached the release threshold, the distance
130406c3fb27SDimitry Andric     // to the next release will be PageSize * SmallerBlockReleasePageDelta. See
130506c3fb27SDimitry Andric     // the comment on `SmallerBlockReleasePageDelta` for more details.
130606c3fb27SDimitry Andric     uptr MinDistToThreshold = GroupSize;
1307bdd1243dSDimitry Andric 
13085f757f3fSDimitry Andric     for (BatchGroupT *BG = Region->FreeListInfo.BlockList.front(),
130906c3fb27SDimitry Andric                      *Prev = nullptr;
131006c3fb27SDimitry Andric          BG != nullptr;) {
131106c3fb27SDimitry Andric       // Group boundary is always GroupSize-aligned from CompactPtr base. The
131206c3fb27SDimitry Andric       // layout of memory groups is like,
1313bdd1243dSDimitry Andric       //
131406c3fb27SDimitry Andric       //     (CompactPtrBase)
131506c3fb27SDimitry Andric       // #1 CompactPtrGroupBase   #2 CompactPtrGroupBase            ...
1316bdd1243dSDimitry Andric       //           |                       |                       |
1317bdd1243dSDimitry Andric       //           v                       v                       v
131806c3fb27SDimitry Andric       //           +-----------------------+-----------------------+
131906c3fb27SDimitry Andric       //            \                     / \                     /
132006c3fb27SDimitry Andric       //             ---   GroupSize   ---   ---   GroupSize   ---
1321bdd1243dSDimitry Andric       //
132206c3fb27SDimitry Andric       // After decompacting the CompactPtrGroupBase, we expect the alignment
132306c3fb27SDimitry Andric       // property is held as well.
132406c3fb27SDimitry Andric       const uptr BatchGroupBase =
132506c3fb27SDimitry Andric           decompactGroupBase(CompactPtrBase, BG->CompactPtrGroupBase);
132606c3fb27SDimitry Andric       DCHECK_LE(Region->RegionBeg, BatchGroupBase);
132706c3fb27SDimitry Andric       DCHECK_GE(AllocatedUserEnd, BatchGroupBase);
132806c3fb27SDimitry Andric       DCHECK_EQ((Region->RegionBeg - BatchGroupBase) % GroupSize, 0U);
1329bdd1243dSDimitry Andric       // TransferBatches are pushed in front of BG.Batches. The first one may
1330bdd1243dSDimitry Andric       // not have all caches used.
133106c3fb27SDimitry Andric       const uptr NumBlocks = (BG->Batches.size() - 1) * BG->MaxCachedPerBatch +
133206c3fb27SDimitry Andric                              BG->Batches.front()->getCount();
1333bdd1243dSDimitry Andric       const uptr BytesInBG = NumBlocks * BlockSize;
133406c3fb27SDimitry Andric 
133506c3fb27SDimitry Andric       if (BytesInBG <= BG->BytesInBGAtLastCheckpoint) {
133606c3fb27SDimitry Andric         BG->BytesInBGAtLastCheckpoint = BytesInBG;
133706c3fb27SDimitry Andric         Prev = BG;
133806c3fb27SDimitry Andric         BG = BG->Next;
133906c3fb27SDimitry Andric         continue;
134006c3fb27SDimitry Andric       }
134106c3fb27SDimitry Andric 
134206c3fb27SDimitry Andric       const uptr PushedBytesDelta = BG->BytesInBGAtLastCheckpoint - BytesInBG;
134306c3fb27SDimitry Andric 
1344bdd1243dSDimitry Andric       // Given the randomness property, we try to release the pages only if the
1345bdd1243dSDimitry Andric       // bytes used by free blocks exceed certain proportion of group size. Note
1346bdd1243dSDimitry Andric       // that this heuristic only applies when all the spaces in a BatchGroup
1347bdd1243dSDimitry Andric       // are allocated.
134806c3fb27SDimitry Andric       if (isSmallBlock(BlockSize)) {
134906c3fb27SDimitry Andric         const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
135006c3fb27SDimitry Andric         const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
135106c3fb27SDimitry Andric                                             ? GroupSize
135206c3fb27SDimitry Andric                                             : AllocatedUserEnd - BatchGroupBase;
135306c3fb27SDimitry Andric         const uptr ReleaseThreshold =
135406c3fb27SDimitry Andric             (AllocatedGroupSize * (100 - 1U - BlockSize / 16U)) / 100U;
135506c3fb27SDimitry Andric         const bool HighDensity = BytesInBG >= ReleaseThreshold;
135606c3fb27SDimitry Andric         const bool MayHaveReleasedAll = NumBlocks >= (GroupSize / BlockSize);
135706c3fb27SDimitry Andric         // If all blocks in the group are released, we will do range marking
135806c3fb27SDimitry Andric         // which is fast. Otherwise, we will wait until we have accumulated
135906c3fb27SDimitry Andric         // a certain amount of free memory.
136006c3fb27SDimitry Andric         const bool ReachReleaseDelta =
136106c3fb27SDimitry Andric             MayHaveReleasedAll
136206c3fb27SDimitry Andric                 ? true
136306c3fb27SDimitry Andric                 : PushedBytesDelta >= PageSize * SmallerBlockReleasePageDelta;
136406c3fb27SDimitry Andric 
136506c3fb27SDimitry Andric         if (!HighDensity) {
136606c3fb27SDimitry Andric           DCHECK_LE(BytesInBG, ReleaseThreshold);
136706c3fb27SDimitry Andric           // The following is the usage of a memory group,
136806c3fb27SDimitry Andric           //
136906c3fb27SDimitry Andric           //     BytesInBG             ReleaseThreshold
137006c3fb27SDimitry Andric           //  /             \                 v
137106c3fb27SDimitry Andric           //  +---+---------------------------+-----+
137206c3fb27SDimitry Andric           //  |   |         |                 |     |
137306c3fb27SDimitry Andric           //  +---+---------------------------+-----+
137406c3fb27SDimitry Andric           //       \        /                       ^
137506c3fb27SDimitry Andric           //    PushedBytesDelta                 GroupEnd
137606c3fb27SDimitry Andric           MinDistToThreshold =
137706c3fb27SDimitry Andric               Min(MinDistToThreshold,
137806c3fb27SDimitry Andric                   ReleaseThreshold - BytesInBG + PushedBytesDelta);
137906c3fb27SDimitry Andric         } else {
138006c3fb27SDimitry Andric           // If it reaches high density at this round, the next time we will try
138106c3fb27SDimitry Andric           // to release is based on SmallerBlockReleasePageDelta
138206c3fb27SDimitry Andric           MinDistToThreshold =
138306c3fb27SDimitry Andric               Min(MinDistToThreshold, PageSize * SmallerBlockReleasePageDelta);
138406c3fb27SDimitry Andric         }
138506c3fb27SDimitry Andric 
138606c3fb27SDimitry Andric         if (!HighDensity || !ReachReleaseDelta) {
138706c3fb27SDimitry Andric           Prev = BG;
138806c3fb27SDimitry Andric           BG = BG->Next;
138906c3fb27SDimitry Andric           continue;
139006c3fb27SDimitry Andric         }
139106c3fb27SDimitry Andric       }
139206c3fb27SDimitry Andric 
13935f757f3fSDimitry Andric       // If `BG` is the first BatchGroupT in the list, we only need to advance
139406c3fb27SDimitry Andric       // `BG` and call FreeListInfo.BlockList::pop_front(). No update is needed
139506c3fb27SDimitry Andric       // for `Prev`.
139606c3fb27SDimitry Andric       //
139706c3fb27SDimitry Andric       //         (BG)   (BG->Next)
139806c3fb27SDimitry Andric       // Prev     Cur      BG
139906c3fb27SDimitry Andric       //   |       |       |
140006c3fb27SDimitry Andric       //   v       v       v
140106c3fb27SDimitry Andric       //  nil     +--+    +--+
140206c3fb27SDimitry Andric       //          |X | -> |  | -> ...
140306c3fb27SDimitry Andric       //          +--+    +--+
140406c3fb27SDimitry Andric       //
140506c3fb27SDimitry Andric       // Otherwise, `Prev` will be used to extract the `Cur` from the
140606c3fb27SDimitry Andric       // `FreeListInfo.BlockList`.
140706c3fb27SDimitry Andric       //
140806c3fb27SDimitry Andric       //         (BG)   (BG->Next)
140906c3fb27SDimitry Andric       // Prev     Cur      BG
141006c3fb27SDimitry Andric       //   |       |       |
141106c3fb27SDimitry Andric       //   v       v       v
141206c3fb27SDimitry Andric       //  +--+    +--+    +--+
141306c3fb27SDimitry Andric       //  |  | -> |X | -> |  | -> ...
141406c3fb27SDimitry Andric       //  +--+    +--+    +--+
141506c3fb27SDimitry Andric       //
141606c3fb27SDimitry Andric       // After FreeListInfo.BlockList::extract(),
141706c3fb27SDimitry Andric       //
141806c3fb27SDimitry Andric       // Prev     Cur       BG
141906c3fb27SDimitry Andric       //   |       |        |
142006c3fb27SDimitry Andric       //   v       v        v
142106c3fb27SDimitry Andric       //  +--+    +--+     +--+
142206c3fb27SDimitry Andric       //  |  |-+  |X |  +->|  | -> ...
142306c3fb27SDimitry Andric       //  +--+ |  +--+  |  +--+
142406c3fb27SDimitry Andric       //       +--------+
142506c3fb27SDimitry Andric       //
142606c3fb27SDimitry Andric       // Note that we need to advance before pushing this BatchGroup to
142706c3fb27SDimitry Andric       // GroupsToRelease because it's a destructive operation.
142806c3fb27SDimitry Andric 
14295f757f3fSDimitry Andric       BatchGroupT *Cur = BG;
143006c3fb27SDimitry Andric       BG = BG->Next;
143106c3fb27SDimitry Andric 
143206c3fb27SDimitry Andric       // Ideally, we may want to update this only after successful release.
143306c3fb27SDimitry Andric       // However, for smaller blocks, each block marking is a costly operation.
143406c3fb27SDimitry Andric       // Therefore, we update it earlier.
143506c3fb27SDimitry Andric       // TODO: Consider updating this after releasing pages if `ReleaseRecorder`
143606c3fb27SDimitry Andric       // can tell the released bytes in each group.
143706c3fb27SDimitry Andric       Cur->BytesInBGAtLastCheckpoint = BytesInBG;
143806c3fb27SDimitry Andric 
143906c3fb27SDimitry Andric       if (Prev != nullptr)
144006c3fb27SDimitry Andric         Region->FreeListInfo.BlockList.extract(Prev, Cur);
144106c3fb27SDimitry Andric       else
144206c3fb27SDimitry Andric         Region->FreeListInfo.BlockList.pop_front();
144306c3fb27SDimitry Andric       GroupsToRelease.push_back(Cur);
144406c3fb27SDimitry Andric     }
144506c3fb27SDimitry Andric 
144606c3fb27SDimitry Andric     // Only small blocks have the adaptive `TryReleaseThreshold`.
144706c3fb27SDimitry Andric     if (isSmallBlock(BlockSize)) {
144806c3fb27SDimitry Andric       // If the MinDistToThreshold is not updated, that means each memory group
144906c3fb27SDimitry Andric       // may have only pushed less than a page size. In that case, just set it
145006c3fb27SDimitry Andric       // back to normal.
145106c3fb27SDimitry Andric       if (MinDistToThreshold == GroupSize)
145206c3fb27SDimitry Andric         MinDistToThreshold = PageSize * SmallerBlockReleasePageDelta;
145306c3fb27SDimitry Andric       Region->TryReleaseThreshold = MinDistToThreshold;
145406c3fb27SDimitry Andric     }
145506c3fb27SDimitry Andric 
145606c3fb27SDimitry Andric     return GroupsToRelease;
145706c3fb27SDimitry Andric   }
145806c3fb27SDimitry Andric 
  // Records the free blocks of every BatchGroup in `GroupsToRelease` into a
  // PageReleaseContext, which the caller can then use to find pages that are
  // entirely free and may be returned to the OS. `GroupsToRelease` is sorted
  // by group base: front() has the lowest CompactPtrGroupBase and back() the
  // highest (this ordering is relied upon to compute the release range).
  // Returns an unmarked context if the PageMap allocation fails.
  PageReleaseContext
  markFreeBlocks(RegionInfo *Region, const uptr BlockSize,
                 const uptr AllocatedUserEnd, const uptr CompactPtrBase,
                 SinglyLinkedList<BatchGroupT> &GroupsToRelease)
      REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
    const uptr GroupSize = (1UL << GroupSizeLog);
    // Translates a compact pointer stored in a TransferBatch back into the
    // full user-space address expected by the release context.
    auto DecompactPtr = [CompactPtrBase](CompactPtrT CompactPtr) {
      return decompactPtrInternal(CompactPtrBase, CompactPtr);
    };

    // The candidate release range spans from the base of the first group to
    // the end of the last group, clamped to the end of the allocated user
    // space (the last group may only be partially backed).
    const uptr ReleaseBase = decompactGroupBase(
        CompactPtrBase, GroupsToRelease.front()->CompactPtrGroupBase);
    const uptr LastGroupEnd =
        Min(decompactGroupBase(CompactPtrBase,
                               GroupsToRelease.back()->CompactPtrGroupBase) +
                GroupSize,
            AllocatedUserEnd);
    // The last block may straddle the group boundary. Rounding up to BlockSize
    // to get the exact range.
    const uptr ReleaseEnd =
        roundUpSlow(LastGroupEnd - Region->RegionBeg, BlockSize) +
        Region->RegionBeg;
    const uptr ReleaseRangeSize = ReleaseEnd - ReleaseBase;
    const uptr ReleaseOffset = ReleaseBase - Region->RegionBeg;

    PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                               ReleaseRangeSize, ReleaseOffset);
    // We may not be able to do the page release in a rare case that we may
    // fail on PageMap allocation.
    if (UNLIKELY(!Context.ensurePageMapAllocated()))
      return Context;

    for (BatchGroupT &BG : GroupsToRelease) {
      const uptr BatchGroupBase =
          decompactGroupBase(CompactPtrBase, BG.CompactPtrGroupBase);
      const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
      // A group past the allocated user end is only partially usable.
      const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
                                          ? GroupSize
                                          : AllocatedUserEnd - BatchGroupBase;
      const uptr BatchGroupUsedEnd = BatchGroupBase + AllocatedGroupSize;
      const bool MayContainLastBlockInRegion =
          BatchGroupUsedEnd == AllocatedUserEnd;
      const bool BlockAlignedWithUsedEnd =
          (BatchGroupUsedEnd - Region->RegionBeg) % BlockSize == 0;

      // If the used end is not aligned to BlockSize, a block straddles out of
      // this group, so the group may contain one extra (partial) block.
      uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
      if (!BlockAlignedWithUsedEnd)
        ++MaxContainedBlocks;

      // Every TransferBatch except the front one is full (holds
      // MaxCachedPerBatch blocks), so the total can be computed without
      // walking all batches. The DCHECKs below verify this invariant.
      const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
                             BG.Batches.front()->getCount();

      if (NumBlocks == MaxContainedBlocks) {
        // Fast path: all blocks in this group are free, so the whole used
        // range can be marked as counted in one shot.
        for (const auto &It : BG.Batches) {
          if (&It != BG.Batches.front())
            DCHECK_EQ(It.getCount(), BG.MaxCachedPerBatch);
          for (u16 I = 0; I < It.getCount(); ++I)
            DCHECK_EQ(compactPtrGroup(It.get(I)), BG.CompactPtrGroupBase);
        }

        Context.markRangeAsAllCounted(BatchGroupBase, BatchGroupUsedEnd,
                                      Region->RegionBeg, /*RegionIndex=*/0,
                                      Region->MemMapInfo.AllocatedUser);
      } else {
        DCHECK_LT(NumBlocks, MaxContainedBlocks);
        // Note that we don't always visit blocks in each BatchGroup so that we
        // may miss the chance of releasing certain pages that cross
        // BatchGroups.
        Context.markFreeBlocksInRegion(
            BG.Batches, DecompactPtr, Region->RegionBeg, /*RegionIndex=*/0,
            Region->MemMapInfo.AllocatedUser, MayContainLastBlockInRegion);
      }
    }

    DCHECK(Context.hasBlockMarked());

    return Context;
  }
153706c3fb27SDimitry Andric 
  // Merges the BatchGroups in `GroupsToRelease` (previously extracted from
  // the region freelist for page release) back into
  // `Region->FreeListInfo.BlockList`, preserving the ascending
  // CompactPtrGroupBase order of that list. When a group with the same base
  // already exists in the freelist, the two are merged batch-by-batch and the
  // now-redundant BatchGroup (and possibly an emptied TransferBatch) is
  // recycled into the batch-class region. Waiters on the region's freelist
  // condition variable are notified at the end.
  void mergeGroupsToReleaseBack(RegionInfo *Region,
                                SinglyLinkedList<BatchGroupT> &GroupsToRelease)
      REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
    ScopedLock L(Region->FLLock);

    // After merging two freelists, we may have redundant `BatchGroup`s that
    // need to be recycled. The number of unused `BatchGroup`s is expected to be
    // small. Pick a constant which is inferred from real programs.
    constexpr uptr MaxUnusedSize = 8;
    CompactPtrT Blocks[MaxUnusedSize];
    u32 Idx = 0;
    RegionInfo *BatchClassRegion = getRegionInfo(SizeClassMap::BatchClassId);
    // We can't call pushBatchClassBlocks() to recycle the unused `BatchGroup`s
    // when we are manipulating the freelist of `BatchClassRegion`. Instead, we
    // should just push it back to the freelist when we merge two `BatchGroup`s.
    // This logic hasn't been implemented because we haven't supported releasing
    // pages in `BatchClassRegion`.
    DCHECK_NE(BatchClassRegion, Region);

    // Merge GroupsToRelease back to the Region::FreeListInfo.BlockList. Note
    // that both `Region->FreeListInfo.BlockList` and `GroupsToRelease` are
    // sorted.
    for (BatchGroupT *BG = Region->FreeListInfo.BlockList.front(),
                     *Prev = nullptr;
         ;) {
      // Either list exhausted: whatever remains in `GroupsToRelease` is
      // strictly greater than everything in the freelist, so append it.
      if (BG == nullptr || GroupsToRelease.empty()) {
        if (!GroupsToRelease.empty())
          Region->FreeListInfo.BlockList.append_back(&GroupsToRelease);
        break;
      }

      DCHECK(!BG->Batches.empty());

      // The freelist group sorts before the next group to re-insert; keep
      // scanning forward.
      if (BG->CompactPtrGroupBase <
          GroupsToRelease.front()->CompactPtrGroupBase) {
        Prev = BG;
        BG = BG->Next;
        continue;
      }

      BatchGroupT *Cur = GroupsToRelease.front();
      TransferBatchT *UnusedTransferBatch = nullptr;
      GroupsToRelease.pop_front();

      // Same group base: merge `Cur` into the existing `BG` and recycle `Cur`.
      if (BG->CompactPtrGroupBase == Cur->CompactPtrGroupBase) {
        BG->PushedBlocks += Cur->PushedBlocks;
        // We have updated `BatchGroup::BytesInBGAtLastCheckpoint` while
        // collecting the `GroupsToRelease`.
        BG->BytesInBGAtLastCheckpoint = Cur->BytesInBGAtLastCheckpoint;
        const uptr MaxCachedPerBatch = BG->MaxCachedPerBatch;

        // Note that the first TransferBatches in both `Batches` may not be
        // full and only the first TransferBatch can have non-full blocks. Thus
        // we have to merge them before appending one to another.
        if (Cur->Batches.front()->getCount() == MaxCachedPerBatch) {
          BG->Batches.append_back(&Cur->Batches);
        } else {
          TransferBatchT *NonFullBatch = Cur->Batches.front();
          Cur->Batches.pop_front();
          const u16 NonFullBatchCount = NonFullBatch->getCount();
          // The remaining Batches in `Cur` are full.
          BG->Batches.append_back(&Cur->Batches);

          if (BG->Batches.front()->getCount() == MaxCachedPerBatch) {
            // Only 1 non-full TransferBatch, push it to the front.
            BG->Batches.push_front(NonFullBatch);
          } else {
            // Two non-full batches: top up BG's front batch from NonFullBatch.
            // If that empties NonFullBatch, it becomes recyclable.
            const u16 NumBlocksToMove = static_cast<u16>(
                Min(static_cast<u16>(MaxCachedPerBatch -
                                     BG->Batches.front()->getCount()),
                    NonFullBatchCount));
            BG->Batches.front()->appendFromTransferBatch(NonFullBatch,
                                                         NumBlocksToMove);
            if (NonFullBatch->isEmpty())
              UnusedTransferBatch = NonFullBatch;
            else
              BG->Batches.push_front(NonFullBatch);
          }
        }

        // Stash the redundant BatchGroup (and the emptied TransferBatch, if
        // any) for recycling; flush the stash when it would overflow.
        const u32 NeededSlots = UnusedTransferBatch == nullptr ? 1U : 2U;
        if (UNLIKELY(Idx + NeededSlots > MaxUnusedSize)) {
          ScopedLock L(BatchClassRegion->FLLock);
          pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
          if (conditionVariableEnabled())
            BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
          Idx = 0;
        }
        Blocks[Idx++] =
            compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(Cur));
        if (UnusedTransferBatch) {
          Blocks[Idx++] =
              compactPtr(SizeClassMap::BatchClassId,
                         reinterpret_cast<uptr>(UnusedTransferBatch));
        }
        Prev = BG;
        BG = BG->Next;
        continue;
      }

      // At here, the `BG` is the first BatchGroup with CompactPtrGroupBase
      // larger than the first element in `GroupsToRelease`. We need to insert
      // `GroupsToRelease::front()` (which is `Cur` below)  before `BG`.
      //
      //   1. If `Prev` is nullptr, we simply push `Cur` to the front of
      //      FreeListInfo.BlockList.
      //   2. Otherwise, use `insert()` which inserts an element next to `Prev`.
      //
      // Afterwards, we don't need to advance `BG` because the order between
      // `BG` and the new `GroupsToRelease::front()` hasn't been checked.
      if (Prev == nullptr)
        Region->FreeListInfo.BlockList.push_front(Cur);
      else
        Region->FreeListInfo.BlockList.insert(Prev, Cur);
      DCHECK_EQ(Cur->Next, BG);
      Prev = Cur;
    }

    // Flush any remaining redundant BatchGroups/TransferBatches to the
    // batch-class freelist.
    if (Idx != 0) {
      ScopedLock L(BatchClassRegion->FLLock);
      pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
      if (conditionVariableEnabled())
        BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
    }

    // Debug-only invariant check: the merged freelist must remain strictly
    // sorted by CompactPtrGroupBase.
    if (SCUDO_DEBUG) {
      BatchGroupT *Prev = Region->FreeListInfo.BlockList.front();
      for (BatchGroupT *Cur = Prev->Next; Cur != nullptr;
           Prev = Cur, Cur = Cur->Next) {
        CHECK_LT(Prev->CompactPtrGroupBase, Cur->CompactPtrGroupBase);
      }
    }

    if (conditionVariableEnabled())
      Region->FLLockCV.notifyAll(Region->FLLock);
  }
167406c3fb27SDimitry Andric 
  // TODO: `PrimaryBase` can be obtained from ReservedMemory. This needs to be
  // deprecated.
  uptr PrimaryBase = 0;
  // Backing reserved memory for the primary allocator (see TODO above).
  ReservedMemoryT ReservedMemory = {};
  // The minimum size of pushed blocks that we will try to release the pages in
  // that size class.
  uptr SmallerBlockReleasePageDelta = 0;
  // Release-to-OS interval in milliseconds; atomic, so presumably updated and
  // read concurrently by option setters and the release paths.
  atomic_s32 ReleaseToOsIntervalMs = {};
  // Per-size-class region metadata, one entry per class, each aligned to the
  // cache line size.
  alignas(SCUDO_CACHE_LINE_SIZE) RegionInfo RegionInfoArray[NumClasses];
16840b57cec5SDimitry Andric };
16850b57cec5SDimitry Andric 
16860b57cec5SDimitry Andric } // namespace scudo
16870b57cec5SDimitry Andric 
16880b57cec5SDimitry Andric #endif // SCUDO_PRIMARY64_H_
1689