//===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;

// SizeClassAllocator64 -- allocator for 64-bit address space.
// The template parameter Params is a class containing the actual parameters.
//
// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
// Otherwise SpaceBeg=kSpaceBeg (fixed address).
// kSpaceSize is a power of two.
// At the beginning the entire space is mprotect-ed, then small parts of it
// are mapped on demand.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// FreeArray is an array of free-d chunks (stored as 4-byte offsets).
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray

struct SizeClassAllocator64FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,
  };
};

template <typename Allocator>
class MemoryMapper {
 public:
  typedef typename Allocator::CompactPtrT CompactPtrT;

  explicit MemoryMapper(const Allocator &allocator) : allocator_(allocator) {}

  bool GetAndResetStats(uptr &ranges, uptr &bytes) {
    ranges = released_ranges_count_;
    released_ranges_count_ = 0;
    bytes = released_bytes_;
    released_bytes_ = 0;
    return ranges != 0;
  }

  u64 *MapPackedCounterArrayBuffer(uptr count) {
    buffer_.clear();
    buffer_.resize(count);
    return buffer_.data();
  }

  // Releases [from, to) range of pages back to OS.
  void ReleasePageRangeToOS(uptr class_id, CompactPtrT from, CompactPtrT to) {
    const uptr region_base = allocator_.GetRegionBeginBySizeClass(class_id);
    const uptr from_page = allocator_.CompactPtrToPointer(region_base, from);
    const uptr to_page = allocator_.CompactPtrToPointer(region_base, to);
    ReleaseMemoryPagesToOS(from_page, to_page);
    released_ranges_count_++;
    released_bytes_ += to_page - from_page;
  }

 private:
  const Allocator &allocator_;
  uptr released_ranges_count_ = 0;
  uptr released_bytes_ = 0;
  InternalMmapVector<u64> buffer_;
};

template <class Params>
class SizeClassAllocator64 {
 public:
  using AddressSpaceView = typename Params::AddressSpaceView;
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const uptr kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;

  static const bool kRandomShuffleChunks =
      Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;

  typedef SizeClassAllocator64<Params> ThisT;
  typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
  typedef MemoryMapper<ThisT> MemoryMapperT;

  // When we know the size class (the region base) we can represent a pointer
  // as a 4-byte integer (offset from the region start shifted right by 4).
  typedef u32 CompactPtrT;
  static const uptr kCompactPtrScale = 4;
  CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) const {
    return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
  }
  uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) const {
    return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
  }
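
  // Round-trip sketch (illustrative only; `a` stands for any allocator
  // instance and `base` for the region base of the chunk's size class):
  //   CompactPtrT c = a.PointerToCompactPtr(base, base + 0x230);  // c == 0x23
  //   uptr p = a.CompactPtrToPointer(base, c);           // p == base + 0x230
  // Since offsets are stored >> 4 in a u32, a region may span at most
  // 2^32 << 4 == 2^36 bytes; see the COMPILER_CHECKs on kRegionSize below.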

  // If heap_start is nonzero, assumes kSpaceSize bytes are already mapped R/W
  // at heap_start and places the heap there. This mode requires kSpaceBeg ==
  // ~(uptr)0.
  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
    uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
    PremappedHeap = heap_start != 0;
    if (PremappedHeap) {
      CHECK(!kUsingConstantSpaceBeg);
      NonConstSpaceBeg = heap_start;
      uptr RegionInfoSize = AdditionalSize();
      RegionInfoSpace =
          address_range.Init(RegionInfoSize, PrimaryAllocatorName);
      CHECK_NE(RegionInfoSpace, ~(uptr)0);
      CHECK_EQ(RegionInfoSpace,
               address_range.MapOrDie(RegionInfoSpace, RegionInfoSize,
                                      "SizeClassAllocator: region info"));
      MapUnmapCallback().OnMap(RegionInfoSpace, RegionInfoSize);
    } else {
      if (kUsingConstantSpaceBeg) {
        CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
        CHECK_EQ(kSpaceBeg,
                 address_range.Init(TotalSpaceSize, PrimaryAllocatorName,
                                    kSpaceBeg));
      } else {
        // The combined allocator expects that a 2^N allocation is always
        // aligned to 2^N. For this to work, the start of the space needs to
        // be aligned as high as the largest size class (which also needs to
        // be a power of 2).
        NonConstSpaceBeg = address_range.InitAligned(
            TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
        CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
      }
      RegionInfoSpace = SpaceEnd();
      MapWithCallbackOrDie(RegionInfoSpace, AdditionalSize(),
                           "SizeClassAllocator: region info");
    }
    SetReleaseToOSIntervalMs(release_to_os_interval_ms);
    // Check that the RegionInfo array is aligned on the CacheLine size.
    DCHECK_EQ(RegionInfoSpace % kCacheLineSize, 0);
  }
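
  // Usage sketch (illustrative; the interval values and `premapped_beg` are
  // assumptions, not defaults mandated by this header):
  //   allocator.Init(-1);     // dynamic placement, never release to OS
  //   allocator.Init(1000);   // release free pages at most once per second
  //   allocator.Init(1000, premapped_beg);  // embedder premapped kSpaceSize
  //                                         // bytes; needs kSpaceBeg == ~0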

  s32 ReleaseToOSIntervalMs() const {
    return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
                 memory_order_relaxed);
  }

  void ForceReleaseToOS() {
    MemoryMapperT memory_mapper(*this);
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      Lock l(&GetRegionInfo(class_id)->mutex);
      MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/);
    }
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
           alignment <= SizeClassMap::kMaxSize;
  }

  NOINLINE void ReturnToAllocator(MemoryMapperT *memory_mapper,
                                  AllocatorStats *stat, uptr class_id,
                                  const CompactPtrT *chunks, uptr n_chunks) {
    RegionInfo *region = GetRegionInfo(class_id);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    CompactPtrT *free_array = GetFreeArray(region_beg);

    Lock l(&region->mutex);
    uptr old_num_chunks = region->num_freed_chunks;
    uptr new_num_freed_chunks = old_num_chunks + n_chunks;
    // Failure to allocate free array space while releasing memory is
    // non-recoverable.
    if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
                                       new_num_freed_chunks))) {
      Report("FATAL: Internal error: %s's allocator exhausted the free list "
             "space for size class %zd (%zd bytes).\n", SanitizerToolName,
             class_id, ClassIdToSize(class_id));
      Die();
    }
    for (uptr i = 0; i < n_chunks; i++)
      free_array[old_num_chunks + i] = chunks[i];
    region->num_freed_chunks = new_num_freed_chunks;
    region->stats.n_freed += n_chunks;

    MaybeReleaseToOS(memory_mapper, class_id, false /*force*/);
  }

  NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
                                 CompactPtrT *chunks, uptr n_chunks) {
    RegionInfo *region = GetRegionInfo(class_id);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    CompactPtrT *free_array = GetFreeArray(region_beg);

    Lock l(&region->mutex);
#if SANITIZER_WINDOWS
    /* On Windows unmapping of memory during __sanitizer_purge_allocator is
    explicit and immediate, so unmapped regions must be explicitly mapped back
    in when they are accessed again. */
    if (region->rtoi.last_released_bytes > 0) {
      MmapFixedOrDie(region_beg, region->mapped_user,
                     "SizeClassAllocator: region data");
      region->rtoi.n_freed_at_last_release = 0;
      region->rtoi.last_released_bytes = 0;
    }
#endif
    if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
      if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
                                      n_chunks - region->num_freed_chunks)))
        return false;
      CHECK_GE(region->num_freed_chunks, n_chunks);
    }
    region->num_freed_chunks -= n_chunks;
    uptr base_idx = region->num_freed_chunks;
    for (uptr i = 0; i < n_chunks; i++)
      chunks[i] = free_array[base_idx + i];
    region->stats.n_allocated += n_chunks;
    return true;
  }
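
  // The free array acts as a stack: ReturnToAllocator pushes n_chunks on top
  // and GetFromAllocator pops from the top, so recently freed chunks are
  // reused first (modulo the shuffling PopulateFreeArray applies to newly
  // added chunks when kRandomShuffleChunks is set).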

  bool PointerIsMine(const void *p) const {
    uptr P = reinterpret_cast<uptr>(p);
    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
      return P / kSpaceSize == kSpaceBeg / kSpaceSize;
    return P >= SpaceBeg() && P < SpaceEnd();
  }

  uptr GetRegionBegin(const void *p) {
    if (kUsingConstantSpaceBeg)
      return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);
    uptr space_beg = SpaceBeg();
    return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) +
           space_beg;
  }

  uptr GetRegionBeginBySizeClass(uptr class_id) const {
    return SpaceBeg() + kRegionSize * class_id;
  }

  uptr GetSizeClass(const void *p) {
    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
      return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
    return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
           kNumClassesRounded;
  }

  void *GetBlockBegin(const void *p) {
    uptr class_id = GetSizeClass(p);
    if (class_id >= kNumClasses) return nullptr;
    uptr size = ClassIdToSize(class_id);
    if (!size) return nullptr;
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = GetRegionBegin(p);
    uptr beg = chunk_idx * size;
    uptr next_beg = beg + size;
    const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id));
    if (region->mapped_user >= next_beg)
      return reinterpret_cast<void*>(reg_beg + beg);
    return nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(const void *p) {
    CHECK(kMetadataSize);
    uptr class_id = GetSizeClass(p);
    uptr size = ClassIdToSize(class_id);
    if (!size)
      return nullptr;
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
                                    (1 + chunk_idx) * kMetadataSize);
  }
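
  // Worked example (illustrative value): with kMetadataSize == 16, chunk 0's
  // MetaChunk sits at GetMetadataEnd(region_beg) - 16, chunk 1's at
  // GetMetadataEnd(region_beg) - 32, and so on -- metadata grows downward
  // from the free array, matching the region diagram at the top of this file.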

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapWithCallbackOrDie((uptr)address_range.base(), address_range.size());
  }

  static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats) {
    for (uptr class_id = 0; class_id < kNumClasses; class_id++)
      if (stats[class_id] == start)
        stats[class_id] = rss;
  }

  void PrintStats(uptr class_id, uptr rss) {
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user == 0) return;
    uptr in_use = region->stats.n_allocated - region->stats.n_freed;
    uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
    Printf(
        "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
        "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
        "last released: %6lldK region: 0x%zx\n",
        region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
        region->mapped_user >> 10, region->stats.n_allocated,
        region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
        rss >> 10, region->rtoi.num_releases,
        region->rtoi.last_released_bytes >> 10,
        SpaceBeg() + kRegionSize * class_id);
  }

  void PrintStats() {
    uptr rss_stats[kNumClasses];
    for (uptr class_id = 0; class_id < kNumClasses; class_id++)
      rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
    GetMemoryProfile(FillMemoryProfile, rss_stats);

    uptr total_mapped = 0;
    uptr total_rss = 0;
    uptr n_allocated = 0;
    uptr n_freed = 0;
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      if (region->mapped_user != 0) {
        total_mapped += region->mapped_user;
        total_rss += rss_stats[class_id];
      }
      n_allocated += region->stats.n_allocated;
      n_freed += region->stats.n_freed;
    }

    Printf("Stats: SizeClassAllocator64: %zdM mapped (%zdM rss) in "
           "%zd allocations; remains %zd\n", total_mapped >> 20,
           total_rss >> 20, n_allocated, n_allocated - n_freed);
    for (uptr class_id = 1; class_id < kNumClasses; class_id++)
      PrintStats(class_id, rss_stats[class_id]);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetRegionInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    for (int i = (int)kNumClasses - 1; i >= 0; i--) {
      GetRegionInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      uptr chunk_size = ClassIdToSize(class_id);
      uptr region_beg = SpaceBeg() + class_id * kRegionSize;
      uptr region_allocated_user_size =
          AddressSpaceView::Load(region)->allocated_user;
      for (uptr chunk = region_beg;
           chunk < region_beg + region_allocated_user_size;
           chunk += chunk_size) {
        // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
        callback(chunk, arg);
      }
    }
  }

  static uptr ClassIdToSize(uptr class_id) {
    return SizeClassMap::Size(class_id);
  }

  static uptr AdditionalSize() {
    return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
                     GetPageSizeCached());
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;

  // A packed array of counters. Each counter occupies 2^n bits, enough to
  // store the counter's max_value. The ctor will try to allocate the required
  // buffer via mapper->MapPackedCounterArrayBuffer and the caller is expected
  // to check whether the initialization was successful by checking the
  // IsAllocated() result. For performance's sake, none of the accessors check
  // the validity of the arguments: it is assumed that the index is always in
  // the [0, n) range and the value is not incremented past max_value.
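  // Worked example (illustrative numbers): for max_value == 3 a counter needs
  // MostSignificantSetBitIndex(3) + 1 == 2 bits, so one u64 packs 64 / 2 == 32
  // counters, and 100 counters round up to a buffer of 4 u64s (ceil(100/32)).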
  class PackedCounterArray {
   public:
    template <typename MemoryMapper>
    PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapper *mapper)
        : n(num_counters) {
      CHECK_GT(num_counters, 0);
      CHECK_GT(max_value, 0);
      constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL;
      // Rounding counter storage size up to the power of two allows for using
      // bit shifts calculating particular counter's index and offset.
      uptr counter_size_bits =
          RoundUpToPowerOfTwo(MostSignificantSetBitIndex(max_value) + 1);
      CHECK_LE(counter_size_bits, kMaxCounterBits);
      counter_size_bits_log = Log2(counter_size_bits);
      counter_mask = ~0ULL >> (kMaxCounterBits - counter_size_bits);

      uptr packing_ratio = kMaxCounterBits >> counter_size_bits_log;
      CHECK_GT(packing_ratio, 0);
      packing_ratio_log = Log2(packing_ratio);
      bit_offset_mask = packing_ratio - 1;

      buffer = mapper->MapPackedCounterArrayBuffer(
          RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log);
    }

    bool IsAllocated() const {
      return !!buffer;
    }

    u64 GetCount() const {
      return n;
    }

    uptr Get(uptr i) const {
      DCHECK_LT(i, n);
      uptr index = i >> packing_ratio_log;
      uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
      return (buffer[index] >> bit_offset) & counter_mask;
    }

    void Inc(uptr i) const {
      DCHECK_LT(Get(i), counter_mask);
      uptr index = i >> packing_ratio_log;
      uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
      buffer[index] += 1ULL << bit_offset;
    }

    void IncRange(uptr from, uptr to) const {
      DCHECK_LE(from, to);
      for (uptr i = from; i <= to; i++)
        Inc(i);
    }

   private:
    const u64 n;
    u64 counter_size_bits_log;
    u64 counter_mask;
    u64 packing_ratio_log;
    u64 bit_offset_mask;
    u64 *buffer;
  };

  template <class MemoryMapperT>
  class FreePagesRangeTracker {
   public:
    FreePagesRangeTracker(MemoryMapperT *mapper, uptr class_id)
        : memory_mapper(mapper),
          class_id(class_id),
          page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)) {}
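
    // Usage sketch (illustrative): NextPage(false), NextPage(true),
    // NextPage(true), NextPage(false) coalesces pages 1..2 into a single
    // ReleasePageRangeToOS(class_id, 1 << page_size_scaled_log,
    // 3 << page_size_scaled_log) call, issued when the non-freed page 3
    // closes the open range.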

    void NextPage(bool freed) {
      if (freed) {
        if (!in_the_range) {
          current_range_start_page = current_page;
          in_the_range = true;
        }
      } else {
        CloseOpenedRange();
      }
      current_page++;
    }

    void Done() {
      CloseOpenedRange();
    }

   private:
    void CloseOpenedRange() {
      if (in_the_range) {
        memory_mapper->ReleasePageRangeToOS(
            class_id, current_range_start_page << page_size_scaled_log,
            current_page << page_size_scaled_log);
        in_the_range = false;
      }
    }

    MemoryMapperT *const memory_mapper = nullptr;
    const uptr class_id = 0;
    const uptr page_size_scaled_log = 0;
    bool in_the_range = false;
    uptr current_page = 0;
    uptr current_range_start_page = 0;
  };

  // Iterates over the free_array to identify memory pages containing freed
  // chunks only and returns these pages back to OS.
  // allocated_pages_count is the total number of pages allocated for the
  // current bucket.
  template <typename MemoryMapper>
  static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
                                    uptr free_array_count, uptr chunk_size,
                                    uptr allocated_pages_count,
                                    MemoryMapper *memory_mapper,
                                    uptr class_id) {
    const uptr page_size = GetPageSizeCached();

    // Figure out the number of chunks per page and whether we can take a fast
    // path (the number of chunks per page is the same for all pages).
    uptr full_pages_chunk_count_max;
    bool same_chunk_count_per_page;
    if (chunk_size <= page_size && page_size % chunk_size == 0) {
      // Same number of chunks per page, no cross overs.
      full_pages_chunk_count_max = page_size / chunk_size;
      same_chunk_count_per_page = true;
    } else if (chunk_size <= page_size && page_size % chunk_size != 0 &&
               chunk_size % (page_size % chunk_size) == 0) {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks, but all pages contain the same
      // number of chunks.
      full_pages_chunk_count_max = page_size / chunk_size + 1;
      same_chunk_count_per_page = true;
    } else if (chunk_size <= page_size) {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks.
      full_pages_chunk_count_max = page_size / chunk_size + 2;
      same_chunk_count_per_page = false;
    } else if (chunk_size > page_size && chunk_size % page_size == 0) {
      // One chunk covers multiple pages, no cross overs.
      full_pages_chunk_count_max = 1;
      same_chunk_count_per_page = true;
    } else if (chunk_size > page_size) {
      // One chunk covers multiple pages, and some chunks cross page
      // boundaries. Some pages contain one chunk, some contain two.
      full_pages_chunk_count_max = 2;
      same_chunk_count_per_page = false;
    } else {
      UNREACHABLE("All chunk_size/page_size ratios must be handled.");
    }
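
    // Worked examples (illustrative, assuming page_size == 4096):
    //   chunk_size == 64:   4096 % 64 == 0 -> exactly 64 chunks per page,
    //                       fast path.
    //   chunk_size == 48:   4096 % 48 == 16 and 48 % 16 == 0 -> every page is
    //                       touched by exactly 86 chunks, fast path.
    //   chunk_size == 96:   4096 % 96 == 64 and 96 % 64 != 0 -> pages are
    //                       touched by 43 or 44 chunks, slow path.
    //   chunk_size == 8192: each chunk covers exactly two pages, one chunk
    //                       per page, fast path.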

    PackedCounterArray counters(allocated_pages_count,
                                full_pages_chunk_count_max, memory_mapper);
    if (!counters.IsAllocated())
      return;

    const uptr chunk_size_scaled = chunk_size >> kCompactPtrScale;
    const uptr page_size_scaled = page_size >> kCompactPtrScale;
    const uptr page_size_scaled_log = Log2(page_size_scaled);

    // Iterate over free chunks and count how many free chunks affect each
    // allocated page.
    if (chunk_size <= page_size && page_size % chunk_size == 0) {
      // Each chunk affects one page only.
      for (uptr i = 0; i < free_array_count; i++)
        counters.Inc(free_array[i] >> page_size_scaled_log);
    } else {
      // In all other cases chunks might affect more than one page.
      for (uptr i = 0; i < free_array_count; i++) {
        counters.IncRange(
            free_array[i] >> page_size_scaled_log,
            (free_array[i] + chunk_size_scaled - 1) >> page_size_scaled_log);
      }
    }

    // Iterate over pages detecting ranges of pages with chunk counters equal
    // to the expected number of chunks for the particular page.
    FreePagesRangeTracker<MemoryMapper> range_tracker(memory_mapper, class_id);
    if (same_chunk_count_per_page) {
      // Fast path, every page has the same number of chunks affecting it.
      for (uptr i = 0; i < counters.GetCount(); i++)
        range_tracker.NextPage(counters.Get(i) == full_pages_chunk_count_max);
    } else {
      // Slow path, go through the pages keeping count of how many chunks
      // affect each page.
      const uptr pn =
          chunk_size < page_size ? page_size_scaled / chunk_size_scaled : 1;
      const uptr pnc = pn * chunk_size_scaled;
      // The idea is to increment the current page pointer by the first chunk
      // size, the middle portion size (the portion of the page covered by
      // chunks except the first and the last one) and then the last chunk
      // size, adding up the number of chunks on the current page and checking
      // on every step whether the page boundary was crossed.
      uptr prev_page_boundary = 0;
      uptr current_boundary = 0;
      for (uptr i = 0; i < counters.GetCount(); i++) {
        uptr page_boundary = prev_page_boundary + page_size_scaled;
        uptr chunks_per_page = pn;
        if (current_boundary < page_boundary) {
          if (current_boundary > prev_page_boundary)
            chunks_per_page++;
          current_boundary += pnc;
          if (current_boundary < page_boundary) {
            chunks_per_page++;
            current_boundary += chunk_size_scaled;
          }
        }
        prev_page_boundary = page_boundary;

        range_tracker.NextPage(counters.Get(i) == chunks_per_page);
      }
    }
    range_tracker.Done();
  }

 private:
  friend class MemoryMapper<ThisT>;

  ReservedAddressRange address_range;

  static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
  // FreeArray is the array of free-d chunks (stored as 4-byte offsets).
  // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
  // elements, but in reality this will not happen. For simplicity we
  // dedicate 1/8 of the region's virtual space to FreeArray.
  static const uptr kFreeArraySize = kRegionSize / 8;

  static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
  uptr NonConstSpaceBeg;
  uptr SpaceBeg() const {
    return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
  }
  uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
  // kRegionSize must be >= 2^32.
  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
  // kRegionSize must be <= 2^36, see CompactPtrT.
  COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
  // Call mmap for user memory with at least this size.
  static const uptr kUserMapSize = 1 << 16;
  // Call mmap for metadata memory with at least this size.
  static const uptr kMetaMapSize = 1 << 16;
  // Call mmap for free array memory with at least this size.
  static const uptr kFreeArrayMapSize = 1 << 16;

  atomic_sint32_t release_to_os_interval_ms_;

  uptr RegionInfoSpace;

  // True if the user has already mapped the entire heap R/W.
  bool PremappedHeap;

  struct Stats {
    uptr n_allocated;
    uptr n_freed;
  };

  struct ReleaseToOsInfo {
    uptr n_freed_at_last_release;
    uptr num_releases;
    u64 last_release_at_ns;
    u64 last_released_bytes;
  };

  struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
    Mutex mutex;
    uptr num_freed_chunks;   // Number of elements in the freearray.
    uptr mapped_free_array;  // Bytes mapped for freearray.
    uptr allocated_user;     // Bytes allocated for user memory.
    uptr allocated_meta;     // Bytes allocated for metadata.
    uptr mapped_user;        // Bytes mapped for user memory.
    uptr mapped_meta;        // Bytes mapped for metadata.
    u32 rand_state;  // Seed for random shuffle, used if kRandomShuffleChunks.
    bool exhausted;  // Whether region is out of space for new chunks.
    Stats stats;
    ReleaseToOsInfo rtoi;
  };
  COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0);

  RegionInfo *GetRegionInfo(uptr class_id) const {
    DCHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo *>(RegionInfoSpace);
    return &regions[class_id];
  }

  uptr GetMetadataEnd(uptr region_beg) const {
    return region_beg + kRegionSize - kFreeArraySize;
  }

  uptr GetChunkIdx(uptr chunk, uptr size) const {
    if (!kUsingConstantSpaceBeg)
      chunk -= SpaceBeg();

    uptr offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // size always fits into 32-bits. If the offset fits too, use 32-bit div.
    if (offset >> (SANITIZER_WORDSIZE / 2))
      return offset / size;
    return (u32)offset / (u32)size;
  }
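
  // Note (illustrative): offsets below 2^32 take the 32-bit division branch,
  // which is noticeably cheaper than a 64-bit division on most targets; the
  // 64-bit branch is reachable only when kRegionSize exceeds 2^32.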

  CompactPtrT *GetFreeArray(uptr region_beg) const {
    return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg));
  }

  bool MapWithCallback(uptr beg, uptr size, const char *name) {
    if (PremappedHeap)
      return beg >= NonConstSpaceBeg &&
             beg + size <= NonConstSpaceBeg + kSpaceSize;
    uptr mapped = address_range.Map(beg, size, name);
    if (UNLIKELY(!mapped))
      return false;
    CHECK_EQ(beg, mapped);
    MapUnmapCallback().OnMap(beg, size);
    return true;
  }

  void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) {
    if (PremappedHeap) {
      CHECK_GE(beg, NonConstSpaceBeg);
      CHECK_LE(beg + size, NonConstSpaceBeg + kSpaceSize);
      return;
    }
    CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));
    MapUnmapCallback().OnMap(beg, size);
  }

  void UnmapWithCallbackOrDie(uptr beg, uptr size) {
    if (PremappedHeap)
      return;
    MapUnmapCallback().OnUnmap(beg, size);
    address_range.Unmap(beg, size);
  }

  bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
                            uptr num_freed_chunks) {
    uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
    if (region->mapped_free_array < needed_space) {
      uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
      CHECK_LE(new_mapped_free_array, kFreeArraySize);
      uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
                             region->mapped_free_array;
      uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
      if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size,
                                    "SizeClassAllocator: freearray")))
        return false;
      region->mapped_free_array = new_mapped_free_array;
    }
    return true;
  }
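
  // Worked example (illustrative): holding the 16385th freed chunk needs
  // 16385 * sizeof(CompactPtrT) == 65540 bytes, which rounds up to
  // 2 * kFreeArrayMapSize == 128K, so one more 64K piece is mapped right
  // after the already-mapped part of the array.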

  // Check whether this size class is exhausted.
  bool IsRegionExhausted(RegionInfo *region, uptr class_id,
                         uptr additional_map_size) {
    if (LIKELY(region->mapped_user + region->mapped_meta +
               additional_map_size <= kRegionSize - kFreeArraySize))
      return false;
    if (!region->exhausted) {
      region->exhausted = true;
      Printf("%s: Out of memory. ", SanitizerToolName);
      Printf("The process has exhausted %zuMB for size class %zu.\n",
             kRegionSize >> 20, ClassIdToSize(class_id));
    }
    return true;
  }

  NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,
                                  RegionInfo *region, uptr requested_count) {
    // region->mutex is held.
    const uptr region_beg = GetRegionBeginBySizeClass(class_id);
    const uptr size = ClassIdToSize(class_id);

    const uptr total_user_bytes =
        region->allocated_user + requested_count * size;
    // Map more space for chunks, if necessary.
    if (LIKELY(total_user_bytes > region->mapped_user)) {
      if (UNLIKELY(region->mapped_user == 0)) {
        if (!kUsingConstantSpaceBeg && kRandomShuffleChunks)
          // The random state is initialized from ASLR.
          region->rand_state = static_cast<u32>(region_beg >> 12);
        // Postpone the first release to OS attempt for ReleaseToOSIntervalMs,
        // preventing just allocated memory from being released sooner than
        // necessary and also preventing extraneous ReleaseMemoryPagesToOS
        // calls for short lived processes.
        // Do it only when the feature is turned on, to avoid a potentially
        // extraneous syscall.
        if (ReleaseToOSIntervalMs() >= 0)
          region->rtoi.last_release_at_ns = MonotonicNanoTime();
      }
      // Do the mmap for the user memory.
      const uptr user_map_size =
          RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize);
      if (UNLIKELY(IsRegionExhausted(region, class_id, user_map_size)))
        return false;
      if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
                                    user_map_size,
                                    "SizeClassAllocator: region data")))
        return false;
      stat->Add(AllocatorStatMapped, user_map_size);
      region->mapped_user += user_map_size;
    }
    const uptr new_chunks_count =
        (region->mapped_user - region->allocated_user) / size;

    if (kMetadataSize) {
      // Calculate the required space for metadata.
      const uptr total_meta_bytes =
          region->allocated_meta + new_chunks_count * kMetadataSize;
      const uptr meta_map_size = (total_meta_bytes > region->mapped_meta) ?
          RoundUpTo(total_meta_bytes - region->mapped_meta, kMetaMapSize) : 0;
      // Map more space for metadata, if necessary.
      if (meta_map_size) {
        if (UNLIKELY(IsRegionExhausted(region, class_id, meta_map_size)))
          return false;
        if (UNLIKELY(!MapWithCallback(
                GetMetadataEnd(region_beg) - region->mapped_meta -
                    meta_map_size,
                meta_map_size, "SizeClassAllocator: region metadata")))
          return false;
        region->mapped_meta += meta_map_size;
      }
    }

    // If necessary, allocate more space for the free array and populate it
    // with newly allocated chunks.
    const uptr total_freed_chunks =
        region->num_freed_chunks + new_chunks_count;
    if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
      return false;
    CompactPtrT *free_array = GetFreeArray(region_beg);
    for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count;
         i++, chunk += size)
      free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk);
    if (kRandomShuffleChunks)
      RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count,
                    &region->rand_state);

    // All necessary memory is mapped and now it is safe to advance all
    // 'allocated_*' counters.
    region->num_freed_chunks += new_chunks_count;
    region->allocated_user += new_chunks_count * size;
    CHECK_LE(region->allocated_user, region->mapped_user);
    region->allocated_meta += new_chunks_count * kMetadataSize;
    CHECK_LE(region->allocated_meta, region->mapped_meta);
    region->exhausted = false;

    // TODO(alekseyshl): Consider bumping last_release_at_ns here to prevent
    // MaybeReleaseToOS from releasing just allocated pages or protect these
    // not yet used chunks some other way.

    return true;
  }

  // Attempts to release RAM occupied by freed chunks back to OS. The region
  // is expected to be locked.
  //
  // TODO(morehouse): Support a callback on memory release so HWASan can
  // release aliases as well.
  void MaybeReleaseToOS(MemoryMapperT *memory_mapper, uptr class_id,
                        bool force) {
    RegionInfo *region = GetRegionInfo(class_id);
    const uptr chunk_size = ClassIdToSize(class_id);
    const uptr page_size = GetPageSizeCached();

    uptr n = region->num_freed_chunks;
    if (n * chunk_size < page_size)
      return;  // No chance to release anything.
    if ((region->stats.n_freed -
         region->rtoi.n_freed_at_last_release) * chunk_size < page_size) {
      return;  // Nothing new to release.
    }

    if (!force) {
      s32 interval_ms = ReleaseToOSIntervalMs();
      if (interval_ms < 0)
        return;

      if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
          MonotonicNanoTime()) {
        return;  // Memory was returned recently.
      }
    }

    ReleaseFreeMemoryToOS(
        GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
        RoundUpTo(region->allocated_user, page_size) / page_size,
        memory_mapper, class_id);

    uptr ranges, bytes;
    if (memory_mapper->GetAndResetStats(ranges, bytes)) {
      region->rtoi.n_freed_at_last_release = region->stats.n_freed;
      region->rtoi.num_releases += ranges;
      region->rtoi.last_released_bytes = bytes;
    }
    region->rtoi.last_release_at_ns = MonotonicNanoTime();
  }
};
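
// A minimal instantiation sketch (illustrative only: the parameter values and
// helper types such as DefaultSizeClassMap, NoOpMapUnmapCallback and
// LocalAddressSpaceView are assumptions modeled on other sanitizer code, not
// requirements imposed by this header):
//
//   struct AP64 {
//     static const uptr kSpaceBeg = ~(uptr)0;        // pick base via mmap
//     static const uptr kSpaceSize = 0x10000000000;  // 1T
//     static const uptr kMetadataSize = 16;
//     typedef DefaultSizeClassMap SizeClassMap;
//     typedef NoOpMapUnmapCallback MapUnmapCallback;
//     static const uptr kFlags = 0;
//     using AddressSpaceView = LocalAddressSpaceView;
//   };
//   typedef SizeClassAllocator64<AP64> PrimaryAllocator;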