1 //===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Part of the Sanitizer Allocator.
10 //
11 //===----------------------------------------------------------------------===//
12 #ifndef SANITIZER_ALLOCATOR_H
13 #error This file must be included inside sanitizer_allocator.h
14 #endif
15 
16 template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;
17 
18 // SizeClassAllocator64 -- allocator for 64-bit address space.
19 // The template parameter Params is a class containing the actual parameters.
20 //
21 // Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
22 // If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
23 // Otherwise SpaceBeg=kSpaceBeg (fixed address).
24 // kSpaceSize is a power of two.
25 // At the beginning the entire space is mprotect-ed, then small parts of it
26 // are mapped on demand.
27 //
28 // Region: a part of Space dedicated to a single size class.
29 // There are kNumClasses Regions of equal size.
30 //
31 // UserChunk: a piece of memory returned to user.
32 // MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
33 
// FreeArray is an array of freed chunks (stored as 4-byte offsets).
35 //
36 // A Region looks like this:
37 // UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray
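//
// A worked sketch of the layout arithmetic (the numbers are illustrative,
// not prescribed by this header): with kSpaceSize = 2^40 and
// kNumClassesRounded = 64, each Region spans kRegionSize = 2^40 / 64 = 2^34
// bytes. For a size class of 64 bytes, UserChunk i occupies
// [RegionBeg + i * 64, RegionBeg + (i + 1) * 64) while its MetaChunk sits at
// GetMetadataEnd(RegionBeg) - (i + 1) * kMetadataSize, growing downwards
// towards the user chunks.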
38 
39 struct SizeClassAllocator64FlagMasks {  //  Bit masks.
40   enum {
41     kRandomShuffleChunks = 1,
42   };
43 };
44 
45 template <typename Allocator>
46 class MemoryMapper {
47  public:
48   typedef typename Allocator::CompactPtrT CompactPtrT;
49 
50   explicit MemoryMapper(const Allocator &allocator) : allocator_(allocator) {}
51 
52   bool GetAndResetStats(uptr &ranges, uptr &bytes) {
53     ranges = released_ranges_count_;
54     released_ranges_count_ = 0;
55     bytes = released_bytes_;
56     released_bytes_ = 0;
57     return ranges != 0;
58   }
59 
60   u64 *MapPackedCounterArrayBuffer(uptr count) {
61     buffer_.clear();
62     buffer_.resize(count);
63     return buffer_.data();
64   }
65 
66   // Releases [from, to) range of pages back to OS.
67   void ReleasePageRangeToOS(uptr class_id, CompactPtrT from, CompactPtrT to) {
68     const uptr region_base = allocator_.GetRegionBeginBySizeClass(class_id);
69     const uptr from_page = allocator_.CompactPtrToPointer(region_base, from);
70     const uptr to_page = allocator_.CompactPtrToPointer(region_base, to);
71     ReleaseMemoryPagesToOS(from_page, to_page);
72     released_ranges_count_++;
73     released_bytes_ += to_page - from_page;
74   }
75 
76  private:
77   const Allocator &allocator_;
78   uptr released_ranges_count_ = 0;
79   uptr released_bytes_ = 0;
80   InternalMmapVector<u64> buffer_;
81 };
82 
83 template <class Params>
84 class SizeClassAllocator64 {
85  public:
86   using AddressSpaceView = typename Params::AddressSpaceView;
87   static const uptr kSpaceBeg = Params::kSpaceBeg;
88   static const uptr kSpaceSize = Params::kSpaceSize;
89   static const uptr kMetadataSize = Params::kMetadataSize;
90   typedef typename Params::SizeClassMap SizeClassMap;
91   typedef typename Params::MapUnmapCallback MapUnmapCallback;
92 
93   static const bool kRandomShuffleChunks =
94       Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
95 
96   typedef SizeClassAllocator64<Params> ThisT;
97   typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
98   typedef MemoryMapper<ThisT> MemoryMapperT;
99 
  // When we know the size class (and hence the region base) we can represent
  // a pointer as a 4-byte integer: the offset from the region start, shifted
  // right by kCompactPtrScale (4) bits.
102   typedef u32 CompactPtrT;
103   static const uptr kCompactPtrScale = 4;
104   CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) const {
105     return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
106   }
107   uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) const {
108     return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
109   }
110 
111   // If heap_start is nonzero, assumes kSpaceSize bytes are already mapped R/W
112   // at heap_start and places the heap there.  This mode requires kSpaceBeg ==
113   // ~(uptr)0.
114   void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
115     uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
116     PremappedHeap = heap_start != 0;
117     if (PremappedHeap) {
118       CHECK(!kUsingConstantSpaceBeg);
119       NonConstSpaceBeg = heap_start;
120       uptr RegionInfoSize = AdditionalSize();
121       RegionInfoSpace =
122           address_range.Init(RegionInfoSize, PrimaryAllocatorName);
123       CHECK_NE(RegionInfoSpace, ~(uptr)0);
124       CHECK_EQ(RegionInfoSpace,
125                address_range.MapOrDie(RegionInfoSpace, RegionInfoSize,
126                                       "SizeClassAllocator: region info"));
127       MapUnmapCallback().OnMap(RegionInfoSpace, RegionInfoSize);
128     } else {
129       if (kUsingConstantSpaceBeg) {
130         CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
131         CHECK_EQ(kSpaceBeg,
132                  address_range.Init(TotalSpaceSize, PrimaryAllocatorName,
133                                     kSpaceBeg));
134       } else {
        // The combined allocator expects that a 2^N allocation is always
        // aligned to 2^N. For this to work, the start of the space needs to be
        // aligned as high as the largest size class (which also needs to be a
        // power of 2).
139         NonConstSpaceBeg = address_range.InitAligned(
140             TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
141         CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
142       }
143       RegionInfoSpace = SpaceEnd();
144       MapWithCallbackOrDie(RegionInfoSpace, AdditionalSize(),
145                            "SizeClassAllocator: region info");
146     }
147     SetReleaseToOSIntervalMs(release_to_os_interval_ms);
    // Check that the RegionInfo array is aligned to the cache line size.
149     DCHECK_EQ(RegionInfoSpace % kCacheLineSize, 0);
150   }
151 
152   s32 ReleaseToOSIntervalMs() const {
153     return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
154   }
155 
156   void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
157     atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
158                  memory_order_relaxed);
159   }
160 
161   void ForceReleaseToOS() {
162     MemoryMapperT memory_mapper(*this);
163     for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
164       BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
165       MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/);
166     }
167   }
168 
169   static bool CanAllocate(uptr size, uptr alignment) {
170     return size <= SizeClassMap::kMaxSize &&
171       alignment <= SizeClassMap::kMaxSize;
172   }
173 
174   NOINLINE void ReturnToAllocator(MemoryMapperT *memory_mapper,
175                                   AllocatorStats *stat, uptr class_id,
176                                   const CompactPtrT *chunks, uptr n_chunks) {
177     RegionInfo *region = GetRegionInfo(class_id);
178     uptr region_beg = GetRegionBeginBySizeClass(class_id);
179     CompactPtrT *free_array = GetFreeArray(region_beg);
180 
181     BlockingMutexLock l(&region->mutex);
182     uptr old_num_chunks = region->num_freed_chunks;
183     uptr new_num_freed_chunks = old_num_chunks + n_chunks;
    // Failure to allocate free array space while releasing memory is
    // non-recoverable.
186     if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
187                                        new_num_freed_chunks))) {
188       Report("FATAL: Internal error: %s's allocator exhausted the free list "
189              "space for size class %zd (%zd bytes).\n", SanitizerToolName,
190              class_id, ClassIdToSize(class_id));
191       Die();
192     }
193     for (uptr i = 0; i < n_chunks; i++)
194       free_array[old_num_chunks + i] = chunks[i];
195     region->num_freed_chunks = new_num_freed_chunks;
196     region->stats.n_freed += n_chunks;
197 
198     MaybeReleaseToOS(memory_mapper, class_id, false /*force*/);
199   }
200 
201   NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
202                                  CompactPtrT *chunks, uptr n_chunks) {
203     RegionInfo *region = GetRegionInfo(class_id);
204     uptr region_beg = GetRegionBeginBySizeClass(class_id);
205     CompactPtrT *free_array = GetFreeArray(region_beg);
206 
207     BlockingMutexLock l(&region->mutex);
208 #if SANITIZER_WINDOWS
    // On Windows, unmapping memory during __sanitizer_purge_allocator is
    // explicit and immediate, so unmapped regions must be explicitly mapped
    // back in when they are accessed again.
    if (region->rtoi.last_released_bytes > 0) {
      MmapFixedOrDie(region_beg, region->mapped_user,
                     "SizeClassAllocator: region data");
215       region->rtoi.n_freed_at_last_release = 0;
216       region->rtoi.last_released_bytes = 0;
217     }
218 #endif
219     if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
220       if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
221                                       n_chunks - region->num_freed_chunks)))
222         return false;
223       CHECK_GE(region->num_freed_chunks, n_chunks);
224     }
225     region->num_freed_chunks -= n_chunks;
226     uptr base_idx = region->num_freed_chunks;
227     for (uptr i = 0; i < n_chunks; i++)
228       chunks[i] = free_array[base_idx + i];
229     region->stats.n_allocated += n_chunks;
230     return true;
231   }
232 
233   bool PointerIsMine(const void *p) const {
234     uptr P = reinterpret_cast<uptr>(p);
235     if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
236       return P / kSpaceSize == kSpaceBeg / kSpaceSize;
237     return P >= SpaceBeg() && P < SpaceEnd();
238   }
239 
240   uptr GetRegionBegin(const void *p) {
241     if (kUsingConstantSpaceBeg)
242       return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);
243     uptr space_beg = SpaceBeg();
    return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) +
        space_beg;
246   }
247 
248   uptr GetRegionBeginBySizeClass(uptr class_id) const {
249     return SpaceBeg() + kRegionSize * class_id;
250   }
251 
252   uptr GetSizeClass(const void *p) {
253     if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
254       return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
255     return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
256            kNumClassesRounded;
257   }
258 
259   void *GetBlockBegin(const void *p) {
260     uptr class_id = GetSizeClass(p);
261     if (class_id >= kNumClasses) return nullptr;
262     uptr size = ClassIdToSize(class_id);
263     if (!size) return nullptr;
264     uptr chunk_idx = GetChunkIdx((uptr)p, size);
265     uptr reg_beg = GetRegionBegin(p);
266     uptr beg = chunk_idx * size;
267     uptr next_beg = beg + size;
268     const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id));
269     if (region->mapped_user >= next_beg)
270       return reinterpret_cast<void*>(reg_beg + beg);
271     return nullptr;
272   }
273 
274   uptr GetActuallyAllocatedSize(void *p) {
275     CHECK(PointerIsMine(p));
276     return ClassIdToSize(GetSizeClass(p));
277   }
278 
279   static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
280 
281   void *GetMetaData(const void *p) {
282     CHECK(kMetadataSize);
283     uptr class_id = GetSizeClass(p);
284     uptr size = ClassIdToSize(class_id);
285     uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
286     uptr region_beg = GetRegionBeginBySizeClass(class_id);
287     return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
288                                     (1 + chunk_idx) * kMetadataSize);
289   }
290 
291   uptr TotalMemoryUsed() {
292     uptr res = 0;
293     for (uptr i = 0; i < kNumClasses; i++)
294       res += GetRegionInfo(i)->allocated_user;
295     return res;
296   }
297 
298   // Test-only.
299   void TestOnlyUnmap() {
300     UnmapWithCallbackOrDie((uptr)address_range.base(), address_range.size());
301   }
302 
  static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
                                uptr stats_size) {
305     for (uptr class_id = 0; class_id < stats_size; class_id++)
306       if (stats[class_id] == start)
307         stats[class_id] = rss;
308   }
309 
310   void PrintStats(uptr class_id, uptr rss) {
311     RegionInfo *region = GetRegionInfo(class_id);
312     if (region->mapped_user == 0) return;
313     uptr in_use = region->stats.n_allocated - region->stats.n_freed;
314     uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
315     Printf(
316         "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
317         "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
318         "last released: %6zdK region: 0x%zx\n",
319         region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
320         region->mapped_user >> 10, region->stats.n_allocated,
321         region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
322         rss >> 10, region->rtoi.num_releases,
323         region->rtoi.last_released_bytes >> 10,
324         SpaceBeg() + kRegionSize * class_id);
325   }
326 
327   void PrintStats() {
328     uptr rss_stats[kNumClasses];
329     for (uptr class_id = 0; class_id < kNumClasses; class_id++)
330       rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
331     GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses);
332 
333     uptr total_mapped = 0;
334     uptr total_rss = 0;
335     uptr n_allocated = 0;
336     uptr n_freed = 0;
337     for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
338       RegionInfo *region = GetRegionInfo(class_id);
339       if (region->mapped_user != 0) {
340         total_mapped += region->mapped_user;
341         total_rss += rss_stats[class_id];
342       }
343       n_allocated += region->stats.n_allocated;
344       n_freed += region->stats.n_freed;
345     }
346 
347     Printf("Stats: SizeClassAllocator64: %zdM mapped (%zdM rss) in "
348            "%zd allocations; remains %zd\n", total_mapped >> 20,
349            total_rss >> 20, n_allocated, n_allocated - n_freed);
350     for (uptr class_id = 1; class_id < kNumClasses; class_id++)
351       PrintStats(class_id, rss_stats[class_id]);
352   }
353 
354   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
355   // introspection API.
356   void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
357     for (uptr i = 0; i < kNumClasses; i++) {
358       GetRegionInfo(i)->mutex.Lock();
359     }
360   }
361 
362   void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
363     for (int i = (int)kNumClasses - 1; i >= 0; i--) {
364       GetRegionInfo(i)->mutex.Unlock();
365     }
366   }
367 
368   // Iterate over all existing chunks.
369   // The allocator must be locked when calling this function.
370   void ForEachChunk(ForEachChunkCallback callback, void *arg) {
371     for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
372       RegionInfo *region = GetRegionInfo(class_id);
373       uptr chunk_size = ClassIdToSize(class_id);
374       uptr region_beg = SpaceBeg() + class_id * kRegionSize;
375       uptr region_allocated_user_size =
376           AddressSpaceView::Load(region)->allocated_user;
377       for (uptr chunk = region_beg;
378            chunk < region_beg + region_allocated_user_size;
379            chunk += chunk_size) {
380         // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
381         callback(chunk, arg);
382       }
383     }
384   }
385 
386   static uptr ClassIdToSize(uptr class_id) {
387     return SizeClassMap::Size(class_id);
388   }
389 
390   static uptr AdditionalSize() {
391     return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
392                      GetPageSizeCached());
393   }
394 
395   typedef SizeClassMap SizeClassMapT;
396   static const uptr kNumClasses = SizeClassMap::kNumClasses;
397   static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
398 
  // A packed array of counters. Each counter occupies 2^n bits, enough to
  // store the counter's max_value. The constructor tries to allocate the
  // required buffer via mapper->MapPackedCounterArrayBuffer, and the caller
  // is expected to verify that the initialization succeeded by checking the
  // IsAllocated() result. For performance, none of the accessors check the
  // validity of their arguments: it is assumed that the index is always in
  // the [0, n) range and that the value is never incremented past max_value.
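  //
  // Packing example (illustrative): for max_value = 14 the counter size is
  // rounded up to 4 bits, so a single u64 element packs 16 counters;
  // Get(i) then reads the 4 bits at offset 4 * (i % 16) of buffer[i / 16].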
406   class PackedCounterArray {
407    public:
408     template <typename MemoryMapper>
409     PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapper *mapper)
410         : n(num_counters) {
411       CHECK_GT(num_counters, 0);
412       CHECK_GT(max_value, 0);
413       constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL;
      // Rounding the counter storage size up to a power of two allows using
      // bit shifts to calculate a particular counter's index and offset.
416       uptr counter_size_bits =
417           RoundUpToPowerOfTwo(MostSignificantSetBitIndex(max_value) + 1);
418       CHECK_LE(counter_size_bits, kMaxCounterBits);
419       counter_size_bits_log = Log2(counter_size_bits);
420       counter_mask = ~0ULL >> (kMaxCounterBits - counter_size_bits);
421 
422       uptr packing_ratio = kMaxCounterBits >> counter_size_bits_log;
423       CHECK_GT(packing_ratio, 0);
424       packing_ratio_log = Log2(packing_ratio);
425       bit_offset_mask = packing_ratio - 1;
426 
427       buffer = mapper->MapPackedCounterArrayBuffer(
428           RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log);
429     }
430 
431     bool IsAllocated() const {
432       return !!buffer;
433     }
434 
435     u64 GetCount() const {
436       return n;
437     }
438 
439     uptr Get(uptr i) const {
440       DCHECK_LT(i, n);
441       uptr index = i >> packing_ratio_log;
442       uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
443       return (buffer[index] >> bit_offset) & counter_mask;
444     }
445 
446     void Inc(uptr i) const {
447       DCHECK_LT(Get(i), counter_mask);
448       uptr index = i >> packing_ratio_log;
449       uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
450       buffer[index] += 1ULL << bit_offset;
451     }
452 
453     void IncRange(uptr from, uptr to) const {
454       DCHECK_LE(from, to);
455       for (uptr i = from; i <= to; i++)
456         Inc(i);
457     }
458 
459    private:
460     const u64 n;
461     u64 counter_size_bits_log;
462     u64 counter_mask;
463     u64 packing_ratio_log;
464     u64 bit_offset_mask;
465     u64* buffer;
466   };
467 
468   template <class MemoryMapperT>
469   class FreePagesRangeTracker {
470    public:
471     FreePagesRangeTracker(MemoryMapperT *mapper, uptr class_id)
472         : memory_mapper(mapper),
473           class_id(class_id),
474           page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)) {}
475 
476     void NextPage(bool freed) {
477       if (freed) {
478         if (!in_the_range) {
479           current_range_start_page = current_page;
480           in_the_range = true;
481         }
482       } else {
483         CloseOpenedRange();
484       }
485       current_page++;
486     }
487 
488     void Done() {
489       CloseOpenedRange();
490     }
491 
492    private:
493     void CloseOpenedRange() {
494       if (in_the_range) {
495         memory_mapper->ReleasePageRangeToOS(
496             class_id, current_range_start_page << page_size_scaled_log,
497             current_page << page_size_scaled_log);
498         in_the_range = false;
499       }
500     }
501 
502     MemoryMapperT *const memory_mapper = nullptr;
503     const uptr class_id = 0;
504     const uptr page_size_scaled_log = 0;
505     bool in_the_range = false;
506     uptr current_page = 0;
507     uptr current_range_start_page = 0;
508   };
509 
  // Iterates over the free_array to identify memory pages that contain freed
  // chunks only and returns those pages back to the OS.
  // allocated_pages_count is the total number of pages allocated for the
  // current bucket.
514   template <typename MemoryMapper>
515   static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
516                                     uptr free_array_count, uptr chunk_size,
517                                     uptr allocated_pages_count,
518                                     MemoryMapper *memory_mapper,
519                                     uptr class_id) {
520     const uptr page_size = GetPageSizeCached();
521 
522     // Figure out the number of chunks per page and whether we can take a fast
523     // path (the number of chunks per page is the same for all pages).
524     uptr full_pages_chunk_count_max;
525     bool same_chunk_count_per_page;
526     if (chunk_size <= page_size && page_size % chunk_size == 0) {
      // Same number of chunks per page, no crossovers.
528       full_pages_chunk_count_max = page_size / chunk_size;
529       same_chunk_count_per_page = true;
530     } else if (chunk_size <= page_size && page_size % chunk_size != 0 &&
531         chunk_size % (page_size % chunk_size) == 0) {
532       // Some chunks are crossing page boundaries, which means that the page
533       // contains one or two partial chunks, but all pages contain the same
534       // number of chunks.
535       full_pages_chunk_count_max = page_size / chunk_size + 1;
536       same_chunk_count_per_page = true;
537     } else if (chunk_size <= page_size) {
538       // Some chunks are crossing page boundaries, which means that the page
539       // contains one or two partial chunks.
540       full_pages_chunk_count_max = page_size / chunk_size + 2;
541       same_chunk_count_per_page = false;
542     } else if (chunk_size > page_size && chunk_size % page_size == 0) {
      // One chunk covers multiple pages, no crossovers.
544       full_pages_chunk_count_max = 1;
545       same_chunk_count_per_page = true;
546     } else if (chunk_size > page_size) {
      // One chunk covers multiple pages and some chunks cross page
      // boundaries. Some pages are affected by one chunk, some by two.
549       full_pages_chunk_count_max = 2;
550       same_chunk_count_per_page = false;
551     } else {
552       UNREACHABLE("All chunk_size/page_size ratios must be handled.");
553     }
554 
555     PackedCounterArray counters(allocated_pages_count,
556                                 full_pages_chunk_count_max, memory_mapper);
557     if (!counters.IsAllocated())
558       return;
559 
560     const uptr chunk_size_scaled = chunk_size >> kCompactPtrScale;
561     const uptr page_size_scaled = page_size >> kCompactPtrScale;
562     const uptr page_size_scaled_log = Log2(page_size_scaled);
563 
564     // Iterate over free chunks and count how many free chunks affect each
565     // allocated page.
566     if (chunk_size <= page_size && page_size % chunk_size == 0) {
567       // Each chunk affects one page only.
568       for (uptr i = 0; i < free_array_count; i++)
569         counters.Inc(free_array[i] >> page_size_scaled_log);
570     } else {
571       // In all other cases chunks might affect more than one page.
572       for (uptr i = 0; i < free_array_count; i++) {
573         counters.IncRange(
574             free_array[i] >> page_size_scaled_log,
575             (free_array[i] + chunk_size_scaled - 1) >> page_size_scaled_log);
576       }
577     }
578 
579     // Iterate over pages detecting ranges of pages with chunk counters equal
580     // to the expected number of chunks for the particular page.
581     FreePagesRangeTracker<MemoryMapper> range_tracker(memory_mapper, class_id);
582     if (same_chunk_count_per_page) {
583       // Fast path, every page has the same number of chunks affecting it.
584       for (uptr i = 0; i < counters.GetCount(); i++)
585         range_tracker.NextPage(counters.Get(i) == full_pages_chunk_count_max);
586     } else {
      // Slow path: go through the pages, keeping count of how many chunks
      // affect each page.
589       const uptr pn =
590           chunk_size < page_size ? page_size_scaled / chunk_size_scaled : 1;
591       const uptr pnc = pn * chunk_size_scaled;
      // The idea is to advance the current chunk boundary by the first chunk
      // size, then by the middle portion size (the portion of the page covered
      // by chunks other than the first and the last one), and then by the last
      // chunk size, adding up the number of chunks on the current page and
      // checking on every step whether the page boundary was crossed.
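      // For example (illustrative scaled sizes): with page_size_scaled = 256
      // and chunk_size_scaled = 96, pn = 2 and pnc = 192; the first page is
      // affected by 3 chunks (those at offsets 0 and 96 plus the head of the
      // one at 192), while the second is affected by 4 (the tail of 192 plus
      // the chunks at 288 and 384 and the head of the one at 480).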
597       uptr prev_page_boundary = 0;
598       uptr current_boundary = 0;
599       for (uptr i = 0; i < counters.GetCount(); i++) {
600         uptr page_boundary = prev_page_boundary + page_size_scaled;
601         uptr chunks_per_page = pn;
602         if (current_boundary < page_boundary) {
603           if (current_boundary > prev_page_boundary)
604             chunks_per_page++;
605           current_boundary += pnc;
606           if (current_boundary < page_boundary) {
607             chunks_per_page++;
608             current_boundary += chunk_size_scaled;
609           }
610         }
611         prev_page_boundary = page_boundary;
612 
613         range_tracker.NextPage(counters.Get(i) == chunks_per_page);
614       }
615     }
616     range_tracker.Done();
617   }
618 
619  private:
620   friend class MemoryMapper<ThisT>;
621 
622   ReservedAddressRange address_range;
623 
624   static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
  // FreeArray is the array of freed chunks (stored as 4-byte offsets).
  // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
  // elements, but in reality this will not happen. For simplicity we
  // dedicate 1/8 of the region's virtual space to FreeArray.
629   static const uptr kFreeArraySize = kRegionSize / 8;
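  // E.g. with the hypothetical kRegionSize = 2^34 from the layout sketch at
  // the top of this file, FreeArray gets 2^31 bytes of virtual space, enough
  // for 2^29 4-byte CompactPtrT entries.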
630 
631   static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
632   uptr NonConstSpaceBeg;
633   uptr SpaceBeg() const {
634     return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
635   }
  uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
637   // kRegionSize must be >= 2^32.
638   COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
639   // kRegionSize must be <= 2^36, see CompactPtrT.
640   COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
641   // Call mmap for user memory with at least this size.
642   static const uptr kUserMapSize = 1 << 16;
643   // Call mmap for metadata memory with at least this size.
644   static const uptr kMetaMapSize = 1 << 16;
645   // Call mmap for free array memory with at least this size.
646   static const uptr kFreeArrayMapSize = 1 << 16;
647 
648   atomic_sint32_t release_to_os_interval_ms_;
649 
650   uptr RegionInfoSpace;
651 
652   // True if the user has already mapped the entire heap R/W.
653   bool PremappedHeap;
654 
655   struct Stats {
656     uptr n_allocated;
657     uptr n_freed;
658   };
659 
660   struct ReleaseToOsInfo {
661     uptr n_freed_at_last_release;
662     uptr num_releases;
663     u64 last_release_at_ns;
664     u64 last_released_bytes;
665   };
666 
667   struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
668     BlockingMutex mutex;
669     uptr num_freed_chunks;  // Number of elements in the freearray.
670     uptr mapped_free_array;  // Bytes mapped for freearray.
671     uptr allocated_user;  // Bytes allocated for user memory.
672     uptr allocated_meta;  // Bytes allocated for metadata.
673     uptr mapped_user;  // Bytes mapped for user memory.
674     uptr mapped_meta;  // Bytes mapped for metadata.
675     u32 rand_state;  // Seed for random shuffle, used if kRandomShuffleChunks.
676     bool exhausted;  // Whether region is out of space for new chunks.
677     Stats stats;
678     ReleaseToOsInfo rtoi;
679   };
680   COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0);
681 
682   RegionInfo *GetRegionInfo(uptr class_id) const {
683     DCHECK_LT(class_id, kNumClasses);
684     RegionInfo *regions = reinterpret_cast<RegionInfo *>(RegionInfoSpace);
685     return &regions[class_id];
686   }
687 
688   uptr GetMetadataEnd(uptr region_beg) const {
689     return region_beg + kRegionSize - kFreeArraySize;
690   }
691 
692   uptr GetChunkIdx(uptr chunk, uptr size) const {
693     if (!kUsingConstantSpaceBeg)
694       chunk -= SpaceBeg();
695 
696     uptr offset = chunk % kRegionSize;
    // Here we divide by a non-constant, which is costly.
    // size always fits into 32 bits. If the offset fits too, use a 32-bit div.
699     if (offset >> (SANITIZER_WORDSIZE / 2))
700       return offset / size;
701     return (u32)offset / (u32)size;
702   }
703 
704   CompactPtrT *GetFreeArray(uptr region_beg) const {
705     return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg));
706   }
707 
708   bool MapWithCallback(uptr beg, uptr size, const char *name) {
709     if (PremappedHeap)
710       return beg >= NonConstSpaceBeg &&
711              beg + size <= NonConstSpaceBeg + kSpaceSize;
712     uptr mapped = address_range.Map(beg, size, name);
713     if (UNLIKELY(!mapped))
714       return false;
715     CHECK_EQ(beg, mapped);
716     MapUnmapCallback().OnMap(beg, size);
717     return true;
718   }
719 
720   void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) {
721     if (PremappedHeap) {
722       CHECK_GE(beg, NonConstSpaceBeg);
723       CHECK_LE(beg + size, NonConstSpaceBeg + kSpaceSize);
724       return;
725     }
726     CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));
727     MapUnmapCallback().OnMap(beg, size);
728   }
729 
730   void UnmapWithCallbackOrDie(uptr beg, uptr size) {
731     if (PremappedHeap)
732       return;
733     MapUnmapCallback().OnUnmap(beg, size);
734     address_range.Unmap(beg, size);
735   }
736 
737   bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
738                             uptr num_freed_chunks) {
739     uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
740     if (region->mapped_free_array < needed_space) {
741       uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
742       CHECK_LE(new_mapped_free_array, kFreeArraySize);
743       uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
744                              region->mapped_free_array;
745       uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
746       if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size,
747                                     "SizeClassAllocator: freearray")))
748         return false;
749       region->mapped_free_array = new_mapped_free_array;
750     }
751     return true;
752   }
753 
754   // Check whether this size class is exhausted.
755   bool IsRegionExhausted(RegionInfo *region, uptr class_id,
756                          uptr additional_map_size) {
757     if (LIKELY(region->mapped_user + region->mapped_meta +
758                additional_map_size <= kRegionSize - kFreeArraySize))
759       return false;
760     if (!region->exhausted) {
761       region->exhausted = true;
762       Printf("%s: Out of memory. ", SanitizerToolName);
763       Printf("The process has exhausted %zuMB for size class %zu.\n",
764              kRegionSize >> 20, ClassIdToSize(class_id));
765     }
766     return true;
767   }
768 
769   NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,
770                                   RegionInfo *region, uptr requested_count) {
771     // region->mutex is held.
772     const uptr region_beg = GetRegionBeginBySizeClass(class_id);
773     const uptr size = ClassIdToSize(class_id);
774 
775     const uptr total_user_bytes =
776         region->allocated_user + requested_count * size;
777     // Map more space for chunks, if necessary.
778     if (LIKELY(total_user_bytes > region->mapped_user)) {
779       if (UNLIKELY(region->mapped_user == 0)) {
780         if (!kUsingConstantSpaceBeg && kRandomShuffleChunks)
781           // The random state is initialized from ASLR.
782           region->rand_state = static_cast<u32>(region_beg >> 12);
783         // Postpone the first release to OS attempt for ReleaseToOSIntervalMs,
784         // preventing just allocated memory from being released sooner than
785         // necessary and also preventing extraneous ReleaseMemoryPagesToOS calls
786         // for short lived processes.
787         // Do it only when the feature is turned on, to avoid a potentially
788         // extraneous syscall.
789         if (ReleaseToOSIntervalMs() >= 0)
790           region->rtoi.last_release_at_ns = MonotonicNanoTime();
791       }
792       // Do the mmap for the user memory.
793       const uptr user_map_size =
794           RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize);
795       if (UNLIKELY(IsRegionExhausted(region, class_id, user_map_size)))
796         return false;
797       if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
798                                     user_map_size,
799                                     "SizeClassAllocator: region data")))
800         return false;
801       stat->Add(AllocatorStatMapped, user_map_size);
802       region->mapped_user += user_map_size;
803     }
804     const uptr new_chunks_count =
805         (region->mapped_user - region->allocated_user) / size;
806 
807     if (kMetadataSize) {
808       // Calculate the required space for metadata.
809       const uptr total_meta_bytes =
810           region->allocated_meta + new_chunks_count * kMetadataSize;
811       const uptr meta_map_size = (total_meta_bytes > region->mapped_meta) ?
812           RoundUpTo(total_meta_bytes - region->mapped_meta, kMetaMapSize) : 0;
813       // Map more space for metadata, if necessary.
814       if (meta_map_size) {
815         if (UNLIKELY(IsRegionExhausted(region, class_id, meta_map_size)))
816           return false;
817         if (UNLIKELY(!MapWithCallback(
818             GetMetadataEnd(region_beg) - region->mapped_meta - meta_map_size,
819             meta_map_size, "SizeClassAllocator: region metadata")))
820           return false;
821         region->mapped_meta += meta_map_size;
822       }
823     }
824 
825     // If necessary, allocate more space for the free array and populate it with
826     // newly allocated chunks.
827     const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count;
828     if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
829       return false;
830     CompactPtrT *free_array = GetFreeArray(region_beg);
831     for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count;
832          i++, chunk += size)
833       free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk);
834     if (kRandomShuffleChunks)
835       RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count,
836                     &region->rand_state);
837 
838     // All necessary memory is mapped and now it is safe to advance all
839     // 'allocated_*' counters.
840     region->num_freed_chunks += new_chunks_count;
841     region->allocated_user += new_chunks_count * size;
842     CHECK_LE(region->allocated_user, region->mapped_user);
843     region->allocated_meta += new_chunks_count * kMetadataSize;
844     CHECK_LE(region->allocated_meta, region->mapped_meta);
845     region->exhausted = false;
846 
847     // TODO(alekseyshl): Consider bumping last_release_at_ns here to prevent
848     // MaybeReleaseToOS from releasing just allocated pages or protect these
849     // not yet used chunks some other way.
850 
851     return true;
852   }
853 
854   // Attempts to release RAM occupied by freed chunks back to OS. The region is
855   // expected to be locked.
856   //
857   // TODO(morehouse): Support a callback on memory release so HWASan can release
858   // aliases as well.
859   void MaybeReleaseToOS(MemoryMapperT *memory_mapper, uptr class_id,
860                         bool force) {
861     RegionInfo *region = GetRegionInfo(class_id);
862     const uptr chunk_size = ClassIdToSize(class_id);
863     const uptr page_size = GetPageSizeCached();
864 
865     uptr n = region->num_freed_chunks;
866     if (n * chunk_size < page_size)
867       return;  // No chance to release anything.
868     if ((region->stats.n_freed -
869          region->rtoi.n_freed_at_last_release) * chunk_size < page_size) {
870       return;  // Nothing new to release.
871     }
872 
873     if (!force) {
874       s32 interval_ms = ReleaseToOSIntervalMs();
875       if (interval_ms < 0)
876         return;
877 
878       if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
879           MonotonicNanoTime()) {
880         return;  // Memory was returned recently.
881       }
882     }
883 
884     ReleaseFreeMemoryToOS(
885         GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
886         RoundUpTo(region->allocated_user, page_size) / page_size, memory_mapper,
887         class_id);
888 
889     uptr ranges, bytes;
890     if (memory_mapper->GetAndResetStats(ranges, bytes)) {
891       region->rtoi.n_freed_at_last_release = region->stats.n_freed;
892       region->rtoi.num_releases += ranges;
893       region->rtoi.last_released_bytes = bytes;
894     }
895     region->rtoi.last_release_at_ns = MonotonicNanoTime();
896   }
897 };
898