//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "chunk.h"
#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger-sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.
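// Roughly, a block returned by the Secondary is laid out as follows (not to
// scale; see MapAllocator::allocate() below for the exact computations):
//
//   +------------+--------------------+----+----+-----------------+------------+
//   | guard page | unused front pages | LB | CH | user allocation | guard page |
//   +------------+--------------------+----+----+-----------------+------------+
//   MapBase      CommitBase                     AllocPos                  MapEnd
//
// where LB is the LargeBlock::Header defined below and CH is the Combined
// (Chunk) header.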

namespace LargeBlock {

struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1,
                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  MemMapT MemMap;
};

static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");

constexpr uptr getHeaderSize() { return sizeof(Header); }

template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}

template <typename Config> static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}

template <typename Config> static Header *getHeader(const void *Ptr) {
  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

static inline void unmap(LargeBlock::Header *H) {
  // Note that `H->MemMap` is stored on the pages it manages. Take over the
  // ownership (by copying it out) before unmap() so that nothing touches the
  // pages once they become inaccessible.
  MemMapT MemMap = H->MemMap;
  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}

template <typename Config> class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
                UNUSED LargeBlock::Header **H, UNUSED bool *Zeroed) {
    return false;
  }
  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void getStats(UNUSED ScopedString *Str) {
    Str->append("Secondary Cache Disabled\n");
  }
};

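// Bounds (in pages) how much unused committed memory a cached block may have
// in front of the allocation it is retrieved for (see
// MapAllocatorCache::retrieve()); mapSecondary() below also uses it to decide
// how much of a block to map with MAP_MEMTAG when memory tagging is in use.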
static const uptr MaxUnusedCachePages = 4U;

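// Remaps the committed part [CommitBase, CommitBase + CommitSize) of a
// Secondary mapping with the provided flags. With memory tagging enabled and a
// commit region larger than MaxUnusedCacheBytes, only the front of the region,
// up to Max(AllocPos, CommitBase + MaxUnusedCacheBytes), is mapped with
// MAP_MEMTAG; the remainder is mapped untagged.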
template <typename Config>
void mapSecondary(Options Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MemMapT &MemMap) {
  const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
  if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
    const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
    MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
                 MAP_RESIZABLE | MAP_MEMTAG | Flags);
    MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
                 "scudo:secondary", MAP_RESIZABLE | Flags);
  } else {
    const uptr RemapFlags =
        MAP_RESIZABLE | (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) |
        Flags;
    MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
  }
}

// Template specialization to avoid producing a zero-length array.
template <typename T, size_t Size> class NonZeroLengthArray {
public:
  T &operator[](uptr Idx) { return values[Idx]; }

private:
  T values[Size];
};
template <typename T> class NonZeroLengthArray<T, 0> {
public:
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
};

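// The MapAllocatorCache keeps a fixed-size array of recently released
// Secondary blocks so that they can be recycled without going back to the
// platform allocation primitives. With memory tagging enabled, freed blocks
// may additionally be rotated through a small quarantine before becoming
// eligible for reuse.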
template <typename Config> class MapAllocatorCache {
public:
  using CacheConfig = typename Config::Secondary::Cache;

  void getStats(ScopedString *Str) {
    ScopedLock L(Mutex);
    Str->append("Stats: MapAllocatorCache: EntriesCount: %u, "
                "MaxEntriesCount: %u, MaxEntrySize: %zu\n",
                EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
                atomic_load_relaxed(&MaxEntrySize));
    for (CachedBlock Entry : Entries) {
      if (!Entry.CommitBase)
        continue;
      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                  "BlockSize: %zu\n",
                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                  Entry.CommitSize);
    }
  }

  // Ensure the default maximum specified fits the array.
  static_assert(CacheConfig::DefaultMaxEntriesCount <=
                    CacheConfig::EntriesArraySize,
                "");

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(EntriesCount, 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(CacheConfig::DefaultMaxEntriesCount));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(CacheConfig::DefaultMaxEntrySize));
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

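  // Adds a freed block to the cache: with memory tagging enabled it may first
  // be rotated through the quarantine, and depending on the release interval
  // its pages may be released to the OS (or remapped inaccessible) right away.
  // Blocks that cannot be cached are unmapped.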
  void store(Options Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
    if (!canCache(H->CommitSize))
      return unmap(H);

    bool EntryCached = false;
    bool EmptyCache = false;
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    const u64 Time = getMonotonicTimeFast();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    CachedBlock Entry;
    Entry.CommitBase = H->CommitBase;
    Entry.CommitSize = H->CommitSize;
    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.MemMap = H->MemMap;
    Entry.Time = Time;
    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new mapping
        // on top, so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
      } else {
        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                         MAP_NOACCESS);
      }
    } else if (Interval == 0) {
      Entry.MemMap.releasePagesToOS(Entry.CommitBase, Entry.CommitSize);
      Entry.Time = 0;
    }
    do {
      ScopedLock L(Mutex);
      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry into
        // the quarantine or the cache because the permissions would be wrong,
        // so just unmap it.
        break;
      }
      if (CacheConfig::QuarantineSize && useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(CacheConfig::QuarantineSize, 1u);
        if (!Quarantine[QuarantinePos].CommitBase) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].CommitBase)
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0] = Entry;
          EntriesCount++;
          if (OldestTime == 0)
            OldestTime = Entry.Time;
          EntryCached = true;
          break;
        }
      }
    } while (0);
    if (EmptyCache)
      empty();
    else if (Interval >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    if (!EntryCached)
      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
  }

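  // Looks for a cached block that can serve an allocation of `Size` bytes at
  // the requested alignment without leaving more than MaxUnusedCachePages of
  // unused memory in front of it. On success the block is removed from the
  // cache, its header is set up, and `*Zeroed` tells whether its pages were
  // returned to the OS (and will therefore read as zero).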
  bool retrieve(Options Options, uptr Size, uptr Alignment,
                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    bool Found = false;
    CachedBlock Entry;
    uptr HeaderPos = 0;
    {
      ScopedLock L(Mutex);
      if (EntriesCount == 0)
        return false;
      for (u32 I = 0; I < MaxCount; I++) {
        const uptr CommitBase = Entries[I].CommitBase;
        if (!CommitBase)
          continue;
        const uptr CommitSize = Entries[I].CommitSize;
        const uptr AllocPos =
            roundDown(CommitBase + CommitSize - Size, Alignment);
        HeaderPos =
            AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
          continue;
        }
        Found = true;
        Entry = Entries[I];
        Entries[I].CommitBase = 0;
        EntriesCount--;
        break;
      }
    }
    if (!Found)
      return false;

    *H = reinterpret_cast<LargeBlock::Header *>(
        LargeBlock::addHeaderTag<Config>(HeaderPos));
    *Zeroed = Entry.Time == 0;
    if (useMemoryTagging<Config>(Options))
      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
    if (useMemoryTagging<Config>(Options)) {
      if (*Zeroed) {
        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
                  NewBlockBegin);
      } else if (Entry.BlockBegin < NewBlockBegin) {
        storeTags(Entry.BlockBegin, NewBlockBegin);
      } else {
        storeTags(untagPointer(NewBlockBegin),
                  untagPointer(Entry.BlockBegin));
      }
    }
    (*H)->CommitBase = Entry.CommitBase;
    (*H)->CommitSize = Entry.CommitSize;
    (*H)->MemMap = Entry.MemMap;
    return true;
  }

  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), CacheConfig::MaxReleaseToOsIntervalMs),
          CacheConfig::MinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    if (O == Option::MaxCacheEntriesCount) {
      const u32 MaxCount = static_cast<u32>(Value);
      if (MaxCount > CacheConfig::EntriesArraySize)
        return false;
      atomic_store_relaxed(&MaxEntriesCount, MaxCount);
      return true;
    }
    if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disableMemoryTagging() EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    for (u32 I = 0; I != CacheConfig::QuarantineSize; ++I) {
      if (Quarantine[I].CommitBase) {
        MemMapT &MemMap = Quarantine[I].MemMap;
        MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
        Quarantine[I].CommitBase = 0;
      }
    }
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    for (u32 I = 0; I < MaxCount; I++) {
      if (Entries[I].CommitBase) {
        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
                                              Entries[I].CommitSize, 0);
      }
    }
    QuarantinePos = -1U;
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }

  void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }

  void unmapTestOnly() { empty(); }

private:
  void empty() {
    MemMapT MapInfo[CacheConfig::EntriesArraySize];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++) {
        if (!Entries[I].CommitBase)
          continue;
        MapInfo[N] = Entries[I].MemMap;
        Entries[I].CommitBase = 0;
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++) {
      MemMapT &MemMap = MapInfo[I];
      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    }
  }

  struct CachedBlock {
    uptr CommitBase = 0;
    uptr CommitSize = 0;
    uptr BlockBegin = 0;
    MemMapT MemMap = {};
    u64 Time = 0;
  };

  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
    if (!Entry.CommitBase || !Entry.Time)
      return;
    if (Entry.Time > Time) {
      if (OldestTime == 0 || Entry.Time < OldestTime)
        OldestTime = Entry.Time;
      return;
    }
    Entry.MemMap.releasePagesToOS(Entry.CommitBase, Entry.CommitSize);
    Entry.Time = 0;
  }

  void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
      return;
    OldestTime = 0;
    for (uptr I = 0; I < CacheConfig::QuarantineSize; I++)
      releaseIfOlderThan(Quarantine[I], Time);
    for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++)
      releaseIfOlderThan(Entries[I], Time);
  }

  HybridMutex Mutex;
  u32 EntriesCount GUARDED_BY(Mutex) = 0;
  u32 QuarantinePos GUARDED_BY(Mutex) = 0;
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime GUARDED_BY(Mutex) = 0;
  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};

  CachedBlock Entries[CacheConfig::EntriesArraySize] GUARDED_BY(Mutex) = {};
  NonZeroLengthArray<CachedBlock, CacheConfig::QuarantineSize>
      Quarantine GUARDED_BY(Mutex) = {};
};

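// The MapAllocator is the Secondary allocator proper: it serves allocations
// that are too large for the Primary, backs each of them with a dedicated
// mapping, and keeps an in-use list along with basic statistics. Freed blocks
// are handed to the configured cache (or unmapped right away when using
// MapAllocatorNoCache). A minimal usage sketch, assuming a suitable `Config`:
//
//   MapAllocator<Config> Secondary;
//   Secondary.init(/*S=*/nullptr);
//   uptr BlockEnd;
//   void *P = Secondary.allocate(Options{}, 1U << 20, /*AlignmentHint=*/0,
//                                &BlockEnd);
//   ...
//   Secondary.deallocate(Options{}, P);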
template <typename Config> class MapAllocator {
public:
  void init(GlobalStats *S,
            s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(AllocatedBytes, 0U);
    DCHECK_EQ(FreedBytes, 0U);
    Cache.init(ReleaseToOsInterval);
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
  }

  void *allocate(Options Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(Options Options, void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    auto *B = LargeBlock::getHeader<Config>(Ptr);
    return B->CommitBase + B->CommitSize;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    Cache.disable();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    Mutex.assertHeld();

    for (const auto &H : InUseBlocks) {
      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
      if (allocatorSupportsMemoryTagging<Config>())
        Ptr = untagPointer(Ptr);
      Callback(Ptr);
    }
  }

  bool canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

  void disableMemoryTagging() { Cache.disableMemoryTagging(); }

  void unmapTestOnly() { Cache.unmapTestOnly(); }

  void getStats(ScopedString *Str);

private:
  typename Config::Secondary::template CacheT<Config> Cache;

  mutable HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
  uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
  uptr FreedBytes GUARDED_BY(Mutex) = 0;
  uptr LargestSize GUARDED_BY(Mutex) = 0;
  u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
  u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
  LocalStats Stats GUARDED_BY(Mutex);
};

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms, requesting
// an allocation from the Secondary with a large alignment would end up wasting
// VA space (even though we are not committing the whole thing), hence the need
// to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
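// For instance (illustrative numbers only, assuming 4K pages and minimal
// alignment): a 20000-byte request gets both headers added and is then rounded
// up to 5 pages of committed memory, inside a reservation that also includes
// the two surrounding guard pages.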
template <typename Config>
void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
                                     uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();
  uptr RoundedSize =
      roundUp(roundUp(Size, Alignment) + LargeBlock::getHeaderSize() +
                  Chunk::getHeaderSize(),
              PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;

  if (Alignment < PageSize && Cache.canCache(RoundedSize)) {
    LargeBlock::Header *H;
    bool Zeroed;
    if (Cache.retrieve(Options, Size, Alignment, &H, &Zeroed)) {
      const uptr BlockEnd = H->CommitBase + H->CommitSize;
      if (BlockEndPtr)
        *BlockEndPtr = BlockEnd;
      uptr HInt = reinterpret_cast<uptr>(H);
      if (allocatorSupportsMemoryTagging<Config>())
        HInt = untagPointer(HInt);
      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
      void *Ptr = reinterpret_cast<void *>(PtrInt);
      if (FillContents && !Zeroed)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               BlockEnd - PtrInt);
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += H->CommitSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, H->CommitSize);
        Stats.add(StatMapped, H->MemMap.getCapacity());
      }
      return Ptr;
    }
  }

  ReservedMemoryT ReservedMemory;
  const uptr MapSize = RoundedSize + 2 * PageSize;
  ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr, MAP_ALLOWNOMEM);

  // Take the entire ownership of the reserved region.
  MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                           ReservedMemory.getCapacity());
  uptr MapBase = MemMap.getBase();
  if (UNLIKELY(!MapBase))
    return nullptr;
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.,
    // the pointer that is returned by the C or C++ allocation APIs) ends up on
    // a page boundary, and our headers will live in the preceding page.
    CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory-wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      MemMap.unmap(MapBase, NewMapBase - MapBase);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd =
        CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, MemMap);
  const uptr HeaderPos =
      AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(HeaderPos));
  if (useMemoryTagging<Config>(Options))
    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
              reinterpret_cast<uptr>(H + 1));
  H->CommitBase = CommitBase;
  H->CommitSize = CommitSize;
  H->MemMap = MemMap;
  if (BlockEndPtr)
    *BlockEndPtr = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
  }
  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}

template <typename Config>
void MapAllocator<Config>::deallocate(Options Options, void *Ptr)
    EXCLUDES(Mutex) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MemMap.getCapacity());
  }
  Cache.store(Options, H);
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
  ScopedLock L(Mutex);
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
  Cache.getStats(Str);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_