//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "chunk.h"
#include "common.h"
#include "list.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.

namespace LargeBlock {

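// The header of a large block. It holds the link pointers for the in-use
// list, and the bounds of both the reserved mapping and the committed region
// within it. Its alignment to the minimum alignment (and, when supported, to
// the memory tag granule) is enforced by the static_asserts below, so that
// the header can be tagged independently of the user data that follows it.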
struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1,
                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  uptr MapBase;
  uptr MapSize;
  [[no_unique_address]] MapPlatformData Data;
};

static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");

constexpr uptr getHeaderSize() { return sizeof(Header); }

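// With memory tagging enabled, block headers are addressed through pointers
// carrying a fixed tag of 1 rather than being left untagged.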
template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}

template <typename Config> static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}

template <typename Config> static Header *getHeader(const void *Ptr) {
  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

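// `H->Data` is copied to the stack before unmapping: the header itself lives
// inside the mapping that is being destroyed.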
static void unmap(LargeBlock::Header *H) {
  MapPlatformData Data = H->Data;
  unmap(reinterpret_cast<void *>(H->MapBase), H->MapSize, UNMAP_ALL, &Data);
}

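// A no-op cache to be used when Secondary caching is not wanted: store()
// unmaps the block right away, and retrieve() never yields anything.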
class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
                UNUSED LargeBlock::Header **H, UNUSED bool *Zeroed) {
    return false;
  }
  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }
};

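// Cached blocks are only reused if no more than MaxUnusedCachePages of their
// committed memory would be left unused at the front by the new allocation.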
static const uptr MaxUnusedCachePages = 4U;

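// Maps the committed part of a Secondary block. Under memory tagging, only
// the range that may ever need tags -- the header area, and anything up to
// MaxUnusedCachePages past the commit base that a cache hit could re-tag --
// is mapped with MAP_MEMTAG; the remainder of a large block is mapped
// without it.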
template <typename Config>
void mapSecondary(Options Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MapPlatformData *Data) {
  const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
  if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
    const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
    map(reinterpret_cast<void *>(CommitBase), UntaggedPos - CommitBase,
        "scudo:secondary", MAP_RESIZABLE | MAP_MEMTAG | Flags, Data);
    map(reinterpret_cast<void *>(UntaggedPos),
        CommitBase + CommitSize - UntaggedPos, "scudo:secondary",
        MAP_RESIZABLE | Flags, Data);
  } else {
    map(reinterpret_cast<void *>(CommitBase), CommitSize, "scudo:secondary",
        MAP_RESIZABLE | (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) |
            Flags,
        Data);
  }
}

// Template specialization to avoid producing a zero-length array.
template <typename T, size_t Size> class NonZeroLengthArray {
public:
  T &operator[](uptr Idx) { return values[Idx]; }

private:
  T values[Size];
};
template <typename T> class NonZeroLengthArray<T, 0> {
public:
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
};

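// A cache of up to SecondaryCacheEntriesArraySize recently freed blocks, the
// most recently stored one sitting at index 0. Entries can be released to
// the OS lazily, after a configurable interval. With memory tagging enabled,
// an optional quarantine delays the reuse of freed blocks.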
template <typename Config> class MapAllocatorCache {
public:
  // Ensure the default maximum specified fits the array.
  static_assert(Config::SecondaryCacheDefaultMaxEntriesCount <=
                    Config::SecondaryCacheEntriesArraySize,
                "");

  void init(s32 ReleaseToOsInterval) {
    DCHECK_EQ(EntriesCount, 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntriesCount));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntrySize));
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

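  // Attempts to place a freed block in the cache, going through the
  // quarantine first when memory tagging is in use. Blocks that cannot be
  // cached are unmapped, and the whole cache is emptied once it has been
  // found full too many times (see IsFullEvents).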
  void store(Options Options, LargeBlock::Header *H) {
    if (!canCache(H->CommitSize))
      return unmap(H);

    bool EntryCached = false;
    bool EmptyCache = false;
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    const u64 Time = getMonotonicTime();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    CachedBlock Entry;
    Entry.CommitBase = H->CommitBase;
    Entry.CommitSize = H->CommitSize;
    Entry.MapBase = H->MapBase;
    Entry.MapSize = H->MapSize;
    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.Data = H->Data;
    Entry.Time = Time;
    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new
        // mapping on top, so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, &Entry.Data);
      } else {
        setMemoryPermission(Entry.CommitBase, Entry.CommitSize, MAP_NOACCESS,
                            &Entry.Data);
      }
    } else if (Interval == 0) {
      releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
      Entry.Time = 0;
    }
    do {
      ScopedLock L(Mutex);
      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here, then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry
        // into the quarantine or the cache because the permissions would be
        // wrong, so just unmap it.
        break;
      }
      if (Config::SecondaryCacheQuarantineSize &&
          useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(Config::SecondaryCacheQuarantineSize, 1u);
        if (!Quarantine[QuarantinePos].CommitBase) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].CommitBase)
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0] = Entry;
          EntriesCount++;
          if (OldestTime == 0)
            OldestTime = Entry.Time;
          EntryCached = true;
          break;
        }
      }
    } while (0);
    if (EmptyCache)
      empty();
    else if (Interval >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    if (!EntryCached)
      unmap(reinterpret_cast<void *>(Entry.MapBase), Entry.MapSize, UNMAP_ALL,
            &Entry.Data);
  }

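  // Looks for a cached block able to serve `Size` bytes at `Alignment`. An
  // entry fits if the headers land within its committed region, and if no
  // more than MaxUnusedCachePages are left unused at the front. On success,
  // the entry's permissions and memory tags are restored as needed.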
  bool retrieve(Options Options, uptr Size, uptr Alignment,
                LargeBlock::Header **H, bool *Zeroed) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    bool Found = false;
    CachedBlock Entry;
    uptr HeaderPos = 0;
    {
      ScopedLock L(Mutex);
      if (EntriesCount == 0)
        return false;
      for (u32 I = 0; I < MaxCount; I++) {
        const uptr CommitBase = Entries[I].CommitBase;
        if (!CommitBase)
          continue;
        const uptr CommitSize = Entries[I].CommitSize;
        const uptr AllocPos =
            roundDownTo(CommitBase + CommitSize - Size, Alignment);
        HeaderPos =
            AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
          continue;
        Found = true;
        Entry = Entries[I];
        Entries[I].CommitBase = 0;
        break;
      }
    }
    if (Found) {
      *H = reinterpret_cast<LargeBlock::Header *>(
          LargeBlock::addHeaderTag<Config>(HeaderPos));
      *Zeroed = Entry.Time == 0;
      if (useMemoryTagging<Config>(Options))
        setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0, &Entry.Data);
      uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
      if (useMemoryTagging<Config>(Options)) {
        if (*Zeroed)
          storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
                    NewBlockBegin);
        else if (Entry.BlockBegin < NewBlockBegin)
          storeTags(Entry.BlockBegin, NewBlockBegin);
        else
          storeTags(untagPointer(NewBlockBegin),
                    untagPointer(Entry.BlockBegin));
      }
      (*H)->CommitBase = Entry.CommitBase;
      (*H)->CommitSize = Entry.CommitSize;
      (*H)->MapBase = Entry.MapBase;
      (*H)->MapSize = Entry.MapSize;
      (*H)->Data = Entry.Data;
      EntriesCount--;
    }
    return Found;
  }

  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval =
          Max(Min(static_cast<s32>(Value),
                  Config::SecondaryCacheMaxReleaseToOsIntervalMs),
              Config::SecondaryCacheMinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    if (O == Option::MaxCacheEntriesCount) {
      const u32 MaxCount = static_cast<u32>(Value);
      if (MaxCount > Config::SecondaryCacheEntriesArraySize)
        return false;
      atomic_store_relaxed(&MaxEntriesCount, MaxCount);
      return true;
    }
    if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disableMemoryTagging() {
    ScopedLock L(Mutex);
    for (u32 I = 0; I != Config::SecondaryCacheQuarantineSize; ++I) {
      if (Quarantine[I].CommitBase) {
        unmap(reinterpret_cast<void *>(Quarantine[I].MapBase),
              Quarantine[I].MapSize, UNMAP_ALL, &Quarantine[I].Data);
        Quarantine[I].CommitBase = 0;
      }
    }
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    for (u32 I = 0; I < MaxCount; I++)
      if (Entries[I].CommitBase)
        setMemoryPermission(Entries[I].CommitBase, Entries[I].CommitSize, 0,
                            &Entries[I].Data);
    QuarantinePos = -1U;
  }

  void disable() { Mutex.lock(); }

  void enable() { Mutex.unlock(); }

  void unmapTestOnly() { empty(); }

private:
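  // Unmaps every cached entry. The mapping information is copied out under
  // the lock, and the unmap() calls themselves happen after it is released.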
  void empty() {
    struct {
      void *MapBase;
      uptr MapSize;
      MapPlatformData Data;
    } MapInfo[Config::SecondaryCacheEntriesArraySize];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
        if (!Entries[I].CommitBase)
          continue;
        MapInfo[N].MapBase = reinterpret_cast<void *>(Entries[I].MapBase);
        MapInfo[N].MapSize = Entries[I].MapSize;
        MapInfo[N].Data = Entries[I].Data;
        Entries[I].CommitBase = 0;
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++)
      unmap(MapInfo[I].MapBase, MapInfo[I].MapSize, UNMAP_ALL,
            &MapInfo[I].Data);
  }

  struct CachedBlock {
    uptr CommitBase;
    uptr CommitSize;
    uptr MapBase;
    uptr MapSize;
    uptr BlockBegin;
    [[no_unique_address]] MapPlatformData Data;
    u64 Time;
  };

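  // Releases an entry's pages back to the OS if it was freed at or before
  // `Time`; otherwise updates OldestTime so that a later pass picks it up.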
  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) {
    if (!Entry.CommitBase || !Entry.Time)
      return;
    if (Entry.Time > Time) {
      if (OldestTime == 0 || Entry.Time < OldestTime)
        OldestTime = Entry.Time;
      return;
    }
    releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
    Entry.Time = 0;
  }

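  // Releases every cache and quarantine entry freed at or before `Time`.
  // OldestTime makes this a no-op when nothing is old enough.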
  void releaseOlderThan(u64 Time) {
    ScopedLock L(Mutex);
    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
      return;
    OldestTime = 0;
    for (uptr I = 0; I < Config::SecondaryCacheQuarantineSize; I++)
      releaseIfOlderThan(Quarantine[I], Time);
    for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++)
      releaseIfOlderThan(Entries[I], Time);
  }

  HybridMutex Mutex;
  u32 EntriesCount = 0;
  u32 QuarantinePos = 0;
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime = 0;
  u32 IsFullEvents = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};

  CachedBlock Entries[Config::SecondaryCacheEntriesArraySize] = {};
  NonZeroLengthArray<CachedBlock, Config::SecondaryCacheQuarantineSize>
      Quarantine = {};
};

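// The Secondary allocator itself: allocate() first tries to recycle a block
// from the cache, and otherwise carves one out of a fresh mapping surrounded
// by guard pages; deallocate() hands blocks back to the cache, which decides
// whether to keep or unmap them.
//
// A minimal usage sketch (assuming a Config providing the SecondaryCache and
// related parameters, as the usual allocator configs do):
//
//   MapAllocator<Config> Secondary;
//   Secondary.init(/*S=*/nullptr);
//   void *P = Secondary.allocate(Options{}, 1 << 20);
//   Secondary.deallocate(Options{}, P);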
template <typename Config> class MapAllocator {
public:
  void init(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
    DCHECK_EQ(AllocatedBytes, 0U);
    DCHECK_EQ(FreedBytes, 0U);
    Cache.init(ReleaseToOsInterval);
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
  }

  void *allocate(Options Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(Options Options, void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    auto *B = LargeBlock::getHeader<Config>(Ptr);
    return B->CommitBase + B->CommitSize;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  void getStats(ScopedString *Str) const;

  void disable() {
    Mutex.lock();
    Cache.disable();
  }

  void enable() {
    Cache.enable();
    Mutex.unlock();
  }

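  // Iterates over the blocks currently in use. InUseBlocks is walked without
  // taking Mutex, so this is meant to be called with the allocator disabled
  // (see disable()).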
  template <typename F> void iterateOverBlocks(F Callback) const {
    for (const auto &H : InUseBlocks) {
      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
      if (allocatorSupportsMemoryTagging<Config>())
        Ptr = untagPointer(Ptr);
      Callback(Ptr);
    }
  }

  bool canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

  void disableMemoryTagging() { Cache.disableMemoryTagging(); }

  void unmapTestOnly() { Cache.unmapTestOnly(); }

private:
  typename Config::SecondaryCache Cache;

  HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks;
  uptr AllocatedBytes = 0;
  uptr FreedBytes = 0;
  uptr LargestSize = 0;
  u32 NumberOfAllocs = 0;
  u32 NumberOfFrees = 0;
  LocalStats Stats;
};

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the scarcity of address space available on those platforms,
// requesting an allocation from the Secondary with a large alignment would
// end up wasting VA space (even though we are not committing the whole
// thing), hence the need to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
template <typename Config>
void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
                                     uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();
  uptr RoundedSize =
      roundUpTo(roundUpTo(Size, Alignment) + LargeBlock::getHeaderSize() +
                    Chunk::getHeaderSize(),
                PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;

  if (Alignment < PageSize && Cache.canCache(RoundedSize)) {
    LargeBlock::Header *H;
    bool Zeroed;
    if (Cache.retrieve(Options, Size, Alignment, &H, &Zeroed)) {
      const uptr BlockEnd = H->CommitBase + H->CommitSize;
      if (BlockEndPtr)
        *BlockEndPtr = BlockEnd;
      uptr HInt = reinterpret_cast<uptr>(H);
      if (allocatorSupportsMemoryTagging<Config>())
        HInt = untagPointer(HInt);
      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
      void *Ptr = reinterpret_cast<void *>(PtrInt);
      if (FillContents && !Zeroed)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               BlockEnd - PtrInt);
      const uptr BlockSize = BlockEnd - HInt;
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += BlockSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, BlockSize);
        Stats.add(StatMapped, H->MapSize);
      }
      return Ptr;
    }
  }

  MapPlatformData Data = {};
  const uptr MapSize = RoundedSize + 2 * PageSize;
  uptr MapBase = reinterpret_cast<uptr>(
      map(nullptr, MapSize, nullptr, MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
  if (UNLIKELY(!MapBase))
    return nullptr;
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments of at least a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (i.e.,
    // the pointer that is returned by the C or C++ allocation APIs) ends up
    // on a page boundary, and our headers will live in the preceding page.
    CommitBase = roundUpTo(MapBase + PageSize + 1, Alignment) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory-wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd =
        CommitBase + PageSize + roundUpTo(Size, PageSize) + PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr AllocPos = roundDownTo(CommitBase + CommitSize - Size, Alignment);
  mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, &Data);
  const uptr HeaderPos =
      AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(HeaderPos));
  if (useMemoryTagging<Config>(Options))
    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
              reinterpret_cast<uptr>(H + 1));
  H->MapBase = MapBase;
  H->MapSize = MapEnd - MapBase;
  H->CommitBase = CommitBase;
  H->CommitSize = CommitSize;
  H->Data = Data;
  if (BlockEndPtr)
    *BlockEndPtr = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MapSize);
  }
  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}

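// Deallocation removes the block from the in-use list and updates the stats,
// then defers to the cache, which will either retain or unmap the block.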
template <typename Config>
void MapAllocator<Config>::deallocate(Options Options, void *Ptr) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MapSize);
  }
  Cache.store(Options, H);
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) const {
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_