//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "common.h"
#include "list.h"
#include "mutex.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.
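//
// An approximate sketch of a Secondary block's layout, as carved out by
// allocate() below (exact offsets depend on the page size, the header size,
// and the requested alignment):
//
//   | guard page | Header | user allocation .............. | guard page |
//   MapBase      CommitBase                          BlockEnd       MapEnd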

namespace LargeBlock {

struct Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr BlockEnd;
  uptr MapBase;
  uptr MapSize;
  MapPlatformData Data;
};

constexpr uptr getHeaderSize() {
  return roundUpTo(sizeof(Header), 1U << SCUDO_MIN_ALIGNMENT_LOG);
}

static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(Ptr - getHeaderSize());
}

static Header *getHeader(const void *Ptr) {
  return getHeader(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

template <uptr MaxFreeListSize = 32U> class MapAllocator {
public:
  // Ensure the freelist is disabled on Fuchsia, since it doesn't support
  // releasing Secondary blocks yet.
  static_assert(!SCUDO_FUCHSIA || MaxFreeListSize == 0U, "");

  void initLinkerInitialized(GlobalStats *S) {
    Stats.initLinkerInitialized();
    if (LIKELY(S))
      S->link(&Stats);
  }
  void init(GlobalStats *S) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(S);
  }

  void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr,
                 bool ZeroContents = false);

  void deallocate(void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    return LargeBlock::getHeader(Ptr)->BlockEnd;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  void getStats(ScopedString *Str) const;

  void disable() { Mutex.lock(); }

  void enable() { Mutex.unlock(); }

  template <typename F> void iterateOverBlocks(F Callback) const {
    for (const auto &H : InUseBlocks)
      Callback(reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize());
  }

  static uptr getMaxFreeListSize(void) { return MaxFreeListSize; }

private:
  HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks;
  // The free list is sorted based on the committed size of blocks.
  DoublyLinkedList<LargeBlock::Header> FreeBlocks;
  uptr AllocatedBytes;
  uptr FreedBytes;
  uptr LargestSize;
  u32 NumberOfAllocs;
  u32 NumberOfFrees;
  LocalStats Stats;
};
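
// A minimal usage sketch (hypothetical standalone use; in practice the
// Combined frontend drives this class):
//
//   MapAllocator<> Secondary;
//   Secondary.init(nullptr);
//   uptr BlockEnd;
//   void *P = Secondary.allocate(1U << 20, /*AlignmentHint=*/0, &BlockEnd);
//   // P points to at least 1 MB of usable memory, ending at BlockEnd.
//   Secondary.deallocate(P);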

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms, requesting
// an allocation from the Secondary with a large alignment would end up wasting
// VA space (even though we are not committing the whole thing), hence the need
// to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
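// For instance (illustrative numbers only, assuming a 4K page size and a 64K
// AlignmentHint): CommitBase is moved to one page below the first 64K
// boundary strictly past the leading guard page, so that the aligned user
// pointer lands on that boundary while the header occupies the page right
// before it. NewMapEnd then spans that header page, the user portion rounded
// up to a page (roughly Size - AlignmentHint), and a trailing guard page; on
// 32-bit platforms the reserved space outside [NewMapBase, NewMapEnd) is
// unmapped.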
template <uptr MaxFreeListSize>
void *MapAllocator<MaxFreeListSize>::allocate(uptr Size, uptr AlignmentHint,
                                              uptr *BlockEnd,
                                              bool ZeroContents) {
  DCHECK_GE(Size, AlignmentHint);
  const uptr PageSize = getPageSizeCached();
  const uptr RoundedSize =
      roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize);

  if (MaxFreeListSize && AlignmentHint < PageSize) {
    ScopedLock L(Mutex);
    for (auto &H : FreeBlocks) {
      const uptr FreeBlockSize = H.BlockEnd - reinterpret_cast<uptr>(&H);
      if (FreeBlockSize < RoundedSize)
        continue;
      // Candidate free block should only be at most 4 pages larger.
      if (FreeBlockSize > RoundedSize + 4 * PageSize)
        break;
      FreeBlocks.remove(&H);
      InUseBlocks.push_back(&H);
      AllocatedBytes += FreeBlockSize;
      NumberOfAllocs++;
      Stats.add(StatAllocated, FreeBlockSize);
      if (BlockEnd)
        *BlockEnd = H.BlockEnd;
      void *Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(&H) +
                                           LargeBlock::getHeaderSize());
      if (ZeroContents)
        memset(Ptr, 0, H.BlockEnd - reinterpret_cast<uptr>(Ptr));
      return Ptr;
    }
  }

  MapPlatformData Data = {};
  const uptr MapSize = RoundedSize + 2 * PageSize;
  uptr MapBase =
      reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
                                 MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
  if (UNLIKELY(!MapBase))
    return nullptr;
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(AlignmentHint >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.
    // the pointer returned by the C or C++ allocation APIs) ends up on a page
    // boundary, and our headers will live in the preceding page.
    CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory-wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd = CommitBase + PageSize +
                           roundUpTo((Size - AlignmentHint), PageSize) +
                           PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr Ptr =
      reinterpret_cast<uptr>(map(reinterpret_cast<void *>(CommitBase),
                                 CommitSize, "scudo:secondary", 0, &Data));
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
  H->MapBase = MapBase;
  H->MapSize = MapEnd - MapBase;
  H->BlockEnd = CommitBase + CommitSize;
  H->Data = Data;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MapSize);
  }
  if (BlockEnd)
    *BlockEnd = CommitBase + CommitSize;
  return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
}

template <uptr MaxFreeListSize>
void MapAllocator<MaxFreeListSize>::deallocate(void *Ptr) {
  LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
  const uptr Block = reinterpret_cast<uptr>(H);
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    const uptr CommitSize = H->BlockEnd - Block;
    FreedBytes += CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    if (MaxFreeListSize && FreeBlocks.size() < MaxFreeListSize) {
      bool Inserted = false;
      for (auto &F : FreeBlocks) {
        const uptr FreeBlockSize = F.BlockEnd - reinterpret_cast<uptr>(&F);
        if (FreeBlockSize >= CommitSize) {
          FreeBlocks.insert(H, &F);
          Inserted = true;
          break;
        }
      }
      if (!Inserted)
        FreeBlocks.push_back(H);
      const uptr RoundedAllocationStart =
          roundUpTo(Block + LargeBlock::getHeaderSize(), getPageSizeCached());
      MapPlatformData Data = H->Data;
      // TODO(kostyak): use release_to_os_interval_ms
      releasePagesToOS(Block, RoundedAllocationStart - Block,
                       H->BlockEnd - RoundedAllocationStart, &Data);
      return;
    }
    Stats.sub(StatMapped, H->MapSize);
  }
  void *Addr = reinterpret_cast<void *>(H->MapBase);
  const uptr Size = H->MapSize;
  MapPlatformData Data = H->Data;
  unmap(Addr, Size, UNMAP_ALL, &Data);
}

template <uptr MaxFreeListSize>
void MapAllocator<MaxFreeListSize>::getStats(ScopedString *Str) const {
  Str->append(
      "Stats: MapAllocator: allocated %zu times (%zuK), freed %zu times "
      "(%zuK), remains %zu (%zuK) max %zuM\n",
      NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
      NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
      LargestSize >> 20);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_