//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "common.h"
#include "list.h"
#include "mutex.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.
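//
// A rough sketch of the resulting layout, for the typical case where
// AlignmentHint < PageSize (see MapAllocator<Config>::allocate below):
//
//   MapBase      CommitBase                          BlockEnd     MapEnd
//   | guard page | Header | usable memory ...        | guard page |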

namespace LargeBlock {

struct Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr BlockEnd;
  uptr MapBase;
  uptr MapSize;
  [[no_unique_address]] MapPlatformData Data;
};

constexpr uptr getHeaderSize() {
  return roundUpTo(sizeof(Header), 1U << SCUDO_MIN_ALIGNMENT_LOG);
}

static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(Ptr - getHeaderSize());
}

static Header *getHeader(const void *Ptr) {
  return getHeader(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

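// A cache-less placeholder: store() always declines (so blocks get unmapped
// on deallocation) and retrieve() always misses. Cache-related options are
// reported as unsupported by setOption().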
class MapAllocatorNoCache {
public:
  void initLinkerInitialized(UNUSED s32 ReleaseToOsInterval) {}
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED uptr Size, UNUSED LargeBlock::Header **H,
                UNUSED bool *Zeroed) {
    return false;
  }
  bool store(UNUSED LargeBlock::Header *H) { return false; }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }
};

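// Caches a fixed number of recently deallocated Secondary blocks, so that
// their mappings (and potentially their physical pages) can be reused by
// subsequent allocations of similar sizes instead of going back to the OS.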
template <typename Config> class MapAllocatorCache {
public:
  // Ensure the default maximum specified fits the array.
  static_assert(Config::SecondaryCacheDefaultMaxEntriesCount <=
                    Config::SecondaryCacheEntriesArraySize,
                "");

  void initLinkerInitialized(s32 ReleaseToOsInterval) {
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntriesCount));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntrySize));
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }
  void init(s32 ReleaseToOsInterval) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(ReleaseToOsInterval);
  }

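  // Attempt to cache the block held by H, making it the most recent entry
  // (Entries[0]). On the 5th store attempt that finds the cache full since it
  // was last emptied, the whole cache gets emptied; otherwise, entries unused
  // for longer than the release interval get their backing pages released to
  // the OS.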
  bool store(LargeBlock::Header *H) {
    bool EntryCached = false;
    bool EmptyCache = false;
    const u64 Time = getMonotonicTime();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    {
      ScopedLock L(Mutex);
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].Block)
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0].Block = reinterpret_cast<uptr>(H);
          Entries[0].BlockEnd = H->BlockEnd;
          Entries[0].MapBase = H->MapBase;
          Entries[0].MapSize = H->MapSize;
          Entries[0].Data = H->Data;
          Entries[0].Time = Time;
          EntriesCount++;
          EntryCached = true;
          break;
        }
      }
    }
    s32 Interval;
    if (EmptyCache)
      empty();
    else if ((Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs)) >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    return EntryCached;
  }

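  // Look up a cached block suitable for a request of Size bytes: the first
  // entry that is at least Size bytes, and at most 4 pages larger, is
  // returned. *Zeroed is set if the block's pages were released to the OS
  // while cached, in which case they read back as zero.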
  bool retrieve(uptr Size, LargeBlock::Header **H, bool *Zeroed) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    ScopedLock L(Mutex);
    if (EntriesCount == 0)
      return false;
    for (u32 I = 0; I < MaxCount; I++) {
      if (!Entries[I].Block)
        continue;
      const uptr BlockSize = Entries[I].BlockEnd - Entries[I].Block;
      if (Size > BlockSize)
        continue;
      if (Size < BlockSize - PageSize * 4U)
        continue;
      *H = reinterpret_cast<LargeBlock::Header *>(Entries[I].Block);
      *Zeroed = Entries[I].Time == 0;
      Entries[I].Block = 0;
      (*H)->BlockEnd = Entries[I].BlockEnd;
      (*H)->MapBase = Entries[I].MapBase;
      (*H)->MapSize = Entries[I].MapSize;
      (*H)->Data = Entries[I].Data;
      EntriesCount--;
      return true;
    }
    return false;
  }

  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

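  // The release interval is clamped to the Config-provided bounds, and the
  // maximum entries count may not exceed the capacity of the Entries array.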
  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval =
          Max(Min(static_cast<s32>(Value),
                  Config::SecondaryCacheMaxReleaseToOsIntervalMs),
              Config::SecondaryCacheMinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    } else if (O == Option::MaxCacheEntriesCount) {
      const u32 MaxCount = static_cast<u32>(Value);
      if (MaxCount > Config::SecondaryCacheEntriesArraySize)
        return false;
      atomic_store_relaxed(&MaxEntriesCount, MaxCount);
      return true;
    } else if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disable() { Mutex.lock(); }

  void enable() { Mutex.unlock(); }

private:
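  // Unmap all cached entries. The mapping information is copied to a local
  // array under the lock, and the actual unmap() calls are issued once the
  // lock has been released.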
  void empty() {
    struct {
      void *MapBase;
      uptr MapSize;
      MapPlatformData Data;
    } MapInfo[Config::SecondaryCacheEntriesArraySize];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
        if (!Entries[I].Block)
          continue;
        MapInfo[N].MapBase = reinterpret_cast<void *>(Entries[I].MapBase);
        MapInfo[N].MapSize = Entries[I].MapSize;
        MapInfo[N].Data = Entries[I].Data;
        Entries[I].Block = 0;
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++)
      unmap(MapInfo[I].MapBase, MapInfo[I].MapSize, UNMAP_ALL,
            &MapInfo[I].Data);
  }

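  // Release the backing pages of entries not used since Time, keeping the
  // mappings themselves cached. Released entries get a Time of 0, which
  // retrieve() relies on to report their contents as zeroed.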
  void releaseOlderThan(u64 Time) {
    ScopedLock L(Mutex);
    if (!EntriesCount)
      return;
    for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
      if (!Entries[I].Block || !Entries[I].Time || Entries[I].Time > Time)
        continue;
      releasePagesToOS(Entries[I].Block, 0,
                       Entries[I].BlockEnd - Entries[I].Block,
                       &Entries[I].Data);
      Entries[I].Time = 0;
    }
  }

  struct CachedBlock {
    uptr Block;
    uptr BlockEnd;
    uptr MapBase;
    uptr MapSize;
    [[no_unique_address]] MapPlatformData Data;
    u64 Time;
  };

  HybridMutex Mutex;
  CachedBlock Entries[Config::SecondaryCacheEntriesArraySize];
  u32 EntriesCount;
  atomic_u32 MaxEntriesCount;
  atomic_uptr MaxEntrySize;
  uptr LargestSize;
  u32 IsFullEvents;
  atomic_s32 ReleaseToOsIntervalMs;
};

template <typename Config> class MapAllocator {
public:
  void initLinkerInitialized(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
    Cache.initLinkerInitialized(ReleaseToOsInterval);
    Stats.initLinkerInitialized();
    if (LIKELY(S))
      S->link(&Stats);
  }
  void init(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(S, ReleaseToOsInterval);
  }

  void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    return LargeBlock::getHeader(Ptr)->BlockEnd;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  void getStats(ScopedString *Str) const;

  void disable() {
    Mutex.lock();
    Cache.disable();
  }

  void enable() {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    for (const auto &H : InUseBlocks)
      Callback(reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize());
  }

  bool canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

private:
  typename Config::SecondaryCache Cache;

  HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks;
  uptr AllocatedBytes;
  uptr FreedBytes;
  uptr LargestSize;
  u32 NumberOfAllocs;
  u32 NumberOfFrees;
  LocalStats Stats;
};
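
// A minimal usage sketch, for illustration only (`SomeConfig` stands in for a
// real configuration providing the SecondaryCache type and the
// SecondaryCache* constants referenced above):
//
//   scudo::MapAllocator<SomeConfig> Secondary;
//   Secondary.init(/*S=*/nullptr);
//   uptr BlockEnd;
//   void *P = Secondary.allocate(1UL << 20, /*AlignmentHint=*/0, &BlockEnd);
//   // ... P points to at least 1M of usable memory, extending to BlockEnd ...
//   Secondary.deallocate(P);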

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms,
// requesting an allocation from the Secondary with a large alignment would end
// up wasting VA space (even though we are not committing the whole thing),
// hence the need to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
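// As an illustrative example (assuming 4K pages): with MapBase == 0x100000
// and AlignmentHint == 64K, CommitBase below becomes
// roundUpTo(0x101001, 0x10000) - 0x1000 == 0x10f000, so the first 64K-aligned
// address in the committed region is 0x110000, and the headers live in the
// page preceding it.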
template <typename Config>
void *MapAllocator<Config>::allocate(uptr Size, uptr AlignmentHint,
                                     uptr *BlockEnd,
                                     FillContentsMode FillContents) {
  DCHECK_GE(Size, AlignmentHint);
  const uptr PageSize = getPageSizeCached();
  const uptr RoundedSize =
      roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize);

  if (AlignmentHint < PageSize && Cache.canCache(RoundedSize)) {
    LargeBlock::Header *H;
    bool Zeroed;
    if (Cache.retrieve(RoundedSize, &H, &Zeroed)) {
      if (BlockEnd)
        *BlockEnd = H->BlockEnd;
      void *Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(H) +
                                           LargeBlock::getHeaderSize());
      if (FillContents && !Zeroed)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               H->BlockEnd - reinterpret_cast<uptr>(Ptr));
      const uptr BlockSize = H->BlockEnd - reinterpret_cast<uptr>(H);
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += BlockSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, BlockSize);
        Stats.add(StatMapped, H->MapSize);
      }
      return Ptr;
    }
  }

  MapPlatformData Data = {};
  const uptr MapSize = RoundedSize + 2 * PageSize;
  uptr MapBase =
      reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
                                 MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
  if (UNLIKELY(!MapBase))
    return nullptr;
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(AlignmentHint >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (i.e.,
    // the pointer returned by the C or C++ allocation APIs) ends up on a page
    // boundary, and our headers will live in the preceding page.
    CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd = CommitBase + PageSize +
                           roundUpTo((Size - AlignmentHint), PageSize) +
                           PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr Ptr = reinterpret_cast<uptr>(
      map(reinterpret_cast<void *>(CommitBase), CommitSize, "scudo:secondary",
          MAP_RESIZABLE, &Data));
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
  H->MapBase = MapBase;
  H->MapSize = MapEnd - MapBase;
  H->BlockEnd = CommitBase + CommitSize;
  H->Data = Data;
  if (BlockEnd)
    *BlockEnd = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MapSize);
  }
  return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
}

template <typename Config> void MapAllocator<Config>::deallocate(void *Ptr) {
  LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
  const uptr Block = reinterpret_cast<uptr>(H);
  const uptr CommitSize = H->BlockEnd - Block;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MapSize);
  }
  if (Cache.canCache(CommitSize) && Cache.store(H))
    return;
  void *Addr = reinterpret_cast<void *>(H->MapBase);
  const uptr Size = H->MapSize;
  MapPlatformData Data = H->Data;
  unmap(Addr, Size, UNMAP_ALL, &Data);
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) const {
  Str->append(
      "Stats: MapAllocator: allocated %zu times (%zuK), freed %zu times "
      "(%zuK), remains %zu (%zuK) max %zuM\n",
      NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
      NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
      LargestSize >> 20);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_