//===-- wrappers_c.inc ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PREFIX
#error "Define SCUDO_PREFIX prior to including this file!"
#endif

// Pointers returned by malloc-type functions must be aligned to
// std::max_align_t. This is distinct from (1U << SCUDO_MIN_ALIGNMENT_LOG),
// since C++ new-type functions do not have to abide by the same requirement.
#ifndef SCUDO_MALLOC_ALIGNMENT
#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
#endif

extern "C" {

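// Allocates zero-initialized memory for an array of nmemb elements of size
// bytes each. The nmemb * size product is checked for overflow up front, in
// which case the allocator either returns null (setting errno) or reports the
// error, depending on its configuration.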
INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
  scudo::uptr Product;
  if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
    if (SCUDO_ALLOCATOR.canReturnNull()) {
      errno = ENOMEM;
      return nullptr;
    }
    scudo::reportCallocOverflow(nmemb, size);
  }
  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
      Product, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT, true));
}

INTERFACE WEAK void SCUDO_PREFIX(free)(void *ptr) {
  SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
}

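// mallinfo() fields are approximated from Scudo's statistic counters; fields
// that have no direct equivalent are left at zero.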
INTERFACE WEAK struct SCUDO_MALLINFO SCUDO_PREFIX(mallinfo)(void) {
  struct SCUDO_MALLINFO Info = {};
  scudo::StatCounters Stats;
  SCUDO_ALLOCATOR.getStats(Stats);
  // Space allocated in mmapped regions (bytes)
  Info.hblkhd = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatMapped]);
  // Maximum total allocated space (bytes)
  Info.usmblks = Info.hblkhd;
  // Space in freed fastbin blocks (bytes)
  Info.fsmblks = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatFree]);
  // Total allocated space (bytes)
  Info.uordblks =
      static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatAllocated]);
  // Total free space (bytes)
  Info.fordblks = Info.fsmblks;
  return Info;
}

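// Same as mallinfo(), but struct __scudo_mallinfo2 carries fields wide enough
// to hold the raw counters, so no narrowing casts are needed.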
INTERFACE WEAK struct __scudo_mallinfo2 SCUDO_PREFIX(mallinfo2)(void) {
  struct __scudo_mallinfo2 Info = {};
  scudo::StatCounters Stats;
  SCUDO_ALLOCATOR.getStats(Stats);
  // Space allocated in mmapped regions (bytes)
  Info.hblkhd = Stats[scudo::StatMapped];
  // Maximum total allocated space (bytes)
  Info.usmblks = Info.hblkhd;
  // Space in freed fastbin blocks (bytes)
  Info.fsmblks = Stats[scudo::StatFree];
  // Total allocated space (bytes)
  Info.uordblks = Stats[scudo::StatAllocated];
  // Total free space (bytes)
  Info.fordblks = Info.fsmblks;
  return Info;
}

INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
      size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
}

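// Android declares malloc_usable_size() with a const-qualified pointer, hence
// the two signatures below.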
#if SCUDO_ANDROID
INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(const void *ptr) {
#else
INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(void *ptr) {
#endif
  return SCUDO_ALLOCATOR.getUsableSize(ptr);
}

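// Legacy aligned allocation interface. A non-power-of-two alignment is either
// rounded up (Android) or treated as an error, as detailed below.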
INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
  // Android rounds up the alignment to a power of two if it isn't one.
  if (SCUDO_ANDROID) {
    if (UNLIKELY(!alignment)) {
      alignment = 1U;
    } else {
      if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
        alignment = scudo::roundUpToPowerOfTwo(alignment);
    }
  } else {
    if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
      if (SCUDO_ALLOCATOR.canReturnNull()) {
        errno = EINVAL;
        return nullptr;
      }
      scudo::reportAlignmentNotPowerOfTwo(alignment);
    }
  }
  return SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
                                  alignment);
}

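// Returns 0 on success, or EINVAL/ENOMEM on failure. On failure the error is
// returned directly (errno is left alone) and *memptr is not modified.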
INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
                                                size_t size) {
  if (UNLIKELY(scudo::checkPosixMemalignAlignment(alignment))) {
    if (!SCUDO_ALLOCATOR.canReturnNull())
      scudo::reportInvalidPosixMemalignAlignment(alignment);
    return EINVAL;
  }
  void *Ptr =
      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
  if (UNLIKELY(!Ptr))
    return ENOMEM;
  *memptr = Ptr;
  return 0;
}

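// Obsolete interface: allocates size bytes rounded up to a multiple of the
// page size, with page alignment.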
INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
  const scudo::uptr PageSize = scudo::getPageSizeCached();
  if (UNLIKELY(scudo::checkForPvallocOverflow(size, PageSize))) {
    if (SCUDO_ALLOCATOR.canReturnNull()) {
      errno = ENOMEM;
      return nullptr;
    }
    scudo::reportPvallocOverflow(size);
  }
  // pvalloc(0) should allocate one page.
  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
      size ? scudo::roundUpTo(size, PageSize) : PageSize,
      scudo::Chunk::Origin::Memalign, PageSize));
}

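// realloc(nullptr, size) behaves like malloc(size), and realloc(ptr, 0) frees
// ptr and returns nullptr.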
INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
  if (!ptr)
    return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
        size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
  if (size == 0) {
    SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
    return nullptr;
  }
  return scudo::setErrnoOnNull(
      SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT));
}

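// Obsolete interface: returns size bytes of page-aligned memory.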
INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
      size, scudo::Chunk::Origin::Memalign, scudo::getPageSizeCached()));
}

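// Invokes callback on every allocated chunk that falls within the
// [base, base + size) address range.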
INTERFACE WEAK int SCUDO_PREFIX(malloc_iterate)(
    uintptr_t base, size_t size,
    void (*callback)(uintptr_t base, size_t size, void *arg), void *arg) {
  SCUDO_ALLOCATOR.iterateOverChunks(base, size, callback, arg);
  return 0;
}

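// malloc_disable() locks the allocator, pausing allocations and deallocations
// from other threads; malloc_enable() lifts the lock. The pair is registered
// as fork handlers below so the heap stays consistent across fork().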
INTERFACE WEAK void SCUDO_PREFIX(malloc_enable)() { SCUDO_ALLOCATOR.enable(); }

INTERFACE WEAK void SCUDO_PREFIX(malloc_disable)() {
  SCUDO_ALLOCATOR.disable();
}

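// Finishes GWP-ASan initialization and registers the fork handlers that lock
// the allocator around fork().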
void SCUDO_PREFIX(malloc_postinit)() {
  SCUDO_ALLOCATOR.initGwpAsan();
  pthread_atfork(SCUDO_PREFIX(malloc_disable), SCUDO_PREFIX(malloc_enable),
                 SCUDO_PREFIX(malloc_enable));
}

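// Returns 1 if the parameter was recognized and applied, 0 otherwise.
// M_DECAY_TIME and M_PURGE are handled specially; the remaining M_*
// parameters map directly onto Scudo options.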
INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, int value) {
  if (param == M_DECAY_TIME) {
    if (SCUDO_ANDROID) {
      if (value == 0) {
        // Will set the release values to their minimum values.
        value = INT32_MIN;
      } else {
        // Will set the release values to their maximum values.
        value = INT32_MAX;
      }
    }

    SCUDO_ALLOCATOR.setOption(scudo::Option::ReleaseInterval,
                              static_cast<scudo::sptr>(value));
    return 1;
  } else if (param == M_PURGE) {
    SCUDO_ALLOCATOR.releaseToOS();
    return 1;
  } else {
    scudo::Option option;
    switch (param) {
    case M_MEMTAG_TUNING:
      option = scudo::Option::MemtagTuning;
      break;
    case M_THREAD_DISABLE_MEM_INIT:
      option = scudo::Option::ThreadDisableMemInit;
      break;
    case M_CACHE_COUNT_MAX:
      option = scudo::Option::MaxCacheEntriesCount;
      break;
    case M_CACHE_SIZE_MAX:
      option = scudo::Option::MaxCacheEntrySize;
      break;
    case M_TSDS_COUNT_MAX:
      option = scudo::Option::MaxTSDsCount;
      break;
    default:
      return 0;
    }
    return SCUDO_ALLOCATOR.setOption(option, static_cast<scudo::sptr>(value));
  }
}

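// C11 aligned_alloc(): the alignment/size combination is validated before the
// allocation is attempted.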
INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
                                                 size_t size) {
  if (UNLIKELY(scudo::checkAlignedAllocAlignmentAndSize(alignment, size))) {
    if (SCUDO_ALLOCATOR.canReturnNull()) {
      errno = EINVAL;
      return nullptr;
    }
    scudo::reportInvalidAlignedAllocAlignment(alignment, size);
  }
  return scudo::setErrnoOnNull(
      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment));
}

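// Writes an XML document to stream containing a histogram of in-use
// allocation sizes, for sizes below the primary allocator's largest size
// class.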
INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
  const scudo::uptr max_size =
      decltype(SCUDO_ALLOCATOR)::PrimaryT::SizeClassMap::MaxSize;
  auto *sizes = static_cast<scudo::uptr *>(
      SCUDO_PREFIX(calloc)(max_size, sizeof(scudo::uptr)));
  auto callback = [](uintptr_t, size_t size, void *arg) {
    auto *sizes = reinterpret_cast<scudo::uptr *>(arg);
    if (size < max_size)
      sizes[size]++;
  };
  SCUDO_ALLOCATOR.iterateOverChunks(0, -1ul, callback, sizes);

  fputs("<malloc version=\"scudo-1\">\n", stream);
  for (scudo::uptr i = 0; i != max_size; ++i)
    if (sizes[i])
      fprintf(stream, "<alloc size=\"%zu\" count=\"%zu\"/>\n", i, sizes[i]);
  fputs("</malloc>\n", stream);
  SCUDO_PREFIX(free)(sizes);
  return 0;
}

// Disable memory tagging for the heap. The caller must disable memory tag
// checks globally (e.g. by clearing TCF0 on aarch64) before calling this
// function, and may not re-enable them after calling the function.
INTERFACE WEAK void SCUDO_PREFIX(malloc_disable_memory_tagging)() {
  SCUDO_ALLOCATOR.disableMemoryTagging();
}

// Sets whether scudo records stack traces and other metadata for allocations
// and deallocations. This function only has an effect if the allocator and
// hardware support memory tagging.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_track_allocation_stacks)(int track) {
  SCUDO_ALLOCATOR.setTrackAllocationStacks(track);
}

// Sets whether scudo zero-initializes all allocated memory.
INTERFACE WEAK void SCUDO_PREFIX(malloc_set_zero_contents)(int zero_contents) {
  SCUDO_ALLOCATOR.setFillContents(zero_contents ? scudo::ZeroFill
                                                : scudo::NoFill);
}

// Sets whether scudo pattern-initializes all allocated memory.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_pattern_fill_contents)(int pattern_fill_contents) {
  SCUDO_ALLOCATOR.setFillContents(
      pattern_fill_contents ? scudo::PatternOrZeroFill : scudo::NoFill);
}

// Sets whether scudo adds a small amount of slack at the end of large
// allocations, before the guard page. This can be enabled to work around buggy
// applications that read a few bytes past the end of their allocation.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_add_large_allocation_slack)(int add_slack) {
  SCUDO_ALLOCATOR.setAddLargeAllocationSlack(add_slack);
}

} // extern "C"